Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/common/storage/src/operator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -355,11 +355,15 @@ fn init_moka_operator(v: &StorageMokaConfig) -> Result<impl Builder> {

/// init_webhdfs_operator will init a WebHDFS operator.
///
/// Endpoint, root and delegation token are taken verbatim from the storage
/// config; `disable_list_batch` optionally turns off batched listing for
/// HDFS deployments that do not support it.
fn init_webhdfs_operator(v: &StorageWebhdfsConfig) -> Result<impl Builder> {
    let mut builder = services::Webhdfs::default()
        .endpoint(&v.endpoint_url)
        .root(&v.root)
        .delegation(&v.delegation);

    // NOTE(review): presumably this makes OpenDAL fall back to plain
    // LISTSTATUS instead of LISTSTATUS_BATCH — confirm against the
    // opendal::services::Webhdfs docs.
    if v.disable_list_batch {
        builder = builder.disable_list_batch();
    }

    Ok(builder)
}

Expand Down
4 changes: 3 additions & 1 deletion src/meta/app/src/storage/storage_params.rs
Original file line number Diff line number Diff line change
Expand Up @@ -522,14 +522,16 @@ pub struct StorageWebhdfsConfig {
pub endpoint_url: String,
pub root: String,
pub delegation: String,
pub disable_list_batch: bool,
}

impl Debug for StorageWebhdfsConfig {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
let mut ds = f.debug_struct("StorageWebhdfsConfig");

ds.field("endpoint_url", &self.endpoint_url)
.field("root", &self.root);
.field("root", &self.root)
.field("disable_list_batch", &self.disable_list_batch);

ds.field("delegation", &mask_string(&self.delegation, 3));

Expand Down
2 changes: 2 additions & 0 deletions src/meta/proto-conv/src/config_from_to_protobuf_impl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,7 @@ impl FromToProto for StorageWebhdfsConfig {
endpoint_url: p.endpoint_url,
root: p.root,
delegation: p.delegation,
disable_list_batch: p.disable_list_batch,
})
}

Expand All @@ -271,6 +272,7 @@ impl FromToProto for StorageWebhdfsConfig {
endpoint_url: self.endpoint_url.clone(),
root: self.root.clone(),
delegation: self.delegation.clone(),
disable_list_batch: self.disable_list_batch,

username: String::new(), // reserved for future use
password: String::new(), // reserved for future use
Expand Down
1 change: 1 addition & 0 deletions src/meta/proto-conv/src/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,7 @@ const META_CHANGE_LOG: &[(u64, &str)] = &[
(114, "2024-12-12: Add: New DataType Interval."),
(115, "2024-12-16: Add: udf.proto: add UDAFScript and UDAFServer"),
(116, "2025-01-09: Add: MarkedDeletedIndexMeta"),
(117, "2025-01-21: Add: config.proto: add disable_list_batch in WebhdfsConfig")
// Dear developer:
// If you're gonna add a new metadata version, you'll have to add a test for it.
// You could just copy an existing test file(e.g., `../tests/it/v024_table_meta.rs`)
Expand Down
1 change: 1 addition & 0 deletions src/meta/proto-conv/tests/it/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,3 +114,4 @@ mod v113_warehouse_grantobject;
mod v114_interval_datatype;
mod v115_add_udaf_script;
mod v116_marked_deleted_index_meta;
mod v117_webhdfs_add_disable_list_batch;
1 change: 1 addition & 0 deletions src/meta/proto-conv/tests/it/user_proto_conv.rs
Original file line number Diff line number Diff line change
Expand Up @@ -321,6 +321,7 @@ pub(crate) fn test_webhdfs_stage_info() -> mt::principal::StageInfo {
endpoint_url: "https://webhdfs.example.com".to_string(),
root: "/path/to/stage/files".to_string(),
delegation: "<delegation_token>".to_string(),
disable_list_batch: false,
}),
},
is_temporary: false,
Expand Down
1 change: 1 addition & 0 deletions src/meta/proto-conv/tests/it/user_stage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ fn test_user_stage_webhdfs_v30() -> anyhow::Result<()> {
endpoint_url: "https://webhdfs.example.com".to_string(),
root: "/path/to/stage/files".to_string(),
delegation: "<delegation_token>".to_string(),
disable_list_batch: false,
}),
},
file_format_params: mt::principal::FileFormatParams::Json(
Expand Down
1 change: 1 addition & 0 deletions src/meta/proto-conv/tests/it/v030_user_stage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ fn test_decode_v30_user_stage() -> anyhow::Result<()> {
endpoint_url: "https://webhdfs.example.com".to_string(),
root: "/path/to/stage/files".to_string(),
delegation: "<delegation_token>".to_string(),
disable_list_batch: false,
}),
},
file_format_params: mt::principal::FileFormatParams::Json(
Expand Down
1 change: 1 addition & 0 deletions src/meta/proto-conv/tests/it/v031_copy_max_file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ fn test_decode_v31_copy_max_file() -> anyhow::Result<()> {
endpoint_url: "https://webhdfs.example.com".to_string(),
root: "/path/to/stage/files".to_string(),
delegation: "<delegation_token>".to_string(),
disable_list_batch: false,
}),
},
file_format_params: mt::principal::FileFormatParams::Json(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
// Copyright 2023 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use chrono::DateTime;
use chrono::Utc;
use databend_common_meta_app as mt;
use databend_common_meta_app::principal::UserIdentity;
use databend_common_meta_app::storage::StorageParams;
use databend_common_meta_app::storage::StorageWebhdfsConfig;
use fastrace::func_name;

use crate::common;

// These bytes are built when a new version is introduced,
// and are kept for backward compatibility test.
//
// *************************************************************
// * These messages should never be updated, *
// * only be added when a new version is added, *
// * or be removed when an old version is no longer supported. *
// *************************************************************
//
#[test]
fn test_v117_webhdfs_add_disable_list_batch() -> anyhow::Result<()> {
    // Encoded data of version 117 of databend_common_meta_app::principal::user_stage::StageInfo:
    // It is generated with common::test_pb_from_to().
    //
    // This is a frozen compatibility fixture: never edit or regenerate these
    // bytes; current readers must keep decoding them into `want()` below.
    let stage_info_v117 = vec![
        10, 22, 119, 101, 98, 104, 100, 102, 115, 58, 47, 47, 100, 105, 114, 47, 116, 111, 47, 102,
        105, 108, 101, 115, 16, 1, 26, 12, 10, 10, 42, 8, 48, 1, 160, 6, 117, 168, 6, 24, 42, 11,
        10, 2, 48, 2, 16, 142, 8, 24, 1, 56, 1, 50, 4, 116, 101, 115, 116, 56, 100, 66, 29, 10, 8,
        100, 97, 116, 97, 98, 101, 110, 100, 18, 11, 100, 97, 116, 97, 98, 101, 110, 100, 46, 114,
        115, 160, 6, 117, 168, 6, 24, 74, 10, 34, 8, 8, 2, 160, 6, 117, 168, 6, 24, 82, 23, 49, 57,
        55, 48, 45, 48, 49, 45, 48, 49, 32, 48, 48, 58, 48, 48, 58, 48, 48, 32, 85, 84, 67, 160, 6,
        117, 168, 6, 24,
    ];

    // Expected in-memory value for the bytes above. The field this version
    // adds is `disable_list_batch: true`; every other Webhdfs field is left
    // at its default via struct-update syntax.
    let want = || mt::principal::StageInfo {
        stage_name: "webhdfs://dir/to/files".to_string(),
        stage_type: mt::principal::StageType::External,
        stage_params: mt::principal::StageParams {
            storage: StorageParams::Webhdfs(StorageWebhdfsConfig {
                disable_list_batch: true,
                ..Default::default()
            }),
        },
        is_temporary: false,
        file_format_params: mt::principal::FileFormatParams::Json(
            mt::principal::JsonFileFormatParams {
                compression: mt::principal::StageFileCompression::Bz2,
            },
        ),
        copy_options: mt::principal::CopyOptions {
            on_error: mt::principal::OnErrorMode::AbortNum(2),
            size_limit: 1038,
            max_files: 0,
            split_size: 0,
            purge: true,
            single: false,
            max_file_size: 0,
            disable_variant_check: true,
            return_failed_only: false,
            detailed_output: false,
        },
        comment: "test".to_string(),
        number_of_files: 100,
        creator: Some(UserIdentity {
            username: "databend".to_string(),
            hostname: "databend.rs".to_string(),
        }),
        created_on: DateTime::<Utc>::default(),
    };

    // Round trip: encode `want()` with the current writer and decode it back.
    common::test_pb_from_to(func_name!(), want())?;
    // Backward compat: decode the frozen v117 bytes with the current reader.
    common::test_load_old(func_name!(), stage_info_v117.as_slice(), 117, want())?;

    Ok(())
}
2 changes: 2 additions & 0 deletions src/meta/protos/proto/config.proto
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,8 @@ message WebhdfsStorageConfig {

string username = 4; // reserved for future use
string password = 5; // reserved for future use

bool disable_list_batch = 6;
}

message ObsStorageConfig {
Expand Down
10 changes: 10 additions & 0 deletions src/query/config/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1190,6 +1190,14 @@ pub struct WebhdfsStorageConfig {
#[clap(long = "storage-webhdfs-root", value_name = "VALUE", default_value_t)]
#[serde(rename = "root")]
pub webhdfs_root: String,
/// Disable list batch if hdfs doesn't support yet.
#[clap(
long = "storage-webhdfs-disable-list-batch",
value_name = "VALUE",
default_value_t
)]
#[serde(rename = "disable_list_batch")]
pub webhdfs_disable_list_batch: bool,
}

impl Default for WebhdfsStorageConfig {
Expand All @@ -1214,6 +1222,7 @@ impl From<InnerStorageWebhdfsConfig> for WebhdfsStorageConfig {
webhdfs_delegation: v.delegation,
webhdfs_endpoint_url: v.endpoint_url,
webhdfs_root: v.root,
webhdfs_disable_list_batch: v.disable_list_batch,
}
}
}
Expand All @@ -1226,6 +1235,7 @@ impl TryFrom<WebhdfsStorageConfig> for InnerStorageWebhdfsConfig {
delegation: value.webhdfs_delegation,
endpoint_url: value.webhdfs_endpoint_url,
root: value.webhdfs_root,
disable_list_batch: value.webhdfs_disable_list_batch,
})
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,7 @@ DB.Table: 'system'.'configs', Table: configs-table_id:1, ver:0, Engine: SystemCo
| 'storage' | 'storage_type' | 'null' | '' |
| 'storage' | 'type' | 'fs' | '' |
| 'storage' | 'webhdfs.delegation' | '' | '' |
| 'storage' | 'webhdfs.disable_list_batch' | 'false' | '' |
| 'storage' | 'webhdfs.endpoint_url' | '' | '' |
| 'storage' | 'webhdfs.root' | '' | '' |
+-----------+-------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+
Expand Down
15 changes: 15 additions & 0 deletions src/query/sql/src/planner/binder/location.rs
Original file line number Diff line number Diff line change
Expand Up @@ -397,11 +397,26 @@ fn parse_webhdfs_params(l: &mut UriLocation, root: String) -> Result<StoragePara
let endpoint_url = format!("{prefix}://{}", l.name);

let delegation = l.connection.get("delegation").cloned().unwrap_or_default();
let disable_list_batch = l
.connection
.get("disable_list_batch")
.map(|v| v.to_lowercase().parse::<bool>())
.unwrap_or(Ok(true))
.map_err(|e| {
Error::new(
ErrorKind::InvalidInput,
format!(
"disable_list_batch should be `TRUE` or `FALSE`, parse error with: {:?}",
e,
),
)
})?;

let sp = StorageParams::Webhdfs(StorageWebhdfsConfig {
endpoint_url,
root,
delegation,
disable_list_batch,
});

l.connection
Expand Down
13 changes: 9 additions & 4 deletions src/query/sql/tests/location.rs
Original file line number Diff line number Diff line change
Expand Up @@ -386,16 +386,21 @@ async fn test_parse_uri_location() -> Result<()> {
"webhdfs".to_string(),
"example.com".to_string(),
"/path/to/dir/".to_string(),
vec![("https", "TrUE"), ("delegation", "databendthebest")]
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect::<BTreeMap<_, _>>(),
vec![
("https", "TrUE"),
("delegation", "databendthebest"),
("disable_list_batch", "true"),
]
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect::<BTreeMap<_, _>>(),
),
(
StorageParams::Webhdfs(StorageWebhdfsConfig {
root: "/path/to/dir/".to_string(),
endpoint_url: "https://example.com".to_string(),
delegation: "databendthebest".to_string(),
disable_list_batch: true,
}),
"/".to_string(),
),
Expand Down
Loading