5 changes: 5 additions & 0 deletions Cargo.toml
@@ -33,4 +33,9 @@ fluss = { version = "0.1.0", path = "./crates/fluss" }
 tokio = { version = "1.44.2", features = ["full"] }
 clap = { version = "4.5.37", features = ["derive"] }
 arrow = { version = "57.0.0", features = ["ipc_compression"] }
 chrono = { version = "0.4", features = ["clock", "std", "wasmbind"] }
+
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+opendal = "0.53"
+jiff = { version = "0.2" }
2 changes: 1 addition & 1 deletion crates/examples/Cargo.toml
@@ -26,7 +26,7 @@ version = { workspace = true }
 [dependencies]
 fluss = { workspace = true }
 tokio = { workspace = true }
-clap = { workspace = true}
+clap = { workspace = true }
 [[example]]
 name = "example-table"
 path = "src/example_table.rs"
11 changes: 6 additions & 5 deletions crates/fluss/Cargo.toml
@@ -22,11 +22,12 @@ version = { workspace = true }
 name = "fluss"

 [features]
-default = ["storage-memory", "storage-fs"]
-storage-all = ["storage-memory", "storage-fs"]
+default = ["storage-memory", "storage-fs", "storage-s3"]
+storage-all = ["storage-memory", "storage-fs", "storage-s3"]

 storage-memory = ["opendal/services-memory"]
 storage-fs = ["opendal/services-fs"]
+storage-s3 = ["opendal/services-s3"]
 integration_tests = []

 [dependencies]
@@ -39,9 +40,9 @@ crc32c = "0.6.8"
 linked-hash-map = "0.5.6"
 prost = "0.14"
 rand = "0.9.1"
-serde = { version = "1.0.219", features = ["derive", "rc"] }
-serde_json = "1.0.140"
-thiserror = "2"
+serde = { workspace = true, features = ["rc"] }
+serde_json = { workspace = true }
+thiserror = "1.0"
 log = { version = "0.4", features = ["kv_std"] }
 tokio = { workspace = true }
 parking_lot = "0.12"
161 changes: 161 additions & 0 deletions crates/fluss/src/client/credentials.rs
@@ -0,0 +1,161 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use crate::client::metadata::Metadata;
use crate::error::{Error, Result};
use crate::rpc::RpcClient;
use crate::rpc::message::GetSecurityTokenRequest;
use parking_lot::RwLock;
use serde::Deserialize;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};

const CACHE_TTL: Duration = Duration::from_secs(3600);

#[derive(Debug, Deserialize)]
struct Credentials {
    access_key_id: String,
    access_key_secret: String,
    security_token: Option<String>,
}

struct CachedToken {
    access_key_id: String,
    secret_access_key: String,
    security_token: Option<String>,
    addition_infos: HashMap<String, String>,
    cached_at: Instant,
}

impl CachedToken {
    fn to_remote_fs_props(&self) -> HashMap<String, String> {
        let mut props = HashMap::new();

        props.insert("access_key_id".to_string(), self.access_key_id.clone());
        props.insert(
            "secret_access_key".to_string(),
            self.secret_access_key.clone(),
        );

        if let Some(token) = &self.security_token {
            props.insert("security_token".to_string(), token.clone());
        }

        for (key, value) in &self.addition_infos {
            if let Some((opendal_key, transform)) = convert_hadoop_key_to_opendal(key) {
                let final_value = if transform {
                    // Invert boolean value (path_style_access -> enable_virtual_host_style)
                    if value == "true" {
                        "false".to_string()
                    } else {
                        "true".to_string()
                    }
                } else {
                    value.clone()
                };
                props.insert(opendal_key, final_value);
            }
        }

        props
    }
}

/// Returns (opendal_key, needs_inversion)
/// needs_inversion is true for path_style_access -> enable_virtual_host_style conversion
fn convert_hadoop_key_to_opendal(hadoop_key: &str) -> Option<(String, bool)> {
    match hadoop_key {
        "fs.s3a.endpoint" => Some(("endpoint".to_string(), false)),
        "fs.s3a.endpoint.region" => Some(("region".to_string(), false)),
        "fs.s3a.path.style.access" => Some(("enable_virtual_host_style".to_string(), true)),
        "fs.s3a.connection.ssl.enabled" => None,
        _ => None,
    }
}

pub struct CredentialsCache {
    inner: RwLock<Option<CachedToken>>,
}

impl CredentialsCache {
    pub fn new() -> Self {
        Self {
            inner: RwLock::new(None),
        }
    }

    pub async fn get_or_refresh(
        &self,
        rpc_client: &Arc<RpcClient>,
        metadata: &Arc<Metadata>,
    ) -> Result<HashMap<String, String>> {
        {
            let guard = self.inner.read();
            if let Some(cached) = guard.as_ref() {
                if cached.cached_at.elapsed() < CACHE_TTL {
                    return Ok(cached.to_remote_fs_props());
                }
            }
        }

        self.refresh_from_server(rpc_client, metadata).await
    }

    async fn refresh_from_server(
        &self,
        rpc_client: &Arc<RpcClient>,
        metadata: &Arc<Metadata>,
    ) -> Result<HashMap<String, String>> {
        let cluster = metadata.get_cluster();
        let server_node = cluster
            .get_coordinator_server()
            .or_else(|| Some(cluster.get_one_available_server()))
            .expect("no available server to fetch security token");
Comment on lines +125 to +128
Copilot AI commented on Dec 14, 2025:

Using .expect() with a panic message bypasses proper error handling. If no server is available, this will cause a panic instead of returning a proper error. Consider using .ok_or_else() to return an appropriate Error type instead, allowing the caller to handle this failure gracefully.
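A minimal sketch of that suggestion, reusing the crate's existing Error::Io variant (the variant choice is an assumption, and since the Some(...) fallback in the PR always yields a value, the error arm only fires if that chain is made fallible):

    // Sketch: return an error instead of panicking when no server is available.
    // Error::Io is illustrative; a dedicated error variant may fit better.
    let server_node = cluster
        .get_coordinator_server()
        .or_else(|| Some(cluster.get_one_available_server()))
        .ok_or_else(|| {
            Error::Io(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "no available server to fetch security token",
            ))
        })?;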
        let conn = rpc_client.get_connection(server_node).await?;

        let request = GetSecurityTokenRequest::new();
        let response = conn.request(request).await?;

        let credentials: Credentials = serde_json::from_slice(&response.token)
            .map_err(|e| Error::JsonSerdeError(e.to_string()))?;

        let mut addition_infos = HashMap::new();
        for kv in &response.addition_info {
            addition_infos.insert(kv.key.clone(), kv.value.clone());
        }

        let cached = CachedToken {
            access_key_id: credentials.access_key_id,
            secret_access_key: credentials.access_key_secret,
            security_token: credentials.security_token,
            addition_infos,
            cached_at: Instant::now(),
        };

        let props = cached.to_remote_fs_props();
        *self.inner.write() = Some(cached);

        Ok(props)
    }
}

impl Default for CredentialsCache {
    fn default() -> Self {
        Self::new()
    }
}
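As a reading aid for the key mapping above, a small illustrative test of convert_hadoop_key_to_opendal (not part of the PR; it would live in this module's tests):

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn path_style_key_maps_with_inversion() {
            // fs.s3a.path.style.access=true means path-style addressing, which
            // opendal expresses as enable_virtual_host_style=false, hence the
            // inversion flag returned here.
            assert_eq!(
                convert_hadoop_key_to_opendal("fs.s3a.path.style.access"),
                Some(("enable_virtual_host_style".to_string(), true))
            );
            // The SSL toggle has no opendal mapping here and is dropped.
            assert_eq!(
                convert_hadoop_key_to_opendal("fs.s3a.connection.ssl.enabled"),
                None
            );
        }
    }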
2 changes: 2 additions & 0 deletions crates/fluss/src/client/mod.rs
@@ -17,12 +17,14 @@

 mod admin;
 mod connection;
+mod credentials;
 mod metadata;
 mod table;
 mod write;

 pub use admin::*;
 pub use connection::*;
+pub use credentials::*;
 pub use metadata::*;
 pub use table::*;
 pub use write::*;
73 changes: 64 additions & 9 deletions crates/fluss/src/client/table/remote_log.rs
@@ -20,6 +20,7 @@ use crate::metadata::TableBucket;
 use crate::proto::{PbRemoteLogFetchInfo, PbRemoteLogSegment};
 use crate::record::{LogRecordsBatchs, ReadContext, ScanRecord};
 use crate::util::delete_file;
+use parking_lot::RwLock;
 use std::collections::HashMap;
 use std::io;
 use std::path::{Path, PathBuf};
@@ -115,11 +116,19 @@ impl RemoteLogDownloadFuture {
 /// Downloader for remote log segment files
 pub struct RemoteLogDownloader {
     local_log_dir: TempDir,
+    remote_fs_props: RwLock<HashMap<String, String>>,
 }

 impl RemoteLogDownloader {
     pub fn new(local_log_dir: TempDir) -> Result<Self> {
-        Ok(Self { local_log_dir })
+        Ok(Self {
+            local_log_dir,
+            remote_fs_props: RwLock::new(HashMap::new()),
+        })
+    }
+
+    pub fn set_remote_fs_props(&self, props: HashMap<String, String>) {
+        *self.remote_fs_props.write() = props;
     }

     /// Request to fetch a remote log segment to local. This method is non-blocking.
@@ -133,10 +142,16 @@ impl RemoteLogDownloader {
         let local_file_path = self.local_log_dir.path().join(&local_file_name);
         let remote_path = self.build_remote_path(remote_log_tablet_dir, segment);
         let remote_log_tablet_dir = remote_log_tablet_dir.to_string();
+        let remote_fs_props = self.remote_fs_props.read().clone();
         // Spawn async download task
         tokio::spawn(async move {
-            let result =
-                Self::download_file(&remote_log_tablet_dir, &remote_path, &local_file_path).await;
+            let result = Self::download_file(
+                &remote_log_tablet_dir,
+                &remote_path,
+                &local_file_path,
+                &remote_fs_props,
+            )
+            .await;
             let _ = sender.send(result);
         });
         Ok(RemoteLogDownloadFuture::new(receiver))
Expand All @@ -157,6 +172,7 @@ impl RemoteLogDownloader {
remote_log_tablet_dir: &str,
remote_path: &str,
local_path: &Path,
remote_fs_props: &HashMap<String, String>,
) -> Result<PathBuf> {
// Handle both URL (e.g., "s3://bucket/path") and local file paths
// If the path doesn't contain "://", treat it as a local file path
@@ -169,11 +185,27 @@
         // Create FileIO from the remote log tablet dir URL to get the storage
         let file_io_builder = FileIO::from_url(&remote_log_tablet_dir_url)?;

+        // For S3/S3A URLs, inject S3 credentials from props
+        let file_io_builder = if remote_log_tablet_dir.starts_with("s3://")
+            || remote_log_tablet_dir.starts_with("s3a://")
+        {
+            file_io_builder.with_props(
+                remote_fs_props
+                    .iter()
+                    .map(|(k, v)| (k.as_str(), v.as_str())),
+            )
+        } else {
+            file_io_builder
+        };
+
         // Build storage and create operator directly
         let storage = Storage::build(file_io_builder)?;
         let (op, relative_path) = storage.create(remote_path)?;

-        // Get file metadata to know the size
+        // Timeout for remote storage operations (30 seconds)
+        const REMOTE_OP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
Copilot AI commented on Dec 14, 2025:

The hardcoded timeout constant is defined locally in the function instead of at the module level or as a configurable constant. Consider moving this to a module-level constant for better maintainability and consistency with the pattern used in storage_s3.rs (lines 32-33) where timeouts are defined at the module level.

Suggested change:
-        const REMOTE_OP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
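A sketch of that suggestion, with the constant hoisted to module scope in remote_log.rs (exact placement and any configurability are left open by the reviewer):

    // Module-level timeout for remote storage operations, instead of a
    // function-local const inside download_file().
    const REMOTE_OP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);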

+        // Get file metadata to know the size with timeout
         let meta = op.stat(relative_path).await?;
         let file_size = meta.content_length();

@@ -184,13 +216,36 @@
         // opendal::Reader::read accepts a range, so we read in chunks
         const CHUNK_SIZE: u64 = 8 * 1024 * 1024; // 8MB chunks for efficient reading
         let mut offset = 0u64;
+        let mut chunk_count = 0u64;
+        let total_chunks = file_size.div_ceil(CHUNK_SIZE);

         while offset < file_size {
             let end = std::cmp::min(offset + CHUNK_SIZE, file_size);
             let range = offset..end;

-            // Read chunk from remote storage
-            let chunk = op.read_with(relative_path).range(range.clone()).await?;
+            chunk_count += 1;
+
+            if chunk_count <= 3 || chunk_count % 10 == 0 {
+                log::debug!(
+                    "Remote log download: reading chunk {}/{} (offset {})",
+                    chunk_count,
+                    total_chunks,
+                    offset
+                );
+            }
+
+            // Read chunk from remote storage with timeout
+            let read_future = op.read_with(relative_path).range(range.clone());
+            let chunk = tokio::time::timeout(REMOTE_OP_TIMEOUT, read_future)
+                .await
+                .map_err(|_| {
+                    Error::Io(io::Error::new(
+                        io::ErrorKind::TimedOut,
+                        format!(
+                            "Timeout reading chunk from remote storage: {} at offset {}",
+                            remote_path, offset
+                        ),
+                    ))
+                })??;
             let bytes = chunk.to_bytes();

             // Write chunk to local file
@@ -254,10 +309,10 @@ impl RemotePendingFetch {
         // delete the downloaded local file to free disk
         delete_file(file_path).await;

-        // Parse log records
+        // Parse log records (remote log contains full data, need client-side projection)
         let mut fetch_records = vec![];
         for log_record in &mut LogRecordsBatchs::new(data) {
-            fetch_records.extend(log_record.records(&self.read_context)?);
+            fetch_records.extend(log_record.records_for_remote_log(&self.read_context)?);
         }

         let mut result = HashMap::new();
9 changes: 9 additions & 0 deletions crates/fluss/src/client/table/scanner.rs
@@ -16,6 +16,7 @@
 // under the License.

 use crate::client::connection::FlussConnection;
+use crate::client::credentials::CredentialsCache;
 use crate::client::metadata::Metadata;
 use crate::error::{Error, Result};
 use crate::metadata::{TableBucket, TableInfo, TablePath};
@@ -194,6 +195,7 @@ struct LogFetcher {
     log_scanner_status: Arc<LogScannerStatus>,
     read_context: ReadContext,
     remote_log_downloader: RemoteLogDownloader,
+    credentials_cache: CredentialsCache,
 }

 impl LogFetcher {
@@ -217,6 +219,7 @@
             log_scanner_status,
             read_context,
             remote_log_downloader: RemoteLogDownloader::new(tmp_dir)?,
+            credentials_cache: CredentialsCache::new(),
         })
     }

@@ -256,6 +259,12 @@ impl LogFetcher {
                 if let Some(ref remote_log_fetch_info) =
                     fetch_log_for_bucket.remote_log_fetch_info
                 {
+                    let remote_fs_props = self
+                        .credentials_cache
+                        .get_or_refresh(&self.conns, &self.metadata)
+                        .await?;
+                    self.remote_log_downloader
+                        .set_remote_fs_props(remote_fs_props);
                     let remote_fetch_info = RemoteLogFetchInfo::from_proto(
                         remote_log_fetch_info,
                         table_bucket.clone(),