
Commit ae449db

refactor(headers2): Address code review feedback
- Consolidate redundant if/else blocks in compression state initialization (reduces 5 async lock acquisitions to 1)
- Simplify block locator construction (both branches returned vec![hash])
- Change process_headers to take &[CompressedHeader] instead of Vec to avoid cloning headers during sync

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <[email protected]>
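The first bullet is the mechanical core of the change: rather than calling `self.chain_state.read().await` before every read, the new code acquires the read guard once and reuses it. A minimal sketch of the pattern, assuming `tokio::sync::RwLock` and a hypothetical `ChainState` stand-in (not the crate's actual types):

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

// Hypothetical stand-in for the crate's chain state; illustration only.
struct ChainState {
    headers: Vec<[u8; 80]>, // raw 80-byte block headers
}

impl ChainState {
    fn tip_height(&self) -> u32 {
        self.headers.len().saturating_sub(1) as u32
    }
    fn header_at_height(&self, height: u32) -> Option<&[u8; 80]> {
        self.headers.get(height as usize)
    }
}

async fn init_compression_state(chain_state: Arc<RwLock<ChainState>>) {
    // Before: every access paid for a fresh lock acquisition.
    //   let h = chain_state.read().await.tip_height();
    //   let hdr = chain_state.read().await.header_at_height(h);

    // After: one guard, reused for all reads in the block.
    let guard = chain_state.read().await;
    let tip_height = guard.tip_height();
    if let Some(header) = guard.header_at_height(tip_height) {
        // ... initialize the peer's headers2 compression state with `header`
        let _ = header;
    }
} // guard dropped here, releasing the read lock
```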
1 parent ae708c7 commit ae449db

File tree

2 files changed: +16 -44 lines


dash-spv/src/sync/headers/manager.rs

Lines changed: 12 additions & 40 deletions
@@ -320,24 +320,7 @@ impl<S: StorageManager + Send + Sync + 'static, N: NetworkManager + Send + Sync
         base_hash: Option<BlockHash>,
     ) -> SyncResult<()> {
         let block_locator = match base_hash {
-            Some(hash) => {
-                // When syncing from a checkpoint, we need to create a proper locator
-                // that helps the peer understand we want headers AFTER this point
-                if self.is_synced_from_checkpoint() {
-                    // For checkpoint sync, only include the checkpoint hash
-                    // Including genesis would allow peers to fall back to sending headers from genesis
-                    // if they don't recognize the checkpoint, which is exactly what we want to avoid
-                    tracing::debug!(
-                        "📍 Using checkpoint-only locator for height {}: [{}]",
-                        self.get_sync_base_height(),
-                        hash
-                    );
-                    vec![hash]
-                } else {
-                    // Always include the hash in the locator - peer needs to know our position
-                    vec![hash]
-                }
-            }
+            Some(hash) => vec![hash],
             None => {
                 // Check if we're syncing from a checkpoint
                 if self.is_synced_from_checkpoint()
@@ -461,33 +444,22 @@ impl<S: StorageManager + Send + Sync + 'static, N: NetworkManager + Send + Sync
         // Check if we need to initialize the compression state
         let state = self.headers2_state.get_state(peer_id);
         if state.prev_header.is_none() {
-            // If we're syncing from genesis (height 0), initialize with genesis header
-            if self.chain_state.read().await.tip_height() == 0 {
-                // We have genesis header at index 0
-                if let Some(genesis_header) = self.chain_state.read().await.header_at_height(0)
-                {
-                    tracing::info!(
-                        "Initializing headers2 compression state for peer {} with genesis header",
-                        peer_id
-                    );
-                    self.headers2_state.init_peer_state(peer_id, *genesis_header);
-                }
-            } else if self.chain_state.read().await.tip_height() > 0 {
-                // Get our current tip to use as the base for compression
-                if let Some(tip_header) = self.chain_state.read().await.get_tip_header() {
-                    tracing::info!(
-                        "Initializing headers2 compression state for peer {} with tip header at height {}",
-                        peer_id,
-                        self.chain_state.read().await.tip_height()
-                    );
-                    self.headers2_state.init_peer_state(peer_id, tip_header);
-                }
+            // Initialize with header at current tip height (works for both genesis and later)
+            let chain_state = self.chain_state.read().await;
+            let tip_height = chain_state.tip_height();
+            if let Some(tip_header) = chain_state.header_at_height(tip_height) {
+                tracing::info!(
+                    "Initializing headers2 compression state for peer {} with header at height {}",
+                    peer_id,
+                    tip_height
+                );
+                self.headers2_state.init_peer_state(peer_id, *tip_header);
             }
         }
     }
 
         // Decompress headers using the peer's compression state
-        let headers = match self.headers2_state.process_headers(peer_id, headers2.headers.clone()) {
+        let headers = match self.headers2_state.process_headers(peer_id, &headers2.headers) {
             Ok(headers) => headers,
             Err(e) => {
                 tracing::error!(

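One more property of the consolidated initialization above, offered as a reading of the diff rather than a stated goal of the commit: holding a single guard means `tip_height()` and `header_at_height(tip_height)` observe the same chain snapshot, whereas separate `read().await` calls leave a window for a writer between them. A contrived sketch with a hypothetical `Chain` type:

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

struct Chain {
    tip: u32, // hypothetical; stands in for real chain state
}

// Two separate acquisitions: a writer can slip in between the awaits,
// so the two reads may observe different tips.
async fn read_twice(chain: &Arc<RwLock<Chain>>) -> (u32, u32) {
    let a = chain.read().await.tip;
    let b = chain.read().await.tip;
    (a, b) // not guaranteed equal
}

// One guard held across both reads: a single consistent snapshot.
async fn read_once(chain: &Arc<RwLock<Chain>>) -> (u32, u32) {
    let guard = chain.read().await;
    (guard.tip, guard.tip) // always equal
}
```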
dash-spv/src/sync/headers2/manager.rs

Lines changed: 4 additions & 4 deletions
@@ -96,7 +96,7 @@ impl Headers2StateManager {
     pub fn process_headers(
         &mut self,
         peer_id: PeerId,
-        headers: Vec<CompressedHeader>,
+        headers: &[CompressedHeader],
     ) -> Result<Vec<Header>, ProcessError> {
         if headers.is_empty() {
             return Ok(Vec::new());
@@ -116,7 +116,7 @@
         let mut decompressed = Vec::with_capacity(headers.len());
 
         // Process headers and collect statistics
-        for (i, compressed) in headers.into_iter().enumerate() {
+        for (i, compressed) in headers.iter().enumerate() {
             // Update statistics
             self.total_headers_received += 1;
             self.total_bytes_received += compressed.encoded_size() as u64;
@@ -230,7 +230,7 @@ mod tests {
         let compressed2 = compress_state.compress(&header2);
 
         // Process headers
-        let result = manager.process_headers(peer_id, vec![compressed1, compressed2]);
+        let result = manager.process_headers(peer_id, &[compressed1, compressed2]);
         assert!(result.is_ok());
 
         let decompressed = result.expect("decompression should succeed in test");
@@ -261,7 +261,7 @@
 
         // Try to process it as first header - should fail with DecompressionError
         // because the peer doesn't have the previous header state
-        let result = manager.process_headers(peer_id, vec![compressed]);
+        let result = manager.process_headers(peer_id, &[compressed]);
         assert!(matches!(result, Err(ProcessError::DecompressionError(0, _))));
     }

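The signature change above moves the copy decision to the caller: `process_headers` now borrows the batch, so the sync path can lend `headers2.headers` instead of cloning it, and the tests can pass array literals directly. A self-contained sketch of the difference, using a hypothetical `CompressedHeader` rather than the crate's type:

```rust
#[derive(Clone)]
struct CompressedHeader {
    bytes: Vec<u8>, // hypothetical payload
}

// Owning signature: callers that keep their buffer must clone it.
fn process_owned(headers: Vec<CompressedHeader>) -> usize {
    headers.len()
}

// Borrowing signature: callers lend any contiguous batch.
fn process_borrowed(headers: &[CompressedHeader]) -> usize {
    headers.len()
}

fn main() {
    let batch = vec![CompressedHeader { bytes: vec![0u8; 4] }];

    // Before: `batch` is still needed afterwards, so every header is cloned.
    let _ = process_owned(batch.clone());

    // After: a plain borrow; nothing is copied and `batch` stays usable.
    let _ = process_borrowed(&batch);
    assert_eq!(batch.len(), 1);
}
```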