@@ -371,10 +371,11 @@ CallTraceStorage::CallTraceStorage() : _active_storage(nullptr), _standby_storag
 
 CallTraceStorage::~CallTraceStorage() {
     // Atomically invalidate storage pointers to prevent new put() operations
-    // Relaxed ordering is sufficient since no concurrent readers exist during destruction
-    CallTraceHashTable* active = const_cast<CallTraceHashTable*>(__atomic_exchange_n(&_active_storage, nullptr, __ATOMIC_RELAXED));
-    CallTraceHashTable* standby = const_cast<CallTraceHashTable*>(__atomic_exchange_n(&_standby_storage, nullptr, __ATOMIC_RELAXED));
-    CallTraceHashTable* scratch = const_cast<CallTraceHashTable*>(__atomic_exchange_n(&_scratch_storage, nullptr, __ATOMIC_RELAXED));
+    // ACQ_REL ordering: RELEASE ensures nullptr is visible to put()'s ACQUIRE load,
+    // ACQUIRE ensures we see the latest pointer value for subsequent deletion
+    CallTraceHashTable* active = const_cast<CallTraceHashTable*>(__atomic_exchange_n(&_active_storage, nullptr, __ATOMIC_ACQ_REL));
+    CallTraceHashTable* standby = const_cast<CallTraceHashTable*>(__atomic_exchange_n(&_standby_storage, nullptr, __ATOMIC_ACQ_REL));
+    CallTraceHashTable* scratch = const_cast<CallTraceHashTable*>(__atomic_exchange_n(&_scratch_storage, nullptr, __ATOMIC_ACQ_REL));
 
     // Wait for any ongoing hazard pointer usage to complete and delete each unique table
     // Note: In triple-buffering, all three pointers should be unique, but check anyway
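The acquire/release pairing the new comment describes is easiest to see from the consumer side. A minimal sketch of the guard in put() that it refers to (hypothetical shape; the real put() is not part of this hunk and also relies on the hazard-pointer wait mentioned above):

    // Sketch only, not part of the patch: assumed shape of the pointer guard in put().
    CallTraceHashTable* table =
        const_cast<CallTraceHashTable*>(__atomic_load_n(&_active_storage, __ATOMIC_ACQUIRE));
    if (table == nullptr) {
        return 0;  // storage is being torn down; drop the sample (return convention assumed)
    }
    // The destructor's __ATOMIC_ACQ_REL exchange publishes the nullptr, so once this
    // acquire load observes it, this thread never starts using a table that is about to be
    // deleted; callers already inside the table are covered by the hazard-pointer wait.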
@@ -472,12 +473,13 @@ void CallTraceStorage::processTraces(std::function<void(const std::unordered_set
         }
     }
 
-    // PHASE 2: 8-Step Triple-Buffer Rotation
+    // PHASE 2: 10-Step Triple-Buffer Rotation
 
-    // Load storage pointers - relaxed ordering sufficient in single-threaded processTraces context
-    CallTraceHashTable* original_active = const_cast<CallTraceHashTable*>(__atomic_load_n(&_active_storage, __ATOMIC_RELAXED));
-    CallTraceHashTable* original_standby = const_cast<CallTraceHashTable*>(__atomic_load_n(&_standby_storage, __ATOMIC_RELAXED));
-    CallTraceHashTable* original_scratch = const_cast<CallTraceHashTable*>(__atomic_load_n(&_scratch_storage, __ATOMIC_RELAXED));
+    // Load storage pointers - ACQUIRE ordering synchronizes with RELEASE stores from
+    // previous processTraces() calls and constructor initialization
+    CallTraceHashTable* original_active = const_cast<CallTraceHashTable*>(__atomic_load_n(&_active_storage, __ATOMIC_ACQUIRE));
+    CallTraceHashTable* original_standby = const_cast<CallTraceHashTable*>(__atomic_load_n(&_standby_storage, __ATOMIC_ACQUIRE));
+    CallTraceHashTable* original_scratch = const_cast<CallTraceHashTable*>(__atomic_load_n(&_scratch_storage, __ATOMIC_ACQUIRE));
 
     // Clear process collection for reuse (no malloc/free)
    _traces_buffer.clear();
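For orientation, the rotation these loads feed amounts to cycling the three tables by one slot. A schematic sketch of the swap, inferred from the step comments in this patch (the real code performs it inside the CriticalSection below, and the exact store sequence may differ):

    // Schematic only - inferred rotation: the drained standby becomes the new active,
    // the scratch table becomes the new standby, and the old active is parked as scratch
    // until processor() has consumed its traces.
    __atomic_store_n(&_active_storage,  original_standby, __ATOMIC_RELEASE);
    __atomic_store_n(&_standby_storage, original_scratch, __ATOMIC_RELEASE);
    __atomic_store_n(&_scratch_storage, original_active,  __ATOMIC_RELEASE);
    // These RELEASE stores are what the ACQUIRE loads above (and the ones in clear())
    // synchronize with on the next call.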
@@ -492,8 +494,11 @@ void CallTraceStorage::processTraces(std::function<void(const std::unordered_set
         }
     });
 
-    // Step 3: Clear original_standby after collection -> it will become the new active
-    original_standby->clear();
+    // Step 3: Clear standby table structure but DEFER memory deallocation
+    // The standby traces are now in _traces_buffer as raw pointers.
+    // We must keep the underlying memory alive until processor() finishes.
+    // clearTableOnly() resets the table for reuse but returns the chunks for later freeing.
+    ChunkList standby_chunks = original_standby->clearTableOnly();
 
    {
        // Critical section for table swap operations - disallow signals to interrupt
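The contract Step 3 relies on can be sketched as follows (hypothetical body; the real clearTableOnly() lives in CallTraceHashTable and is not shown in this diff):

    // Sketch only - assumed shape of the deferred-deallocation contract.
    ChunkList CallTraceHashTable::clearTableOnly() {
        ChunkList detached = _allocator.detachChunks();  // hypothetical helper: move chunk ownership to the caller
        resetBuckets();                                  // hypothetical: table is immediately reusable for new put()s
        return detached;                                 // caller frees these once no raw CallTrace* can point into them
    }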
@@ -531,7 +536,11 @@ void CallTraceStorage::processTraces(std::function<void(const std::unordered_set
 
    processor(_traces_buffer);
 
-    // Step 9: Clear the original active area (now scratch)
+    // Step 9: NOW safe to free standby chunks - processor is done accessing those traces
+    // This completes the deferred deallocation that prevents use-after-free
+    LinearAllocator::freeChunks(standby_chunks);
+
+    // Step 10: Clear the original active area (now scratch)
    original_active->clear();
 
    // Triple-buffer rotation maintains trace continuity with thread-safe malloc-free operations:
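Read together, Steps 3, 9 and 10 pin down the only ordering that avoids the use-after-free: the chunks backing the collected traces must outlive the processor() call. In outline (names from this patch; the collection step is paraphrased):

    // Ordering summary, not new code:
    //   1. drain original_standby into _traces_buffer (raw CallTrace* into its chunks)
    //   2. ChunkList standby_chunks = original_standby->clearTableOnly();  // table reusable, memory kept alive
    //   3. rotate the three tables under the CriticalSection
    //   4. processor(_traces_buffer);                    // may dereference pointers into standby_chunks
    //   5. LinearAllocator::freeChunks(standby_chunks);  // deferred free, only after the processor returns
    //   6. original_active->clear();                     // the old active table is now the scratch table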
@@ -546,9 +555,10 @@ void CallTraceStorage::clear() {
    // Mark critical section during clear operation for consistency
    CriticalSection cs;
 
-    // Load current table pointers - relaxed ordering sufficient within critical section
-    CallTraceHashTable* active = const_cast<CallTraceHashTable*>(__atomic_load_n(&_active_storage, __ATOMIC_RELAXED));
-    CallTraceHashTable* standby = const_cast<CallTraceHashTable*>(__atomic_load_n(&_standby_storage, __ATOMIC_RELAXED));
+    // Load current table pointers - ACQUIRE ordering synchronizes with RELEASE stores
+    // from processTraces() rotation and constructor initialization
+    CallTraceHashTable* active = const_cast<CallTraceHashTable*>(__atomic_load_n(&_active_storage, __ATOMIC_ACQUIRE));
+    CallTraceHashTable* standby = const_cast<CallTraceHashTable*>(__atomic_load_n(&_standby_storage, __ATOMIC_ACQUIRE));
 
    // Direct clear operations with critical section protection
    if (active) {