#include "linearAllocator.h"
#include "counters.h"
#include "os_dd.h"
+#include "common.h"
+#include <stdio.h>
+
+// Enable ASan memory poisoning for better use-after-free detection
+#ifdef __has_feature
+#if __has_feature(address_sanitizer)
+#define ASAN_ENABLED 1
+#endif
+#endif
+
+// GCC signals ASan via __SANITIZE_ADDRESS__ rather than __has_feature
+#if defined(__SANITIZE_ADDRESS__) && !defined(ASAN_ENABLED)
+#define ASAN_ENABLED 1
+#endif
+
+#ifdef ASAN_ENABLED
+#include <sanitizer/asan_interface.h>
+#endif

LinearAllocator::LinearAllocator(size_t chunk_size) {
    _chunk_size = chunk_size;
@@ -32,13 +49,85 @@ void LinearAllocator::clear() {
    if (_reserve->prev == _tail) {
        freeChunk(_reserve);
    }
+
+    // ASAN POISONING: Mark all allocated memory as poisoned BEFORE freeing chunks.
+    // This catches use-after-free even when memory isn't munmap'd (kept in _tail).
+#ifdef ASAN_ENABLED
+    int chunk_count = 0;
+    size_t total_poisoned = 0;
+    for (Chunk *chunk = _tail; chunk != NULL; chunk = chunk->prev) {
+        // Poison from the start of usable data up to the current offset
+        size_t used_size = chunk->offs - sizeof(Chunk);
+        if (used_size > 0) {
+            void *data_start = (char *)chunk + sizeof(Chunk);
+            ASAN_POISON_MEMORY_REGION(data_start, used_size);
+            chunk_count++;
+            total_poisoned += used_size;
+        }
+    }
+    if (chunk_count > 0) {
+        TEST_LOG("[LinearAllocator::clear] ASan poisoned %d chunks, %zu bytes total", chunk_count, total_poisoned);
+    }
+#endif
+
    while (_tail->prev != NULL) {
        Chunk *current = _tail;
        _tail = _tail->prev;
        freeChunk(current);
    }
    _reserve = _tail;
    _tail->offs = sizeof(Chunk);
+
+    // DON'T UNPOISON HERE - let alloc() do it on demand!
+    // This ensures ASan can catch use-after-free bugs when code accesses
+    // memory that was cleared but not yet reallocated.
+}
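+
+// Illustrative sketch (hypothetical helper, not part of the allocator API):
+// with poison-on-clear and unpoison-on-alloc, a stale pointer that survives
+// clear() trips ASan even though its chunk was kept in _tail, not munmap'd.
+#ifdef ASAN_ENABLED
+static void exampleUseAfterClear(LinearAllocator &allocator) {
+    char *data = (char *)allocator.alloc(16);
+    data[0] = 'x';       // OK: alloc() unpoisoned exactly this region
+    allocator.clear();   // poisons every previously handed-out byte
+    data[0] = 'y';       // BUG: ASan reports a use-after-poison here
+}
+#endif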
+
+ChunkList LinearAllocator::detachChunks() {
+    // Capture the current chain before detaching
+    ChunkList result(_tail, _chunk_size);
+
+    // If the reserve chunk sits ahead of the tail, it must travel with the
+    // detached list; make it the head so the whole chain stays linked
+    if (_reserve->prev == _tail) {
+        result.head = _reserve;
+    }
+
+    // Allocate a fresh chunk for subsequent allocations
+    Chunk *fresh = allocateChunk(NULL);
+    if (fresh != NULL) {
+        _tail = fresh;
+        _reserve = fresh;
+    } else {
+        // Allocation failed: keep the existing chain (leaving _reserve in
+        // place so the reserve chunk is not leaked), reset the tail offset,
+        // and return an empty list so the caller frees nothing. This keeps
+        // the allocator usable even under memory pressure.
+        _tail->offs = sizeof(Chunk);
+        return ChunkList();
+    }
+
+    return result;
+}
+
+void LinearAllocator::freeChunks(ChunkList &chunks) {
+    if (chunks.head == nullptr || chunks.chunk_size == 0) {
+        return;
+    }
+
+    // Walk the chain and free each chunk
+    Chunk *current = chunks.head;
+    while (current != nullptr) {
+        Chunk *prev = current->prev;
+        OS::safeFree(current, chunks.chunk_size);
+        Counters::decrement(LINEAR_ALLOCATOR_BYTES, chunks.chunk_size);
+        Counters::decrement(LINEAR_ALLOCATOR_CHUNKS);
+        current = prev;
+    }
+
+    // Mark as freed to prevent double-free
+    chunks.head = nullptr;
+    chunks.chunk_size = 0;
}
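+
+// Illustrative usage sketch (hypothetical helper, not part of this patch):
+// detachChunks() hands the filled chain to the caller while the allocator
+// stays usable, and freeChunks() can then run later, e.g. on another thread.
+static void exampleDetachAndFree(LinearAllocator &allocator) {
+    allocator.alloc(64);                        // fill some data
+    ChunkList detached = allocator.detachChunks();
+    allocator.alloc(64);                        // allocator still usable
+    allocator.freeChunks(detached);             // frees the whole chain
+    allocator.freeChunks(detached);             // safe no-op: head was cleared
+}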

void *LinearAllocator::alloc(size_t size) {
@@ -49,11 +138,20 @@ void *LinearAllocator::alloc(size_t size) {
             offs + size <= _chunk_size;
             offs = __atomic_load_n(&chunk->offs, __ATOMIC_ACQUIRE)) {
            if (__sync_bool_compare_and_swap(&chunk->offs, offs, offs + size)) {
+                void *allocated_ptr = (char *)chunk + offs;
+
+                // ASAN UNPOISONING: Unpoison ONLY the allocated region, on demand.
+                // This allows ASan to detect use-after-free of memory that was
+                // cleared but not yet reallocated.
+#ifdef ASAN_ENABLED
+                ASAN_UNPOISON_MEMORY_REGION(allocated_ptr, size);
+#endif
+
                if (_chunk_size / 2 - offs < size) {
                    // Stepped over the middle of the chunk - time to prepare a new one
                    reserveChunk(chunk);
                }
-                return (char *)chunk + offs;
+                return allocated_ptr;
            }
        }
    } while ((chunk = getNextChunk(chunk)) != NULL);
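+
+// Worked example of the midpoint check above, assuming offs is unsigned:
+// with _chunk_size = 64 KiB (half = 32768), an allocation at offs = 32760
+// with size = 16 gives 32768 - 32760 = 8 < 16, so reserveChunk() runs. For
+// any later offs > 32768 the unsigned subtraction wraps to a huge value,
+// so the check fires only once: on the allocation that crosses the middle.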
@@ -66,6 +164,16 @@ Chunk *LinearAllocator::allocateChunk(Chunk *current) {
    if (chunk != NULL) {
        chunk->prev = current;
        chunk->offs = sizeof(Chunk);
+
+        // ASAN UNPOISONING: New chunks from mmap are clean; unpoison them for use.
+        // mmap returns memory that ASan may track as unallocated, so we need to
+        // explicitly unpoison it to allow allocations.
+#ifdef ASAN_ENABLED
+        size_t usable_size = _chunk_size - sizeof(Chunk);
+        void *data_start = (char *)chunk + sizeof(Chunk);
+        ASAN_UNPOISON_MEMORY_REGION(data_start, usable_size);
+#endif
+
        Counters::increment(LINEAR_ALLOCATOR_BYTES, _chunk_size);
        Counters::increment(LINEAR_ALLOCATOR_CHUNKS);
    }