diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..fb48e8b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+*.o
+*.su
+.vscode/
+kasan_test.lds
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 48aea9a..a98e6d7 100644
--- a/Makefile
+++ b/Makefile
@@ -35,6 +35,7 @@ CFLAGS += -DPRINTF_DISABLE_SUPPORT_FLOAT
 CFLAGS += -DPRINTF_DISABLE_SUPPORT_EXPONENTIAL
 CFLAGS += -DPRINTF_DISABLE_SUPPORT_PTRDIFF_T
 CFLAGS += -DPRINTF_DISABLE_SUPPORT_LONG_LONG
+CFLAGS += -DDLMALLOC_ENABLED
 
 CFLAGS += -Wno-incompatible-library-redeclaration
 
@@ -54,8 +55,10 @@ KASAN_CC_FLAGS += -mllvm -asan-stack=$(KASAN_SANITIZE_STACK)
 KASAN_CC_FLAGS += -mllvm -asan-globals=$(KASAN_SANITIZE_GLOBALS)
 KASAN_CC_FLAGS += -DKASAN_ENABLED
 
-SRCS := kasan.c \
-        heap.c \
+SRCS := heap.c \
+        kasan_common.c \
+        kasan_simple_malloc.c \
+        kasan_dlmalloc.c \
         kasan_test.c \
         sanitized_lib.c \
         rt_utils.c \
diff --git a/README.md b/README.md
index 567d872..28685e1 100644
--- a/README.md
+++ b/README.md
@@ -26,14 +26,13 @@ sudo apt-get install build-essential gcc-multilib llvm clang lld \
   qemu-system-arm qemu-system-misc qemu-system-x86
 ```
 
-## Project layout
+## Project layout (with dlmalloc)
 
 The project consists of the following components:
 
 * `kasan_test.c` -- main test driver which runs KASan test cases
 * `sanitized_lib.c` -- this module implements the test cases and is built with
   the KASan instrumentation
-* `kasan.c` -- implementation of runtime routines needed for KASan sanitizer
 * `heap.c` -- simple implementation of heap management routines for testing
   KASan
 * `third_party/printf.c` -- a compact implementation of `printf` function
@@ -48,6 +47,11 @@ The project consists of the following components:
 * `Makefile.arch` -- Makefile fragments with architecture-specific parameters
   for building and running the project in the emulator
 
+Files for the `malloc` implementations:
+* `kasan_common.c` -- implementation of common runtime routines needed for the KASan sanitizer
+* `kasan_simple_malloc.c` -- implementation of KASan runtime routines for the simple malloc
+* `kasan_dlmalloc.c` -- implementation of KASan runtime routines for dlmalloc
+
 ## Running
 
diff --git a/heap.c b/heap.c
index 8e6c67f..44ab3a9 100644
--- a/heap.c
+++ b/heap.c
@@ -10,34 +10,45 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  */
 
 #include "common.h"
-#include "kasan.h"
+#include "kasan_common.h"
+#include "kasan_simple_malloc.h"
+#include "kasan_dlmalloc.h"
+#include "heap.h"
+#include "printf.h"
 
 // These symbols are defined in the linker script.
 extern char __heap_start;
 extern char __heap_end;
 
-static void *heap_head;
-static size_t heap_size;
+size_t mem_malloc_size = 0;
+unsigned long mem_malloc_start = 0;
+unsigned long mem_malloc_end = 0;
+unsigned long mem_malloc_brk = 0;
 
 void initialize_heap(void) {
-  heap_head = (void *)&__heap_start;
-  heap_size = (void *)&__heap_end - (void *)&__heap_start;
-}
-
-void *allocate_chunk(unsigned long size) {
-  void *result = heap_head;
-  if (size > heap_size) return NULL;
-
-  size = (size + 7) & (~7UL);
-  heap_head += size;
-  heap_size -= size;
-  return result;
+  mem_malloc_start = (unsigned long)&__heap_start;
+  mem_malloc_end = (unsigned long)&__heap_end;
+  mem_malloc_size = mem_malloc_end - mem_malloc_start;
+  mem_malloc_brk = mem_malloc_start;
+  initialize_kasan();
 }
 
 void free_chunk(void *ptr) { (void)ptr; }
 
-void *malloc(unsigned long size) { return kasan_malloc_hook(size); }
+void *malloc(unsigned long size) {
+#ifdef DLMALLOC_ENABLED
+  return kasan_dlmalloc_hook(size);
+#else
+  return kasan_malloc_hook(size);
+#endif  // DLMALLOC_ENABLED
+}
 
-void free(void *ptr) { return kasan_free_hook(ptr); }
\ No newline at end of file
+void free(void *ptr) {
+#ifdef DLMALLOC_ENABLED
+  kasan_dlfree_hook(ptr);
+#else
+  kasan_free_hook(ptr);
+#endif  // DLMALLOC_ENABLED
+}
\ No newline at end of file
diff --git a/heap.h b/heap.h
index 78ef518..fdf3ee3 100644
--- a/heap.h
+++ b/heap.h
@@ -10,15 +10,18 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  */
+#include "common.h"
 
 #ifndef __KASAN_HEAP_H__
 #define __KASAN_HEAP_H__
 
-void initialize_heap(void);
+extern size_t mem_malloc_size;
+extern unsigned long mem_malloc_start;
+extern unsigned long mem_malloc_end;
+extern unsigned long mem_malloc_brk;
 
-void *allocate_chunk(unsigned long size);
+void initialize_heap(void);
 
 void free_chunk(void *ptr);
-
 void *malloc(unsigned long size);
 void free(void *ptr);
 
diff --git a/kasan.h b/kasan.h
deleted file mode 100644
index 02f07b7..0000000
--- a/kasan.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2024 Google LLC
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-void initialize_kasan(void);
-
-void *kasan_malloc_hook(unsigned long size);
-
-void kasan_free_hook(void *ptr);
\ No newline at end of file
diff --git a/kasan.c b/kasan_common.c
similarity index 79%
rename from kasan.c
rename to kasan_common.c
index dd4fa95..999a0c5 100644
--- a/kasan.c
+++ b/kasan_common.c
@@ -12,27 +12,8 @@
  */
 
 #include "common.h"
-#include "heap.h"
 #include "printf.h"
-
-#define KASAN_SHADOW_SHIFT 3
-#define KASAN_SHADOW_GRANULE_SIZE (1UL << KASAN_SHADOW_SHIFT)
-#define KASAN_SHADOW_MASK (KASAN_SHADOW_GRANULE_SIZE - 1)
-
-#define ASAN_SHADOW_UNPOISONED_MAGIC 0x00
-#define ASAN_SHADOW_RESERVED_MAGIC 0xff
-#define ASAN_SHADOW_GLOBAL_REDZONE_MAGIC 0xf9
-#define ASAN_SHADOW_HEAP_HEAD_REDZONE_MAGIC 0xfa
-#define ASAN_SHADOW_HEAP_TAIL_REDZONE_MAGIC 0xfb
-#define ASAN_SHADOW_HEAP_FREE_MAGIC 0xfd
-
-#define KASAN_HEAP_HEAD_REDZONE_SIZE 0x20
-#define KASAN_HEAP_TAIL_REDZONE_SIZE 0x20
-
-#define KASAN_MEM_TO_SHADOW(addr) \
-  (((addr) >> KASAN_SHADOW_SHIFT) + KASAN_SHADOW_MAPPING_OFFSET)
-#define KASAN_SHADOW_TO_MEM(shadow) \
-  (((shadow) - KASAN_SHADOW_MAPPING_OFFSET) << KASAN_SHADOW_SHIFT)
+#include "kasan_common.h"
 
 void kasan_bug_report(unsigned long addr, size_t size,
                       unsigned long buggy_shadow_address, uint8_t is_write,
@@ -75,7 +56,7 @@ static inline unsigned long get_poisoned_shadow_address(unsigned long addr,
 }
 
 // Both `address` and `size` must be 8-byte aligned.
-static void poison_shadow(unsigned long address, size_t size, uint8_t value) {
+void poison_shadow(unsigned long address, size_t size, uint8_t value) {
   unsigned long shadow_start, shadow_end;
   size_t shadow_length = 0;
 
@@ -87,7 +68,7 @@ static void poison_shadow(unsigned long address, size_t size, uint8_t value) {
 }
 
 // `address` must be 8-byte aligned
-static void unpoison_shadow(unsigned long address, size_t size) {
+void unpoison_shadow(unsigned long address, size_t size) {
   poison_shadow(address, size & (~KASAN_SHADOW_MASK),
                 ASAN_SHADOW_UNPOISONED_MAGIC);
 
@@ -165,50 +146,6 @@ void *__kasan_memset(void *buf, int c, unsigned int size, unsigned long pc) {
   return memset(buf, c, size);
 }
 
-// Implement KASan heap management hooks.
-
-struct KASAN_HEAP_HEADER {
-  unsigned int aligned_size;
-};
-
-void *kasan_malloc_hook(unsigned int size) {
-  struct KASAN_HEAP_HEADER *kasan_heap_hdr = NULL;
-  unsigned int algined_size = (size + KASAN_SHADOW_MASK) & (~KASAN_SHADOW_MASK);
-  unsigned int total_size = algined_size + KASAN_HEAP_HEAD_REDZONE_SIZE +
-                            KASAN_HEAP_TAIL_REDZONE_SIZE;
-
-  void *ptr = allocate_chunk(total_size);
-  if (ptr == NULL) return NULL;
-
-  kasan_heap_hdr = (struct KASAN_HEAP_HEADER *)ptr;
-  kasan_heap_hdr->aligned_size = algined_size;
-
-  unpoison_shadow((unsigned long)(ptr + KASAN_HEAP_HEAD_REDZONE_SIZE), size);
-  poison_shadow((unsigned long)ptr, KASAN_HEAP_HEAD_REDZONE_SIZE,
-                ASAN_SHADOW_HEAP_HEAD_REDZONE_MAGIC);
-  poison_shadow(
-      (unsigned long)(ptr + KASAN_HEAP_HEAD_REDZONE_SIZE + algined_size),
-      KASAN_HEAP_TAIL_REDZONE_SIZE, ASAN_SHADOW_HEAP_TAIL_REDZONE_MAGIC);
-
-  return ptr + KASAN_HEAP_HEAD_REDZONE_SIZE;
-}
-
-void kasan_free_hook(void *ptr) {
-  struct KASAN_HEAP_HEADER *kasan_heap_hdr = NULL;
-  unsigned int aligned_size = 0;
-
-  if (ptr == NULL) return;
-
-  kasan_heap_hdr =
-      (struct KASAN_HEAP_HEADER *)(ptr - KASAN_HEAP_HEAD_REDZONE_SIZE);
-  aligned_size = kasan_heap_hdr->aligned_size;
-
-  free_chunk(kasan_heap_hdr);
-  poison_shadow((unsigned long)ptr, aligned_size, ASAN_SHADOW_HEAP_FREE_MAGIC);
-
-  return;
-}
-
 // Implement KAsan error reporting routines.
static void kasan_print_16_bytes_no_bug(const char *prefix, diff --git a/kasan_common.h b/kasan_common.h new file mode 100644 index 0000000..a3d7d02 --- /dev/null +++ b/kasan_common.h @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Google LLC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "common.h" +#ifndef __KASAN_COMMON__ +#define __KASAN_COMMON__ + +#define KASAN_SHADOW_SHIFT 3 +#define KASAN_SHADOW_GRANULE_SIZE (1UL << KASAN_SHADOW_SHIFT) +#define KASAN_SHADOW_MASK (KASAN_SHADOW_GRANULE_SIZE - 1) + +#define ASAN_SHADOW_UNPOISONED_MAGIC 0x00 +#define ASAN_SHADOW_RESERVED_MAGIC 0xff +#define ASAN_SHADOW_GLOBAL_REDZONE_MAGIC 0xf9 +#define ASAN_SHADOW_HEAP_HEAD_REDZONE_MAGIC 0xfa +#define ASAN_SHADOW_HEAP_TAIL_REDZONE_MAGIC 0xfb +#define ASAN_SHADOW_HEAP_FREE_MAGIC 0xfd + +#define KASAN_HEAP_HEAD_REDZONE_SIZE 0x20 +#define KASAN_HEAP_TAIL_REDZONE_SIZE 0x20 + +#define KASAN_MEM_TO_SHADOW(addr) \ + (((addr) >> KASAN_SHADOW_SHIFT) + KASAN_SHADOW_MAPPING_OFFSET) +#define KASAN_SHADOW_TO_MEM(shadow) \ + (((shadow) - KASAN_SHADOW_MAPPING_OFFSET) << KASAN_SHADOW_SHIFT) + +void initialize_kasan(void); +void poison_shadow(unsigned long address, size_t size, uint8_t value); +void unpoison_shadow(unsigned long address, size_t size); + +#endif \ No newline at end of file diff --git a/kasan_dlmalloc.c b/kasan_dlmalloc.c new file mode 100644 index 0000000..c8944ab --- /dev/null +++ b/kasan_dlmalloc.c @@ -0,0 +1,443 @@ +#include "heap.h" +#include "kasan_dlmalloc.h" +#include "kasan_common.h" + +static void poison_mem(void *address, size_t size, size_t aligned_size) { + unpoison_shadow((unsigned long)(address + KASAN_HEAP_HEAD_REDZONE_SIZE), size); + poison_shadow((unsigned long)address, KASAN_HEAP_HEAD_REDZONE_SIZE, + ASAN_SHADOW_HEAP_HEAD_REDZONE_MAGIC); + poison_shadow( + (unsigned long)(address + KASAN_HEAP_HEAD_REDZONE_SIZE + aligned_size), + KASAN_HEAP_TAIL_REDZONE_SIZE, ASAN_SHADOW_HEAP_TAIL_REDZONE_MAGIC); +} + +void *sbrk(ptrdiff_t increment) +{ + unsigned long old = mem_malloc_brk; + unsigned long new = old + increment; + + if ((new < mem_malloc_start) || (new > mem_malloc_end)) + return (void *)MORECORE_FAILURE; + + /* + * if we are giving memory back make sure we clear it out since + * we set MORECORE_CLEARS to 1 + */ + if (increment < 0) + memset((void *)new, 0, -increment); + + mem_malloc_brk = new; + + return (void *)old; +} + +// ------------------------------------------------------------------------------------------------------------ +// ---------------------------------------------------------dlmalloc_trim-------------------------------------- +// ------------------------------------------------------------------------------------------------------------ + +int dlmalloc_trim(size_t pad) { + long top_size; /* Amount of top-most memory */ + long extra; /* Amount to release */ + char* current_brk; /* address returned by pre-check sbrk call */ + char* new_brk; /* address returned by negative sbrk call */ + + unsigned long pagesz = malloc_getpagesize; + + top_size = chunksize(top); + extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; + + if (extra < (long)pagesz) /* 
Not enough memory to release */ + return 0; + + else { + /* Test to make sure no one else called sbrk */ + current_brk = (char*)(MORECORE (0)); + if (current_brk != (char*)(top) + top_size) + return 0; /* Apparently we don't own memory; must fail */ + else { + new_brk = (char*)(MORECORE (-extra)); + + if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */ + { + /* Try to figure out what we have */ + current_brk = (char*)(MORECORE (0)); + top_size = current_brk - (char*)top; + if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ + { + sbrked_mem = current_brk - sbrk_base; + set_head(top, top_size | PREV_INUSE); + } + return 0; + } else + { + /* Success. Adjust top accordingly. */ + set_head(top, (top_size - extra) | PREV_INUSE); + sbrked_mem -= extra; + return 1; + } + } + } +} + + +// ------------------------------------------------------------------------------------------------------------ +// ------------------------------------------------kasan_dlfree_hook------------------------------------------- +// ------------------------------------------------------------------------------------------------------------ + +void kasan_dlfree_hook(void* mem) { + mchunkptr p; /* chunk corresponding to mem */ + INTERNAL_SIZE_T hd; /* its head field */ + INTERNAL_SIZE_T sz; /* its size */ + int idx; /* its bin index */ + mchunkptr next; /* next contiguous chunk */ + INTERNAL_SIZE_T nextsz; /* its size */ + INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ + mchunkptr bck; /* misc temp for linking */ + mchunkptr fwd; /* misc temp for linking */ + int islr; /* track whether merging with last_remainder */ + + if (mem == NULL) /* free(0) has no effect */ + return; + + p = mem2chunkBeforeRedzone(mem); + hd = p->size; + + sz = hd & ~PREV_INUSE; + next = chunk_at_offset(p, sz); + nextsz = chunksize(next); + if (next == top) /* merge with top */ + { + sz += nextsz; + + if (!(hd & PREV_INUSE)) /* consolidate backward */ + { + prevsz = p->prev_size; + p = chunk_at_offset(p, -((long) prevsz)); + sz += prevsz; + unlink(p, bck, fwd); + } + + set_head(p, sz | PREV_INUSE); + top = p; + if ((unsigned long)(sz) >= (unsigned long)trim_threshold) + dlmalloc_trim(top_pad); + + poison_shadow((unsigned long)chunk2mem(p), chunkUserSize(p), ASAN_SHADOW_HEAP_FREE_MAGIC); + return; + } + + set_head(next, nextsz); /* clear inuse bit */ + + islr = 0; + + if (!(hd & PREV_INUSE)) /* consolidate backward */ + { + prevsz = p->prev_size; + p = chunk_at_offset(p, -((long) prevsz)); + sz += prevsz; + + if (p->fd == last_remainder) /* keep as last_remainder */ + islr = 1; + else + unlink(p, bck, fwd); + } + + if (!(inuse_bit_at_offset(next, nextsz))) { + sz += nextsz; + + if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ + { + islr = 1; + link_last_remainder(p); + } + else + unlink(next, bck, fwd); + } + + set_head(p, sz | PREV_INUSE); + set_foot(p, sz); + if (!islr) + frontlink(p, sz, idx, bck, fwd); + poison_shadow((unsigned long)chunk2mem(p), chunkUserSize(p), ASAN_SHADOW_HEAP_FREE_MAGIC); + +} +// ------------------------------------------------------------------------------------------------------------ +// ----------------------------------------------malloc_extend_top--------------------------------------------- +// ------------------------------------------------------------------------------------------------------------ + +static void malloc_extend_top(INTERNAL_SIZE_T nb) { + char* brk; /* return value from sbrk */ + INTERNAL_SIZE_T front_misalign; /* unusable bytes at 
front of sbrked space */ + INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */ + char* new_brk; /* return of 2nd sbrk call */ + INTERNAL_SIZE_T top_size; /* new size of top chunk */ + + mchunkptr old_top = top; /* Record state of old top */ + INTERNAL_SIZE_T old_top_size = chunksize(old_top); + char* old_end = (char*)(chunk_at_offset(old_top, old_top_size)); + + /* Pad request with top_pad plus minimal overhead */ + + INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; + unsigned long pagesz = malloc_getpagesize; + + /* If not the first time through, round to preserve page boundary */ + /* Otherwise, we need to correct to a page size below anyway. */ + /* (We also correct below if an intervening foreign sbrk call.) */ + + if (sbrk_base != (char*)(-1)) + sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); + + brk = (char*)(MORECORE (sbrk_size)); + + /* Fail if sbrk failed or if a foreign sbrk call killed our space */ + if (brk == (char*)(MORECORE_FAILURE) || (brk < old_end && old_top != initial_top)) return; + + sbrked_mem += sbrk_size; + + if (brk == old_end) { + top_size = sbrk_size + old_top_size; + set_head(top, top_size | PREV_INUSE); + } + else { + if (sbrk_base == (char*)(-1)) sbrk_base = brk; + else sbrked_mem += brk - (char*)old_end; + + front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + correction = (MALLOC_ALIGNMENT) - front_misalign; + brk += correction; + } + else correction = 0; + correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) & + ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size)); + + /* Allocate correction */ + new_brk = (char*)(MORECORE (correction)); + if (new_brk == (char*)(MORECORE_FAILURE)) return; + + sbrked_mem += correction; + + top = (mchunkptr)brk; + top_size = new_brk - brk + correction; + set_head(top, top_size | PREV_INUSE); + + if (old_top != initial_top) + { + + /* There must have been an intervening foreign sbrk call. */ + /* A double fencepost is necessary to prevent consolidation */ + + /* If not enough space to do this, then user did something very wrong */ + if (old_top_size < MINSIZE) + { + set_head(top, PREV_INUSE); /* will force null return from malloc */ + return; + } + + /* Also keep size a multiple of MALLOC_ALIGNMENT */ + old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; + set_head_size(old_top, old_top_size); + chunk_at_offset(old_top, old_top_size)->size = SIZE_SZ|PREV_INUSE; + chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =SIZE_SZ|PREV_INUSE; + /* If possible, release the rest. 
*/ + if (old_top_size >= MINSIZE) kasan_dlfree_hook(chunk2memAfterRedzone(old_top)); + } + } + + if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem) + max_sbrked_mem = sbrked_mem; + if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) + max_total_mem = mmapped_mem + sbrked_mem; + + /* We always land on a page boundary */ +} + +// ------------------------------------------------------------------------------------------------------------ +// -------------------------------------------------------dlmalloc--------------------------------------------- +// ------------------------------------------------------------------------------------------------------------ + +void *kasan_dlmalloc_hook(size_t bytes) { + mchunkptr victim; /* inspected/selected chunk */ + INTERNAL_SIZE_T victim_size; /* its size */ + int idx; /* index for bin traversal */ + mbinptr bin; /* associated bin */ + mchunkptr remainder; /* remainder from a split */ + long remainder_size; /* its size */ + int remainder_index; /* its bin index */ + unsigned long block; /* block traverser bit */ + int startidx; /* first bin of a traversed block */ + mchunkptr fwd; /* misc temp for linking */ + mchunkptr bck; /* misc temp for linking */ + mbinptr q; /* misc temp */ + + INTERNAL_SIZE_T nb; + /* check if initialize_dlmalloc_heap() was run */ + + if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) { + return NULL; + } + + if (bytes > HEAP_SIZE || (long)bytes < 0) return NULL; + + nb = request2size(bytes); /* padded request size; */ + unsigned int total_kasan_size = nb + \ + KASAN_HEAP_HEAD_REDZONE_SIZE + \ + KASAN_HEAP_TAIL_REDZONE_SIZE; + + if (is_small_request(total_kasan_size)) /* Faster version for small requests */ { + idx = smallbin_index(total_kasan_size); + /* No traversal or size check necessary for small bins. */ + + q = bin_at(idx); + victim = last(q); + + if (victim == q) { + q = next_bin(q); + victim = last(q); + } + if (victim != q) { + victim_size = chunksize(victim); // here we have to find the KASAN chunk size adding REDZONES back and forth + unlink(victim, bck, fwd); // KASAN does not need to do anything + set_inuse_bit_at_offset(victim, victim_size); // ??? 
probably not anything to do here + + poison_mem(chunk2mem(victim), bytes, nb); + return chunk2memAfterRedzone(victim); + } + + idx += 2; + } + else { + idx = bin_index(total_kasan_size); + bin = bin_at(idx); + + for (victim = last(bin); victim != bin; victim = victim->bk) + { + victim_size = chunksize(victim); + + remainder_size = victim_size - total_kasan_size; + + if (remainder_size >= (long)MINSIZE) { + --idx; + break; + } else if (remainder_size >= 0) { + + unlink(victim, bck, fwd); + set_inuse_bit_at_offset(victim, victim_size); + poison_mem(chunk2mem(victim), bytes, nb); + return chunk2memAfterRedzone(victim); + } + } + ++idx; + } + + if ( (victim = last_remainder->fd) != last_remainder) + { + victim_size = chunksize(victim); + + remainder_size = victim_size - total_kasan_size; + + if (remainder_size >= (long)MINSIZE){ + remainder = chunk_at_offset(victim, total_kasan_size); + set_head(victim, total_kasan_size | PREV_INUSE); + link_last_remainder(remainder); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + + poison_mem(chunk2mem(victim), bytes, nb); + return chunk2memAfterRedzone(victim); + } + + clear_last_remainder; + + if (remainder_size >= 0) { + set_inuse_bit_at_offset(victim, victim_size); + + poison_mem(chunk2mem(victim), bytes, nb); + return chunk2memAfterRedzone(victim); + } + + frontlink(victim, victim_size, remainder_index, bck, fwd); + } + + + if ( (block = idx2binblock(idx)) <= binblocks_r) { + + if ( (block & binblocks_r) == 0){ + idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; + block <<= 1; + while ((block & binblocks_r) == 0) + { + idx += BINBLOCKWIDTH; + block <<= 1; + } + } + + for (;;) { + startidx = idx; + q = bin = bin_at(idx); + + /* For each bin in this block ... */ + do { + for (victim = last(bin); victim != bin; victim = victim->bk) { + victim_size = chunksize(victim); + + remainder_size = victim_size - total_kasan_size; + + if (remainder_size >= (long)MINSIZE) { + remainder = chunk_at_offset(victim, total_kasan_size); + set_head(victim, total_kasan_size | PREV_INUSE); + unlink(victim, bck, fwd); + link_last_remainder(remainder); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + + poison_mem(chunk2mem(victim), bytes, nb); + return chunk2memAfterRedzone(victim); + } else if (remainder_size >= 0) { + set_inuse_bit_at_offset(victim, victim_size); + unlink(victim, bck, fwd); + + poison_mem(chunk2mem(victim), bytes, nb); + return chunk2memAfterRedzone(victim); + } + } + + bin = next_bin(bin); + + } while ((++idx & (BINBLOCKWIDTH - 1)) != 0); + + do { + if ((startidx & (BINBLOCKWIDTH - 1)) == 0) { + av_[1] = (mbinptr)(binblocks_r & ~block); + break; + } + --startidx; + q = prev_bin(q); + } while (first(q) == q); + if ( (block <<= 1) <= binblocks_r && (block != 0) ) { + while ((block & binblocks_r) == 0) { + idx += BINBLOCKWIDTH; + block <<= 1; + } + } + else break; + } + } + + /* Require that there be a remainder, ensuring top always exists */ + if ( (remainder_size = chunksize(top) - total_kasan_size) < (long)MINSIZE) { + + malloc_extend_top(total_kasan_size); + if ( (remainder_size = chunksize(top) - total_kasan_size) < (long)MINSIZE) + return NULL; /* propagate failure */ + } + victim = top; + set_head(victim, total_kasan_size | PREV_INUSE); + top = chunk_at_offset(victim, total_kasan_size); + set_head(top, remainder_size | PREV_INUSE); + poison_mem(chunk2mem(victim), bytes, nb); + + return chunk2memAfterRedzone(victim); +} + diff --git a/kasan_dlmalloc.h 
b/kasan_dlmalloc.h new file mode 100644 index 0000000..6b84746 --- /dev/null +++ b/kasan_dlmalloc.h @@ -0,0 +1,334 @@ +#include "kasan_common.h" +#include "heap.h" +#include "common.h" + +#ifndef __KASAN_DLMALLOC_H__ +#define __KASAN_DLMALLOC_H__ + +#define INTERNAL_SIZE_T size_t + +struct malloc_chunk +{ + INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ + INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +} __attribute__((__may_alias__)) ; + +#define HEAP_SIZE 0x10000 + +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* mbinptr; + +#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) +#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) +#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) +#define MINSIZE (sizeof(struct malloc_chunk)) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) + +#define chunk2memAfterRedzone(p) ((void*)((char*)(p) + 2*SIZE_SZ + KASAN_HEAP_HEAD_REDZONE_SIZE)) +#define mem2chunkBeforeRedzone(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ - KASAN_HEAP_HEAD_REDZONE_SIZE)) + +/* pad request bytes into a usable size */ + +#define request2size(req) \ + ((((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ + (MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \ + (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) + +/* Check if m has acceptable alignment */ + +#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) + +/* + Physical chunk operations +*/ + +/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ + +#define PREV_INUSE 0x1 + +/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ + +#define IS_MMAPPED 0x2 + +/* Bits to mask off when extracting size */ + +#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) + +/* Ptr to next physical malloc_chunk. 
*/
+
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
+
+/* Ptr to previous physical malloc_chunk */
+
+#define prev_chunk(p)\
+  ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
+
+/* Treat space at ptr + offset as a chunk */
+
+#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+
+/*
+  Dealing with use bits
+*/
+
+/* extract p's inuse bit */
+
+#define inuse(p)\
+((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
+
+/* extract inuse bit of previous chunk */
+
+#define prev_inuse(p) ((p)->size & PREV_INUSE)
+
+/* check for mmap()'ed chunk */
+
+#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
+
+/* set/clear chunk as in use without otherwise disturbing */
+
+#define set_inuse(p)\
+((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
+
+#define clear_inuse(p)\
+((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
+
+/* check/set/clear inuse bits in known places */
+
+#define inuse_bit_at_offset(p, s)\
+  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
+
+#define set_inuse_bit_at_offset(p, s)\
+  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
+
+#define clear_inuse_bit_at_offset(p, s)\
+  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
+
+/*
+  Dealing with size fields
+*/
+
+/* Get size of usable space */
+
+#define chunkUserSize(p) ((p)->size - 2*SIZE_SZ)
+
+/* Get size, ignoring use bits */
+
+#define chunksize(p) ((p)->size & ~(SIZE_BITS))
+
+/* Set size at head, without disturbing its use bit */
+
+#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
+
+/* Set size/use ignoring previous bits in header */
+
+#define set_head(p, s) ((p)->size = (s))
+
+/* Set size at footer (only when chunk is not in use) */
+
+#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
+
+/*
+  Bins
+
+  The bins, `av_' are an array of pairs of pointers serving as the
+  heads of (initially empty) doubly-linked lists of chunks, laid out
+  in a way so that each pair can be treated as if it were in a
+  malloc_chunk. (This way, the fd/bk offsets for linking bin heads
+  and chunks are the same).
+
+  Bins for sizes < 512 bytes contain chunks of all the same size, spaced
+  8 bytes apart. Larger bins are approximately logarithmically
+  spaced. (See the table below.) The `av_' array is never mentioned
+  directly in the code, but instead via bin access macros.
+
+  Bin layout:
+
+  64 bins of size       8
+  32 bins of size      64
+  16 bins of size     512
+   8 bins of size    4096
+   4 bins of size   32768
+   2 bins of size  262144
+   1 bin  of size what's left
+
+  There is actually a little bit of slop in the numbers in bin_index
+  for the sake of speed. This makes no difference elsewhere.
+
+  The special chunks `top' and `last_remainder' get their own bins,
+  (this is implemented via yet more trickery with the av_ array),
+  although `top' is never properly linked to its bin since it is
+  always handled specially.
+*/
+
+#define NAV 128 /* number of bins */
+
+/* (mbinptr is already defined above, together with mchunkptr) */
+
+/* access macros */
+
+#define bin_at(i)   ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
+#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
+#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
+
+/*
+  The first 2 bins are never indexed. The corresponding av_ cells are instead
+  used for bookkeeping. This is not to save space, but to simplify
+  indexing, maintain locality, and avoid some initialization tests.
+*/ + +#define top (av_[2]) /* The topmost chunk */ +#define last_remainder (bin_at(1)) /* remainder from last split */ + +/* + Because top initially points to its own bin with initial + zero size, thus forcing extension on the first malloc request, + we avoid having any special code in malloc to check whether + it even exists yet. But we still need to in malloc_extend_top. +*/ + +#define initial_top ((mchunkptr)(bin_at(0))) + +/* Helper macro to initialize bins */ + +#define IAV(i) bin_at(i), bin_at(i) + +static mbinptr av_[NAV * 2 + 2] = { + NULL, NULL, + IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), + IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), + IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), + IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), + IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), + IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), + IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), + IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), + IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), + IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), + IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), + IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), + IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), + IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), + IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), + IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) +}; + +/* field-extraction macros */ + +#define first(b) ((b)->fd) +#define last(b) ((b)->bk) + +/* + Indexing into bins +*/ + +#define bin_index(sz) \ +(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ + ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ + ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ + ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ + ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ + ((((unsigned long)(sz)) >> 9) <= 1364) ? 
124 + (((unsigned long)(sz)) >> 18): \
+                                          126)
+
+#define MAX_SMALLBIN 63
+#define MAX_SMALLBIN_SIZE 512
+#define SMALLBIN_WIDTH 8
+#define smallbin_index(sz) (((unsigned long)(sz)) >> 3)
+#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
+
+#define frontlink(P, S, IDX, BK, FD) \
+{ \
+  if (S < MAX_SMALLBIN_SIZE) \
+  { \
+    IDX = smallbin_index(S); \
+    mark_binblock(IDX); \
+    BK = bin_at(IDX); \
+    FD = BK->fd; \
+    P->bk = BK; \
+    P->fd = FD; \
+    FD->bk = BK->fd = P; \
+  } \
+  else \
+  { \
+    IDX = bin_index(S); \
+    BK = bin_at(IDX); \
+    FD = BK->fd; \
+    if (FD == BK) mark_binblock(IDX); \
+    else \
+    { \
+      while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
+      BK = FD->bk; \
+    } \
+    P->bk = BK; \
+    P->fd = FD; \
+    FD->bk = BK->fd = P; \
+  } \
+}
+
+#define link_last_remainder(P) \
+{ \
+  last_remainder->fd = last_remainder->bk = P; \
+  P->fd = P->bk = last_remainder; \
+}
+
+#define unlink(P, BK, FD) \
+{ \
+  BK = P->bk; \
+  FD = P->fd; \
+  FD->bk = BK; \
+  BK->fd = FD; \
+}
+
+#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
+#define clear_last_remainder \
+  (last_remainder->fd = last_remainder->bk = last_remainder)
+
+#define BINBLOCKWIDTH 4 /* bins per block */
+#define binblocks_r ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
+#define binblocks_w (av_[1])
+#define idx2binblock(ix) ((unsigned)1 << (ix / BINBLOCKWIDTH))
+#define mark_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
+#define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
+
+#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
+#define DEFAULT_TOP_PAD (0)
+
+static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD;
+static unsigned long top_pad = DEFAULT_TOP_PAD;
+
+#define malloc_getpagesize 4096
+#define MORECORE sbrk
+#define MORECORE_FAILURE 0
+#define MORECORE_CLEARS 1
+
+struct mallinfo {
+  int arena;    /* total space allocated from system */
+  int ordblks;  /* number of non-inuse chunks */
+  int smblks;   /* unused -- always zero */
+  int hblks;    /* number of mmapped regions */
+  int hblkhd;   /* total space in mmapped regions */
+  int usmblks;  /* unused -- always zero */
+  int fsmblks;  /* unused -- always zero */
+  int uordblks; /* total allocated space */
+  int fordblks; /* total non-inuse space */
+  int keepcost; /* top-most, releasable (via malloc_trim) space */
+};
+
+static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+static char* sbrk_base = (char*)(-1);
+
+#define sbrked_mem (current_mallinfo.arena)
+static unsigned long max_sbrked_mem = 0;
+static unsigned long mmapped_mem = 0;
+static unsigned long max_total_mem = 0;
+
+void kasan_dlfree_hook(void* mem);
+void *kasan_dlmalloc_hook(size_t bytes);
+
+#endif  // __KASAN_DLMALLOC_H__
\ No newline at end of file
diff --git a/kasan_simple_malloc.c b/kasan_simple_malloc.c
new file mode 100644
index 0000000..d7a5dfa
--- /dev/null
+++ b/kasan_simple_malloc.c
@@ -0,0 +1,52 @@
+#include "heap.h"
+#include "common.h"
+#include "kasan_common.h"
+#include "kasan_simple_malloc.h"
+
+void *allocate_chunk(unsigned long size) {
+  void *result = (void *)mem_malloc_start;
+  if (size > mem_malloc_size) return NULL;
+
+  size = (size + 7) & (~7UL);
+  mem_malloc_start += size;
+  mem_malloc_size -= size;
+  return result;
+}
+
+void *kasan_malloc_hook(unsigned int size) {
+  struct KASAN_HEAP_HEADER *kasan_heap_hdr = NULL;
+  unsigned int aligned_size = (size + KASAN_SHADOW_MASK) & (~KASAN_SHADOW_MASK);
+  unsigned int total_size = aligned_size + KASAN_HEAP_HEAD_REDZONE_SIZE +
+                            KASAN_HEAP_TAIL_REDZONE_SIZE;
+
+  void *ptr = allocate_chunk(total_size);
+  if (ptr == NULL) return NULL;
+
+  kasan_heap_hdr = (struct KASAN_HEAP_HEADER *)ptr;
+  kasan_heap_hdr->aligned_size = aligned_size;
+
+  unpoison_shadow((unsigned long)(ptr + KASAN_HEAP_HEAD_REDZONE_SIZE), size);
+  poison_shadow((unsigned long)ptr, KASAN_HEAP_HEAD_REDZONE_SIZE,
+                ASAN_SHADOW_HEAP_HEAD_REDZONE_MAGIC);
+  poison_shadow(
+      (unsigned long)(ptr + KASAN_HEAP_HEAD_REDZONE_SIZE + aligned_size),
+      KASAN_HEAP_TAIL_REDZONE_SIZE, ASAN_SHADOW_HEAP_TAIL_REDZONE_MAGIC);
+
+  return ptr + KASAN_HEAP_HEAD_REDZONE_SIZE;
+}
+
+void kasan_free_hook(void *ptr) {
+  struct KASAN_HEAP_HEADER *kasan_heap_hdr = NULL;
+  unsigned int aligned_size = 0;
+
+  if (ptr == NULL) return;
+
+  kasan_heap_hdr =
+      (struct KASAN_HEAP_HEADER *)(ptr - KASAN_HEAP_HEAD_REDZONE_SIZE);
+  aligned_size = kasan_heap_hdr->aligned_size;
+
+  free_chunk(kasan_heap_hdr);
+  poison_shadow((unsigned long)ptr, aligned_size, ASAN_SHADOW_HEAP_FREE_MAGIC);
+
+  return;
+}
\ No newline at end of file
diff --git a/kasan_simple_malloc.h b/kasan_simple_malloc.h
new file mode 100644
index 0000000..67deae9
--- /dev/null
+++ b/kasan_simple_malloc.h
@@ -0,0 +1,12 @@
+
+#ifndef __KASAN_SIMPLE_MALLOC__
+#define __KASAN_SIMPLE_MALLOC__
+
+struct KASAN_HEAP_HEADER {
+  unsigned int aligned_size;
+};
+
+void *kasan_malloc_hook(unsigned int size);
+void kasan_free_hook(void *ptr);
+
+#endif  // __KASAN_SIMPLE_MALLOC__
\ No newline at end of file
diff --git a/kasan_test b/kasan_test
new file mode 100755
index 0000000..1de5dd3
Binary files /dev/null and b/kasan_test differ
diff --git a/kasan_test.c b/kasan_test.c
index c282830..8382e59 100644
--- a/kasan_test.c
+++ b/kasan_test.c
@@ -11,7 +11,6 @@
  * GNU General Public License for more details.
  */
 
-#include "kasan.h"
 #include "heap.h"
 #include "printf.h"
 
@@ -26,9 +25,8 @@ int main(void) {
   initialize_heap();
-  initialize_kasan();
 
-  test_heap_overflow();
+  test_use_after_free();
   test_stack_overflow();
   test_globals_overflow();
   test_memset_overflow();
diff --git a/sanitized_lib.c b/sanitized_lib.c
index e29a11c..de8e3b8 100644
--- a/sanitized_lib.c
+++ b/sanitized_lib.c
@@ -25,6 +25,15 @@ void test_heap_overflow(void) {
   ptr[oob_index] = 0;
 }
 
+void test_use_after_free(void) {
+  int size = 17;
+  unsigned char *ptr = malloc(size);
+  printf("\nKASan test: heap use after free\n");
+  printf("Freeing allocated pointer %p of size %d and reusing it\n", (void *)ptr, size);
+  free(ptr);
+  ptr[0] = 0;
+}
+
 char oob_value;
 
 void test_stack_overflow(void) {
diff --git a/sanitized_lib.h b/sanitized_lib.h
index bac0df0..80a241e 100644
--- a/sanitized_lib.h
+++ b/sanitized_lib.h
@@ -15,6 +15,7 @@
 #define __SANITIZED_LIB_H__
 
 void test_heap_overflow(void);
+void test_use_after_free(void);
 void test_stack_overflow(void);
 void test_globals_overflow(void);
 void test_memset_overflow(void);
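
Reviewer note (not part of the patch): the pointer arithmetic behind `chunk2memAfterRedzone`/`mem2chunkBeforeRedzone` is the crux of the dlmalloc integration. The user pointer returned by `kasan_dlmalloc_hook` sits one chunk header (2*SIZE_SZ) plus one head redzone (0x20 bytes) past the chunk base, and `kasan_dlfree_hook` must invert that exactly before handing the chunk back to the coalescing logic. The sketch below is a standalone, host-side illustration of that layout; the constants and macros are copied from `kasan_dlmalloc.h`/`kasan_common.h`, while the `main` harness is hypothetical and not project code.

```c
/* Standalone sketch of the chunk layout used by the KASan dlmalloc hooks.
 * Constants mirror kasan_dlmalloc.h / kasan_common.h; the harness itself
 * is illustrative only and does not belong to the patch. */
#include <assert.h>
#include <stdio.h>

#define SIZE_SZ sizeof(size_t)
#define KASAN_HEAP_HEAD_REDZONE_SIZE 0x20
#define KASAN_HEAP_TAIL_REDZONE_SIZE 0x20

/* chunk2mem skips the two size words of the chunk header;
 * chunk2memAfterRedzone additionally skips the head redzone. */
#define chunk2mem(p) ((void *)((char *)(p) + 2 * SIZE_SZ))
#define chunk2memAfterRedzone(p) \
  ((void *)((char *)(p) + 2 * SIZE_SZ + KASAN_HEAP_HEAD_REDZONE_SIZE))
#define mem2chunkBeforeRedzone(mem) \
  ((void *)((char *)(mem) - 2 * SIZE_SZ - KASAN_HEAP_HEAD_REDZONE_SIZE))

int main(void) {
  char chunk[256]; /* stand-in for a chunk carved out of the heap */
  void *user = chunk2memAfterRedzone(chunk);

  /* kasan_dlfree_hook must recover the chunk base from the user pointer. */
  assert(mem2chunkBeforeRedzone(user) == (void *)chunk);

  /* poison_mem() unpoisons [user, user + size) and poisons the head redzone
   * starting at chunk2mem() and the tail redzone at user + aligned_size. */
  printf("user data starts %zu bytes into the chunk\n",
         (size_t)((char *)user - chunk));
  return 0;
}
```

On an LP64 target this prints 48 (16-byte header plus 32-byte head redzone). Keeping the two conversions exact inverses is what lets `kasan_dlfree_hook` feed `mem2chunkBeforeRedzone(mem)` straight back into the ordinary dlmalloc consolidation path while the redzones stay poisoned.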