author    Alex Auvolat <alex.auvolat@ens.fr>    2014-12-06 20:05:02 +0100
committer Alex Auvolat <alex.auvolat@ens.fr>    2014-12-06 20:05:02 +0100
commit    b38d90b5cacee9bfc775f1fa4b31c5863654c5e8 (patch)
tree      4e8c8eecbdc49dff1d2352eae177eb855ac1d53b /kernel
parent    a48466109f59d507f9108635a5dc4ec865173f85 (diff)
Add kernel stack protector. Find bug. Correct it.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/include/slab_alloc.h   3
-rw-r--r--  kernel/l0/kmain.c            27
-rw-r--r--  kernel/l0/loader.s            7
-rw-r--r--  kernel/l0/paging.c           28
-rw-r--r--  kernel/lib/slab_alloc.c      24
5 files changed, 56 insertions, 33 deletions
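
This commit reserves one unmapped "protector" page directly below the kernel stack, so that a stack overflow triggers an immediate page fault instead of silently corrupting whatever sits below the stack in .bss. A minimal sketch of the idea, using names from the diff (kernel_stack_protector, PAGE_SIZE, PANIC); how the faulting address reaches the handler, e.g. via cr2, is an assumption about this kernel's fault path:

	extern char kernel_stack_protector;  // one page-aligned, never-mapped page (see loader.s)

	void on_page_fault(void* vaddr) {    // hypothetical entry point; vaddr = faulting address
		void* guard = (void*)&kernel_stack_protector;
		if (vaddr >= guard && vaddr < guard + PAGE_SIZE) {
			// the stack grew down into the unmapped guard page
			PANIC("Kernel stack overflow.");
		}
	}
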
diff --git a/kernel/include/slab_alloc.h b/kernel/include/slab_alloc.h
index 5355a66..eb9588d 100644
--- a/kernel/include/slab_alloc.h
+++ b/kernel/include/slab_alloc.h
@@ -13,8 +13,11 @@
#include <assert.h>
#define ASSERT assert
#define PAGE_SIZE 0x1000
+#include <stdio.h>
+#define dbg_printf printf
#else
#include <sys.h> // this is macroscope
+#include <dbglog.h>
#endif
// expected format for the array of slab_type_t given to slab_create :
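(The expected format referred to above is a slab_type_t array terminated by an entry whose obj_size is 0 — the slab_sizes table in kmain.c below is an instance.)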
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index 97b5a97..53afb6b 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -31,31 +31,36 @@ void test_pf_handler(pagedir_t *pd, region_info_t *i, void* addr) {
void* page_alloc_fun_for_kmalloc(size_t bytes) {
void* addr = region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler);
- dbg_printf("Alloc %p bytes for kmalloc at: %p\n", bytes, addr);
+ dbg_printf("[alloc 0x%p for kmalloc : %p]\n", bytes, addr);
return addr;
}
void page_free_fun_for_kmalloc(void* ptr) {
+ dbg_printf("[Free 0x%p", ptr);
+
region_info_t *i = find_region(ptr);
ASSERT(i != 0 && i->type == REGION_T_CORE_HEAP);
for (void* x = i->addr; x < i->addr + i->size; x += PAGE_SIZE) {
uint32_t f = pd_get_frame(x);
+ dbg_printf(" %i", f);
if (f != 0) {
pd_unmap_page(x);
frame_free(f, 1);
}
}
+ dbg_printf(" : ");
region_free(ptr);
+ dbg_printf("ok]\n");
}
slab_type_t slab_sizes[] = {
- { "8B obj", 8, 1 },
+ { "8B obj", 8, 2 },
{ "16B obj", 16, 2 },
{ "32B obj", 32, 2 },
- { "64B obj", 64, 2 },
- { "128B obj", 128, 2 },
+ { "64B obj", 64, 4 },
+ { "128B obj", 128, 4 },
{ "256B obj", 256, 4 },
- { "512B obj", 512, 4 },
+ { "512B obj", 512, 8 },
{ "1KB obj", 1024, 8 },
- { "2KB obj", 2048, 8 },
+ { "2KB obj", 2048, 16 },
{ "4KB obj", 4096, 16 },
{ 0, 0, 0 }
};
@@ -149,16 +154,16 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
region_free(s);
BOCHS_BREAKPOINT;
- // TEST SLAB ALLOCATOR!!!
+ // Test slab allocator !
mem_allocator_t *a =
create_slab_allocator(slab_sizes, page_alloc_fun_for_kmalloc,
page_free_fun_for_kmalloc);
dbg_printf("Created slab allocator at 0x%p\n", a);
dbg_print_region_stats();
- const int m = 10000;
- uint16_t* ptr[m];
+ const int m = 200;
+ uint16_t** ptr = slab_alloc(a, m * sizeof(uint32_t));
for (int i = 0; i < m; i++) {
- size_t s = 1 << ((i * 7) % 12 + 2);
+ size_t s = 1 << ((i * 7) % 11 + 2);
ptr[i] = (uint16_t*)slab_alloc(a, s);
ASSERT((void*)ptr[i] >= kernel_data_end && (size_t)ptr[i] < 0xFFC00000);
*ptr[i] = ((i * 211) % 1024);
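
A note on the changed size expression: with % 12 the exponent could reach 13, i.e. an 8192-byte request, beyond the largest 4KB slab type; % 11 keeps every request within the slab size table (at most 1 << 12 = 4096 bytes).
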
@@ -172,7 +177,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
slab_free(a, ptr[i]);
}
dbg_print_region_stats();
- dbg_printf("Destroying slab allocator.\n");
+ dbg_printf("Destroying slab allocator...\n");
destroy_slab_allocator(a);
dbg_print_region_stats();
diff --git a/kernel/l0/loader.s b/kernel/l0/loader.s
index 6ad4ff0..5d0a2b8 100644
--- a/kernel/l0/loader.s
+++ b/kernel/l0/loader.s
@@ -1,6 +1,7 @@
[EXTERN kmain] ; kmain is defined in kmain.c
[GLOBAL loader] ; making entry point visible to linker
-[GLOBAL kernel_pd] ; make kernel page directory visible
+[GLOBAL kernel_pd] ; make kernel page directory visible
+[GLOBAL kernel_stack_protector] ; used to detect kernel stack overflow
; higher-half kernel setup
K_HIGHHALF_ADDR equ 0xC0000000
@@ -74,7 +75,9 @@ hang:
jmp hang
[section .bss]
-align 4
+align 0x1000
+kernel_stack_protector:
+ resb 0x1000 ; as soon as we have efficient paging, we WON'T map this page
stack_bottom:
resb LOADER_STACK_SIZE
stack_top:
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index 5000c71..d289712 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -39,6 +39,8 @@ extern pagetable_t kernel_pd;
// pre-allocate a page table so that we can map the first 4M of kernel memory
static pagetable_t __attribute__((aligned(PAGE_SIZE))) kernel_pt0;
+extern char kernel_stack_protector;
+
static pagedir_t kernel_pd_d;
static pagedir_t *current_pd_d;
@@ -58,19 +60,13 @@ void page_fault_handler(registers_t *regs) {
return;
}
+ if (vaddr >= (void*)&kernel_stack_protector && vaddr < (void*)&kernel_stack_protector + PAGE_SIZE) {
+ dbg_printf("Kernel stack overflow at 0x%p\n", vaddr);
+ PANIC("Kernel stack overflow.");
+ }
+
if ((size_t)vaddr >= PD_MIRROR_ADDR) {
dbg_printf("Fault on access to mirrorred PD at 0x%p\n", vaddr);
-
- uint32_t x = (size_t)vaddr - PD_MIRROR_ADDR;
- uint32_t page = (x % PAGE_SIZE) / 4;
- uint32_t pt = x / PAGE_SIZE;
- dbg_printf("For pt 0x%p, page 0x%p -> addr 0x%p\n", pt, page, ((pt * 1024) + page) * PAGE_SIZE);
-
- for (int i = 0; i < N_PAGES_IN_PT; i++) {
- //dbg_printf("%i. 0x%p\n", i, kernel_pd.page[i]);
- }
-
- dbg_dump_registers(regs);
dbg_print_region_stats();
PANIC("Unhandled kernel space page fault");
}
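
With the protector page left unmapped, runaway stack growth now ends in a clean panic instead of memory corruption. A hypothetical test, not part of this commit, that should trip the new check:

	// hypothetical: recurse until the stack descends into kernel_stack_protector
	static int overflow(int n) {
		volatile char pad[512];          // force real stack usage in each frame
		pad[0] = (char)n;
		return overflow(n + 1) + pad[0]; // the addition prevents tail-call optimization
	}
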
@@ -108,7 +104,11 @@ void paging_setup(void* kernel_data_end) {
ASSERT(PAGE_OF_ADDR(K_HIGHHALF_ADDR) == 0); // kernel is 4M-aligned
ASSERT(FIRST_KERNEL_PT == 768);
for (size_t i = 0; i < n_kernel_pages; i++) {
- kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
+ if ((i * PAGE_SIZE) + K_HIGHHALF_ADDR == (size_t)&kernel_stack_protector) {
+ frame_free(i, 1); // don't map kernel stack protector page
+ } else {
+ kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
+ }
}
for (size_t i = n_kernel_pages; i < 1024; i++){
kernel_pt0.page[i] = 0;
@@ -180,8 +180,6 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
(new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;
invlpg(&current_pt[pt]);
}
- dbg_printf("[%p,%i,%i,", vaddr, pt, page);
-
current_pt[pt].page[page] =
(frame_id << PTE_FRAME_SHIFT)
| PTE_PRESENT
@@ -189,8 +187,6 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
| (rw ? PTE_RW : 0);
invlpg(vaddr);
- dbg_printf("]");
-
return 0;
}
diff --git a/kernel/lib/slab_alloc.c b/kernel/lib/slab_alloc.c
index 8e8c0a3..63ee0e0 100644
--- a/kernel/lib/slab_alloc.c
+++ b/kernel/lib/slab_alloc.c
@@ -17,6 +17,7 @@ typedef struct region {
void* region_addr;
size_t region_size;
struct region *next_region;
+ bool contains_descriptors;
} region_t;
typedef union descriptor {
@@ -67,6 +68,7 @@ static descriptor_t *take_descriptor(mem_allocator_t *a) {
region_t *drd = &dd->r;
drd->region_addr = p;
drd->region_size = PAGE_SIZE;
+ drd->contains_descriptors = true;
drd->next_region = a->all_regions;
a->all_regions = drd;
}
@@ -91,7 +93,7 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
ptr.addr = af(PAGE_SIZE);
if (ptr.addr == 0) return 0; // could not allocate
- void* end_addr = ptr.addr + PAGE_SIZE;
+ const void* end_addr = ptr.addr + PAGE_SIZE;
mem_allocator_t *a = ptr.a;
ptr.a++;
@@ -109,7 +111,7 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
}
a->first_free_descriptor = 0;
- while ((void*)(ptr.d + 1) <= end_addr) {
+ while (ptr.d + 1 <= (descriptor_t*)end_addr) {
add_free_descriptor(a, ptr.d);
ptr.d++;
}
@@ -126,11 +128,23 @@ static void stack_and_destroy_regions(page_free_fun_t ff, region_t *r) {
}
void destroy_slab_allocator(mem_allocator_t *a) {
for (int i = 0; a->types[i].obj_size != 0; i++) {
- for (cache_t *c = a->slabs[i].first_cache; c != 0; c++) {
+ for (cache_t *c = a->slabs[i].first_cache; c != 0; c = c->next_cache) {
a->free_fun(c->region_addr);
}
}
- stack_and_destroy_regions(a->free_fun, a->all_regions);
+ region_t *dr = 0;
+ region_t *i = a->all_regions;
+ while (i != 0) {
+ region_t *r = i;
+ i = r->next_region;
+ if (r->contains_descriptors) {
+ r->next_region = dr;
+ dr = r;
+ } else {
+ a->free_fun(r->region_addr);
+ }
+ }
+ stack_and_destroy_regions(a->free_fun, dr);
a->free_fun(a);
}
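
The reworked destroy path fixes two bugs visible above: the cache list was advanced with c++ instead of c = c->next_cache, and regions holding the allocator's own descriptors could be freed while the all_regions list still pointed into them. Regions flagged contains_descriptors are now unlinked onto a side list and freed last. A standalone sketch of that ordering, with malloc'd nodes standing in for region_t (an assumption; the kernel frees whole pages through free_fun):

	#include <stdbool.h>
	#include <stdlib.h>

	typedef struct node { bool contains_descriptors; struct node *next; } node_t;

	static void destroy_all(node_t *list) {
		node_t *deferred = 0;
		while (list != 0) {
			node_t *n = list;
			list = n->next;
			if (n->contains_descriptors) {  // defer: the list's links live in nodes like this
				n->next = deferred;
				deferred = n;
			} else {
				free(n);
			}
		}
		while (deferred != 0) {             // nothing references the deferred nodes anymore
			node_t *n = deferred;
			deferred = n->next;
			free(n);
		}
	}
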
@@ -198,6 +212,7 @@ void* slab_alloc(mem_allocator_t* a, size_t sz) {
return 0;
} else {
r->region_size = sz;
+ r->contains_descriptors = false;
r->next_region = a->all_regions;
a->all_regions = r;
@@ -254,6 +269,7 @@ void slab_free(mem_allocator_t* a, void* addr) {
a->free_fun(addr); // found it, free it
region_t *r = i->next_region;
+ ASSERT(!r->contains_descriptors);
i->next_region = r->next_region;
add_free_descriptor(a, (descriptor_t*)r);
return;