Diffstat (limited to 'kernel/l0')
-rw-r--r--  kernel/l0/kmain.c   27
-rw-r--r--  kernel/l0/loader.s   7
-rw-r--r--  kernel/l0/paging.c  28
3 files changed, 33 insertions, 29 deletions
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index 97b5a97..53afb6b 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -31,31 +31,36 @@ void test_pf_handler(pagedir_t *pd, region_info_t *i, void* addr) {
void* page_alloc_fun_for_kmalloc(size_t bytes) {
void* addr = region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler);
- dbg_printf("Alloc %p bytes for kmalloc at: %p\n", bytes, addr);
+ dbg_printf("[alloc 0x%p for kmalloc : %p]\n", bytes, addr);
return addr;
}
void page_free_fun_for_kmalloc(void* ptr) {
+ dbg_printf("[Free 0x%p", ptr);
+
region_info_t *i = find_region(ptr);
ASSERT(i != 0 && i->type == REGION_T_CORE_HEAP);
for (void* x = i->addr; x < i->addr + i->size; x += PAGE_SIZE) {
uint32_t f = pd_get_frame(x);
+ dbg_printf(" %i", f);
if (f != 0) {
pd_unmap_page(x);
frame_free(f, 1);
}
}
+ dbg_printf(" : ");
region_free(ptr);
+ dbg_printf("ok]\n");
}
slab_type_t slab_sizes[] = {
- { "8B obj", 8, 1 },
+ { "8B obj", 8, 2 },
{ "16B obj", 16, 2 },
{ "32B obj", 32, 2 },
- { "64B obj", 64, 2 },
- { "128B obj", 128, 2 },
+ { "64B obj", 64, 4 },
+ { "128B obj", 128, 4 },
{ "256B obj", 256, 4 },
- { "512B obj", 512, 4 },
+ { "512B obj", 512, 8 },
{ "1KB obj", 1024, 8 },
- { "2KB obj", 2048, 8 },
+ { "2KB obj", 2048, 16 },
{ "4KB obj", 4096, 16 },
{ 0, 0, 0 }
};
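
Note on the size-class table above: if the third slab_type_t field is the number of 4 KB pages backing one slab of that class (an assumption; the struct definition is not part of this diff), the commit doubles the backing store for the 8 B, 64 B, 128 B, 512 B and 2 KB classes, halving how often the slab layer must call back into page_alloc_fun_for_kmalloc. A minimal sketch of the resulting per-slab capacities, under that assumption:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* hypothetical mirror of slab_type_t: name, object size, pages per slab */
struct slab_class { const char *name; uint32_t obj_size; uint32_t pages; };

int main(void) {
	struct slab_class c[] = {
		{ "8B obj",      8,  2 }, { "16B obj",    16,  2 },
		{ "32B obj",    32,  2 }, { "64B obj",    64,  4 },
		{ "128B obj",  128,  4 }, { "256B obj",  256,  4 },
		{ "512B obj",  512,  8 }, { "1KB obj",  1024,  8 },
		{ "2KB obj",  2048, 16 }, { "4KB obj",  4096, 16 },
	};
	for (unsigned i = 0; i < sizeof(c) / sizeof(c[0]); i++) {
		/* power-of-two sizes pack into a slab with no tail waste */
		printf("%-8s -> %4u objects per %2u-page slab\n", c[i].name,
		       c[i].pages * PAGE_SIZE / c[i].obj_size, c[i].pages);
	}
	return 0;
}
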
@@ -149,16 +154,16 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
region_free(s);
BOCHS_BREAKPOINT;
- // TEST SLAB ALLOCATOR!!!
+ // Test slab allocator !
mem_allocator_t *a =
create_slab_allocator(slab_sizes, page_alloc_fun_for_kmalloc,
page_free_fun_for_kmalloc);
dbg_printf("Created slab allocator at 0x%p\n", a);
dbg_print_region_stats();
- const int m = 10000;
- uint16_t* ptr[m];
+ const int m = 200;
+ uint16_t** ptr = slab_alloc(a, m * sizeof(uint32_t));
for (int i = 0; i < m; i++) {
- size_t s = 1 << ((i * 7) % 12 + 2);
+ size_t s = 1 << ((i * 7) % 11 + 2);
ptr[i] = (uint16_t*)slab_alloc(a, s);
ASSERT((void*)ptr[i] >= kernel_data_end && (size_t)ptr[i] < 0xFFC00000);
*ptr[i] = ((i * 211) % 1024);
@@ -172,7 +177,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
slab_free(a, ptr[i]);
}
dbg_print_region_stats();
- dbg_printf("Destroying slab allocator.\n");
+ dbg_printf("Destroying slab allocator...\n");
destroy_slab_allocator(a);
dbg_print_region_stats();
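
Two details in the test changes above are worth spelling out. The modulus change from 12 to 11 bounds the request sizes: (i * 7) % 11 + 2 walks exponents 2 through 12, so every request is between 4 and 4096 bytes and fits the largest 4 KB class, whereas the old formula could ask for 1 << 13 = 8192 bytes, larger than any class in the table. And the pointer array moves off the kernel stack into the slab heap: 10000 on-stack entries would have been about 40 KB, likely far more than LOADER_STACK_SIZE, which the stack protector introduced below would now catch (on i386, sizeof(uint32_t) == sizeof(uint16_t*), so the request size is right). A sketch of the exercised sizes:

#include <stdio.h>
#include <stddef.h>

int main(void) {
	/* 7 is coprime to 11, so eleven consecutive i values hit every
	 * exponent 2..12 exactly once */
	for (int i = 0; i < 11; i++) {
		size_t s = (size_t)1 << ((i * 7) % 11 + 2);
		printf("slab_alloc request: %zu bytes\n", s);  /* 4 .. 4096 */
	}
	return 0;
}
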
diff --git a/kernel/l0/loader.s b/kernel/l0/loader.s
index 6ad4ff0..5d0a2b8 100644
--- a/kernel/l0/loader.s
+++ b/kernel/l0/loader.s
@@ -1,6 +1,7 @@
[EXTERN kmain] ; kmain is defined in kmain.c
[GLOBAL loader] ; making entry point visible to linker
-[GLOBAL kernel_pd] ; make kernel page directory visible
+[GLOBAL kernel_pd] ; make kernel page directory visible
+[GLOBAL kernel_stack_protector] ; used to detect kernel stack overflow
; higher-half kernel setup
K_HIGHHALF_ADDR equ 0xC0000000
@@ -74,7 +75,9 @@ hang:
jmp hang
[section .bss]
-align 4
+align 0x1000
+kernel_stack_protector:
+ resb 0x1000 ; as soon as we have efficient paging, we WON'T map this page
stack_bottom:
resb LOADER_STACK_SIZE
stack_top:
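
The loader change above reserves one page-aligned page of .bss directly below the stack. Together with the paging.c change below, which leaves that page unmapped, the first push past stack_bottom raises a page fault instead of silently corrupting whatever the linker placed underneath. A minimal sketch of the mechanics, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u

int main(void) {
	uint32_t protector    = 0xC0108000u;            /* guard page, never mapped */
	uint32_t stack_bottom = protector + PAGE_SIZE;  /* lowest valid stack byte */

	/* The stack grows downward. Once esp reaches stack_bottom, the
	 * next push lands in the guard page and faults immediately. */
	uint32_t esp = stack_bottom;          /* stack exactly full */
	uint32_t push_addr = esp - 4;         /* first overflowing word */
	printf("push at 0x%08x is %s the guard page\n", push_addr,
	       (push_addr >= protector && push_addr < protector + PAGE_SIZE)
	           ? "inside" : "outside");
	return 0;
}
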
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index 5000c71..d289712 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -39,6 +39,8 @@ extern pagetable_t kernel_pd;
// pre-allocate a page table so that we can map the first 4M of kernel memory
static pagetable_t __attribute__((aligned(PAGE_SIZE))) kernel_pt0;
+extern char kernel_stack_protector;
+
static pagedir_t kernel_pd_d;
static pagedir_t *current_pd_d;
@@ -58,19 +60,13 @@ void page_fault_handler(registers_t *regs) {
return;
}
+ if (vaddr >= (void*)&kernel_stack_protector && vaddr < (void*)&kernel_stack_protector + PAGE_SIZE) {
+ dbg_printf("Kernel stack overflow at 0x%p\n", vaddr);
+ PANIC("Kernel stack overflow.");
+ }
+
if ((size_t)vaddr >= PD_MIRROR_ADDR) {
dbg_printf("Fault on access to mirrorred PD at 0x%p\n", vaddr);
-
- uint32_t x = (size_t)vaddr - PD_MIRROR_ADDR;
- uint32_t page = (x % PAGE_SIZE) / 4;
- uint32_t pt = x / PAGE_SIZE;
- dbg_printf("For pt 0x%p, page 0x%p -> addr 0x%p\n", pt, page, ((pt * 1024) + page) * PAGE_SIZE);
-
- for (int i = 0; i < N_PAGES_IN_PT; i++) {
- //dbg_printf("%i. 0x%p\n", i, kernel_pd.page[i]);
- }
-
- dbg_dump_registers(regs);
dbg_print_region_stats();
PANIC("Unhandled kernel space page fault");
}
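
The handler change above uses the usual C idiom for naming an assembler symbol: declare it extern char and take &kernel_stack_protector for its address (see the declaration added near the top of paging.c). A self-contained sketch of the same range check, with the guard page simulated by a static array since the real one lives in loader.s:

#include <stdio.h>

#define PAGE_SIZE 0x1000

/* stand-in for the page-aligned, never-mapped page that loader.s
 * reserves just below the kernel stack */
static char kernel_stack_protector[PAGE_SIZE]
	__attribute__((aligned(PAGE_SIZE)));

/* mirrors the test added to page_fault_handler() */
static int is_stack_overflow(const void *vaddr) {
	const char *g = kernel_stack_protector;
	return (const char *)vaddr >= g && (const char *)vaddr < g + PAGE_SIZE;
}

int main(void) {
	printf("%d\n", is_stack_overflow(kernel_stack_protector + 8));         /* 1 */
	printf("%d\n", is_stack_overflow(kernel_stack_protector + PAGE_SIZE)); /* 0 */
	return 0;
}
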
@@ -108,7 +104,11 @@ void paging_setup(void* kernel_data_end) {
ASSERT(PAGE_OF_ADDR(K_HIGHHALF_ADDR) == 0); // kernel is 4M-aligned
ASSERT(FIRST_KERNEL_PT == 768);
for (size_t i = 0; i < n_kernel_pages; i++) {
- kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
+ if ((i * PAGE_SIZE) + K_HIGHHALF_ADDR == (size_t)&kernel_stack_protector) {
+ frame_free(i, 1); // don't map kernel stack protector page
+ } else {
+ kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
+ }
}
for (size_t i = n_kernel_pages; i < 1024; i++){
kernel_pt0.page[i] = 0;
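
In the setup loop above, the page whose high-half virtual address coincides with kernel_stack_protector is simply skipped: kernel_pt0 is a zero-initialized static, so the skipped entry stays non-present, and frame_free() hands the corresponding physical frame back to the allocator so it is not wasted. A sketch of the branch, with illustrative values in place of the project's globals:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       0x1000u
#define K_HIGHHALF_ADDR 0xC0000000u

int main(void) {
	uint32_t protector      = 0xC0108000u;  /* assumed &kernel_stack_protector */
	uint32_t n_kernel_pages = 64;           /* assumed kernel size in pages */

	for (uint32_t i = 0; i < n_kernel_pages; i++) {
		uint32_t vaddr = K_HIGHHALF_ADDR + i * PAGE_SIZE;
		if (vaddr == protector)
			printf("page %u: left unmapped, frame %u freed\n", i, i);
		/* else: map frame i into kernel_pt0 as in the diff */
	}
	return 0;
}
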
@@ -180,8 +180,6 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
(new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;
invlpg(&current_pt[pt]);
}
- dbg_printf("[%p,%i,%i,", vaddr, pt, page);
-
current_pt[pt].page[page] =
(frame_id << PTE_FRAME_SHIFT)
| PTE_PRESENT
@@ -189,8 +187,6 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
| (rw ? PTE_RW : 0);
invlpg(vaddr);
- dbg_printf("]");
-
return 0;
}
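
The pd_map_page() cleanup above removes debug prints that fired on every mapped page, which flooded the debug log once mappings became frequent. What remains is the PTE composition; a hedged sketch with conventional i386 bit values, which may differ from the project's actual PTE_* macros:

#include <stdint.h>
#include <stdio.h>

/* conventional i386 page-table-entry bits (assumed, not taken from
 * this project's headers) */
#define PTE_PRESENT     0x001u
#define PTE_RW          0x002u
#define PTE_GLOBAL      0x100u
#define PTE_FRAME_SHIFT 12

static uint32_t make_pte(uint32_t frame_id, int rw, int global) {
	return (frame_id << PTE_FRAME_SHIFT)
	     | PTE_PRESENT
	     | (global ? PTE_GLOBAL : 0)
	     | (rw ? PTE_RW : 0);
}

int main(void) {
	printf("0x%08x\n", make_pte(0x1234, 1, 0));  /* 0x01234003 */
	return 0;
}
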