From 274765f7daa3cc1094f9f26196fcf2b9a5289ee2 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Fri, 5 Dec 2014 15:06:17 +0100 Subject: Corrections : - replace size_t by void* in many places - correct bug in region freeing code --- kernel/l0/kmain.c | 50 +++++++++++++++++++++++++++++++------------------- kernel/l0/paging.c | 34 ++++++++++++++++------------------ kernel/l0/region.c | 42 +++++++++++++++++++----------------------- 3 files changed, 66 insertions(+), 60 deletions(-) (limited to 'kernel/l0') diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c index e052eac..a935c9c 100644 --- a/kernel/l0/kmain.c +++ b/kernel/l0/kmain.c @@ -16,7 +16,7 @@ void breakpoint_handler(registers_t *regs) { BOCHS_BREAKPOINT; } -void test_pf_handler(pagedir_t *pd, region_info_t *i, size_t addr) { +void test_pf_handler(pagedir_t *pd, region_info_t *i, void* addr) { dbg_printf("0x%p", addr); uint32_t f = frame_alloc(1); @@ -27,11 +27,11 @@ void test_pf_handler(pagedir_t *pd, region_info_t *i, size_t addr) { if (error) PANIC("Could not map frame (OOM)"); } -void* page_alloc_fun_for_kmalloc(const size_t bytes) { - return (void*)region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler); +void* page_alloc_fun_for_kmalloc(size_t bytes) { + return region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler); } -void page_free_fun_for_kmalloc(const void* ptr) { - region_free((size_t)ptr); +void page_free_fun_for_kmalloc(void* ptr) { + region_free(ptr); } slab_type_t slab_sizes[] = { { "8B obj", 8, 1 }, @@ -44,7 +44,6 @@ slab_type_t slab_sizes[] = { { "1KB obj", 1024, 8 }, { "2KB obj", 2048, 8 }, { "4KB obj", 4096, 16 }, - { "8KB obj", 8192, 32 }, { 0, 0, 0 } }; @@ -72,7 +71,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) { // used for allocation of data structures before malloc is set up // a pointer to this pointer is passed to the functions that might have // to allocate memory ; they just increment it of the allocated quantity - void* kernel_data_end = (void*)&k_end_addr; + 
void* kernel_data_end = &k_end_addr; frame_init_allocator(total_ram, &kernel_data_end); dbg_printf("kernel_data_end: 0x%p\n", kernel_data_end); @@ -84,16 +83,16 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) { region_allocator_init(kernel_data_end); dbg_print_region_stats(); - size_t p = region_alloc(0x1000, REGION_T_HW, 0); + void* p = region_alloc(0x1000, REGION_T_HW, 0); dbg_printf("Allocated one-page region: 0x%p\n", p); dbg_print_region_stats(); - size_t q = region_alloc(0x1000, REGION_T_HW, 0); + void* q = region_alloc(0x1000, REGION_T_HW, 0); dbg_printf("Allocated one-page region: 0x%p\n", q); dbg_print_region_stats(); - size_t r = region_alloc(0x2000, REGION_T_HW, 0); + void* r = region_alloc(0x2000, REGION_T_HW, 0); dbg_printf("Allocated two-page region: 0x%p\n", r); dbg_print_region_stats(); - size_t s = region_alloc(0x10000, REGION_T_CORE_HEAP, 0); + void* s = region_alloc(0x10000, REGION_T_CORE_HEAP, 0); dbg_printf("Allocated 16-page region: 0x%p\n", s); dbg_print_region_stats(); region_free(p); @@ -111,7 +110,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) { // allocate a big region and try to write into it const size_t n = 1000; - size_t p0 = region_alloc(n * PAGE_SIZE, REGION_T_HW, test_pf_handler); + void* p0 = region_alloc(n * PAGE_SIZE, REGION_T_HW, test_pf_handler); for (size_t i = 0; i < n; i++) { uint32_t *x = (uint32_t*)(p0 + i * PAGE_SIZE); dbg_printf("[%i : ", i); @@ -125,30 +124,43 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) { uint32_t *x = (uint32_t*)(p0 + i * PAGE_SIZE); ASSERT(x[1] == (i * 20422) % 122); - size_t f = pd_get_frame((size_t)x); + uint32_t f = pd_get_frame(x); ASSERT(f != 0); - pd_unmap_page((size_t)x); + pd_unmap_page(x); frame_free(f, 1); } region_free(s); // TEST SLAB ALLOCATOR!!! 
- mem_allocator_t *a = create_slab_allocator(slab_sizes, - page_alloc_fun_for_kmalloc, - page_free_fun_for_kmalloc); + mem_allocator_t *a = + create_slab_allocator(slab_sizes, page_alloc_fun_for_kmalloc, + page_free_fun_for_kmalloc); dbg_printf("Created slab allocator at 0x%p\n", a); const int m = 100; void* ptr[m]; for (int i = 0; i < m; i++) { - size_t s = 1 << ((i * 7) % 12); + size_t s = 1 << ((i * 7) % 12 + 1); ptr[i] = slab_alloc(a, s); dbg_printf("Alloc %i : 0x%p\n", s, ptr[i]); dbg_print_region_stats(); } for (int i = 0; i < m; i++) { - slab_free(a, ptr[i]); + slab_free(a, ptr[m - i - 1]); } + dbg_print_region_stats(); + for (int i = 0; i < m; i++) { + size_t s = 1 << ((i * 7) % 12 + 1); + ASSERT(slab_alloc(a, s) == ptr[i]); + } + dbg_print_region_stats(); + for (int i = 0; i < m; i++) { + slab_free(a, ptr[m - i - 1]); + } + dbg_print_region_stats(); + dbg_printf("Destroying slab allocator.\n"); + destroy_slab_allocator(a); + dbg_print_region_stats(); PANIC("Reached kmain end! Falling off the edge."); diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c index 0104d5e..ca217c6 100644 --- a/kernel/l0/paging.c +++ b/kernel/l0/paging.c @@ -23,7 +23,7 @@ typedef struct page_table { } pagetable_t; struct page_directory { - size_t phys_addr; // physical address of page directory + uint32_t phys_addr; // physical address of page directory // to modify a page directory, we first map it // then we can use mirroring to edit it // (the last 4M of the address space are mapped to the PD itself) @@ -46,17 +46,15 @@ static pagedir_t *current_pd_d; #define current_pd ((pagetable_t*)(PD_MIRROR_ADDR + (N_PAGES_IN_PT-1)*PAGE_SIZE)) void page_fault_handler(registers_t *regs) { - size_t vaddr; + void* vaddr; asm volatile("movl %%cr2, %0":"=r"(vaddr)); - if (vaddr >= K_HIGHHALF_ADDR) { + if ((size_t)vaddr >= K_HIGHHALF_ADDR) { uint32_t pt = PT_OF_ADDR(vaddr); - if (current_pd != &kernel_pd && vaddr >= K_HIGHHALF_ADDR - && current_pd->page[pt] != kernel_pd.page[pt]) - { + if 
(current_pd != &kernel_pd && current_pd->page[pt] != kernel_pd.page[pt]) { current_pd->page[pt] = kernel_pd.page[pt]; - invlpg((size_t)(&current_pt[pt])); + invlpg(&current_pt[pt]); return; } @@ -104,7 +102,7 @@ void paging_setup(void* kernel_data_end) { kernel_pd.page[N_PAGES_IN_PT-1] = (((size_t)&kernel_pd - K_HIGHHALF_ADDR) & PAGE_MASK) | PTE_PRESENT | PTE_RW; - invlpg(K_HIGHHALF_ADDR); + invlpg((void*)K_HIGHHALF_ADDR); // paging already enabled in loader, nothing to do. switch_pagedir(&kernel_pd_d); @@ -128,7 +126,7 @@ pagedir_t *get_kernel_pagedir() { void switch_pagedir(pagedir_t *pd) { asm volatile("movl %0, %%cr3":: "r"(pd->phys_addr)); - invlpg((size_t)current_pd); + invlpg(current_pd); current_pd_d = pd; } @@ -136,46 +134,46 @@ void switch_pagedir(pagedir_t *pd) { // Mapping and unmapping of pages // // ============================== // -uint32_t pd_get_frame(size_t vaddr) { +uint32_t pd_get_frame(void* vaddr) { uint32_t pt = PT_OF_ADDR(vaddr); uint32_t page = PAGE_OF_ADDR(vaddr); - pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd); + pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd); if (!pd->page[pt] & PTE_PRESENT) return 0; if (!current_pt[pt].page[page] & PTE_PRESENT) return 0; return current_pt[pt].page[page] >> PTE_FRAME_SHIFT; } -int pd_map_page(size_t vaddr, uint32_t frame_id, uint32_t rw) { +int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) { uint32_t pt = PT_OF_ADDR(vaddr); uint32_t page = PAGE_OF_ADDR(vaddr); - pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd); + pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? 
&kernel_pd : current_pd); if (!pd->page[pt] & PTE_PRESENT) { - size_t new_pt_frame = frame_alloc(1); + uint32_t new_pt_frame = frame_alloc(1); if (new_pt_frame == 0) return 1; // OOM current_pd->page[pt] = pd->page[pt] = (new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW; - invlpg((size_t)&current_pt[pt]); + invlpg(&current_pt[pt]); } current_pt[pt].page[page] = frame_id << PTE_FRAME_SHIFT | PTE_PRESENT - | (vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL) + | ((size_t)vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL) | (rw ? PTE_RW : 0); return 0; } -void pd_unmap_page(size_t vaddr) { +void pd_unmap_page(void* vaddr) { uint32_t pt = PT_OF_ADDR(vaddr); uint32_t page = PAGE_OF_ADDR(vaddr); - pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd); + pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd); if (!pd->page[pt] & PTE_PRESENT) return; if (!current_pt[pt].page[page] & PTE_PRESENT) return; diff --git a/kernel/l0/region.c b/kernel/l0/region.c index 5673f83..7b1d138 100644 --- a/kernel/l0/region.c +++ b/kernel/l0/region.c @@ -7,7 +7,8 @@ typedef union region_descriptor { union region_descriptor *next; } unused_descriptor; struct { - size_t addr, size; + void* addr; + size_t size; union region_descriptor *next_by_size, *first_bigger; union region_descriptor *next_by_addr; } free; @@ -100,9 +101,9 @@ static void add_free_region(descriptor_t *d) { add_unused_descriptor(d); add_free_region(i); return; - } else if (i->free.next_by_addr == 0) { + } else if (i->free.next_by_addr == 0 || i->free.next_by_addr->free.addr > d->free.addr) { + d->free.next_by_addr = i->free.next_by_addr; i->free.next_by_addr = d; - d->free.next_by_addr = 0; break; } else if (d->free.addr + d->free.size == i->free.next_by_addr->free.addr) { // concatenate d . 
i->next_by_addr @@ -112,11 +113,6 @@ static void add_free_region(descriptor_t *d) { add_unused_descriptor(j); add_free_region(d); return; - } else if (i->free.next_by_addr->free.addr > d->free.addr) { - // insert between i and i->next_by_addr - d->free.next_by_addr = i->free.next_by_addr; - i->free.next_by_addr = d; - break; } else { // continue i = i->free.next_by_addr; @@ -134,19 +130,19 @@ static void add_free_region(descriptor_t *d) { while (i != 0) { ASSERT(d->free.size > i->free.size); if (i->free.next_by_size == 0) { - i->free.next_by_size = d; - if (d->free.size > i->free.size) i->free.first_bigger = d; d->free.next_by_size = 0; d->free.first_bigger = 0; - break; - } else if (i->free.next_by_size->free.size >= d->free.size) { i->free.next_by_size = d; if (d->free.size > i->free.size) i->free.first_bigger = d; + break; + } else if (i->free.next_by_size->free.size >= d->free.size) { d->free.next_by_size = i->free.next_by_size; d->free.first_bigger = (i->free.next_by_size->free.size > d->free.size ? 
i->free.next_by_size : i->free.next_by_size->free.first_bigger); + i->free.next_by_size = d; + if (d->free.size > i->free.size) i->free.first_bigger = d; break; } else { // continue @@ -156,7 +152,7 @@ static void add_free_region(descriptor_t *d) { } } -static descriptor_t *find_used_region(size_t addr) { +static descriptor_t *find_used_region(void* addr) { for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) { if (addr >= i->used.i.addr && addr < i->used.i.addr + i->used.i.size) return i; if (i->used.i.addr > addr) break; @@ -201,7 +197,7 @@ static void remove_used_region(descriptor_t *d) { void region_allocator_init(void* kernel_data_end) { descriptor_t *u0 = &base_descriptors[0]; - u0->used.i.addr = K_HIGHHALF_ADDR; + u0->used.i.addr = (void*)K_HIGHHALF_ADDR; u0->used.i.size = PAGE_ALIGN_UP(kernel_data_end) - K_HIGHHALF_ADDR; u0->used.i.type = REGION_T_KERNEL_BASE; u0->used.i.pf = 0; @@ -209,8 +205,8 @@ void region_allocator_init(void* kernel_data_end) { first_used_region = u0; descriptor_t *f0 = &base_descriptors[1]; - f0->free.addr = PAGE_ALIGN_UP(kernel_data_end); - f0->free.size = (LAST_KERNEL_ADDR-f0->free.addr); + f0->free.addr = (void*)PAGE_ALIGN_UP(kernel_data_end); + f0->free.size = ((void*)LAST_KERNEL_ADDR - f0->free.addr); f0->free.next_by_size = 0; f0->free.first_bigger = 0; first_free_region_by_size = first_free_region_by_addr = f0; @@ -222,7 +218,7 @@ void region_allocator_init(void* kernel_data_end) { } } -static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t pf, bool use_reserve) { +static void* region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t pf, bool use_reserve) { size = PAGE_ALIGN_UP(size); for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.first_bigger) { @@ -253,7 +249,7 @@ static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_ remove_free_region(i); if (x != 0) add_free_region(x); - size_t addr = i->free.addr; + void* addr = 
i->free.addr; i->used.i.addr = addr; i->used.i.size = size; i->used.i.type = type; @@ -266,12 +262,12 @@ static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_ return 0; //No big enough block found } -size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { +void* region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) { uint32_t frame = frame_alloc(1); if (frame == 0) return 0; - size_t descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true); + void* descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true); ASSERT(descriptor_region != 0); int error = pd_map_page(descriptor_region, frame, 1); @@ -283,7 +279,7 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { } for (descriptor_t *d = (descriptor_t*)descriptor_region; - (size_t)(d+1) <= (descriptor_region + PAGE_SIZE); + (void*)(d+1) <= (descriptor_region + PAGE_SIZE); d++) { add_unused_descriptor(d); } @@ -291,13 +287,13 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { return region_alloc_inner(size, type, pf, false); } -region_info_t *find_region(size_t addr) { +region_info_t *find_region(void* addr) { descriptor_t *d = find_used_region(addr); if (d == 0) return 0; return &d->used.i; } -void region_free(size_t addr) { +void region_free(void* addr) { descriptor_t *d = find_used_region(addr); if (d == 0) return; -- cgit v1.2.3