diff options
author    Alex Auvolat <alex.auvolat@ens.fr>  2014-12-03 17:20:29 +0100
committer Alex Auvolat <alex.auvolat@ens.fr>  2014-12-03 17:20:29 +0100
commit    d78a3d8be9c194554580cb3c73c1c5ebd0d82a9b (patch)
tree      68f05ca7bedff54a5b7ec20bbcc33232b235a44f
parent    6a56675851c16a0cefcf5a2d10a1227c37e9d886 (diff)
download  macroscope-d78a3d8be9c194554580cb3c73c1c5ebd0d82a9b.tar.gz
          macroscope-d78a3d8be9c194554580cb3c73c1c5ebd0d82a9b.zip
Improve region allocator with an idea to break dependency cycle
-rw-r--r--  kernel/include/paging.h |  2
-rw-r--r--  kernel/include/region.h |  8
-rw-r--r--  kernel/l0/kmain.c       |  2
-rw-r--r--  kernel/l0/region.c      | 81
4 files changed, 80 insertions, 13 deletions
diff --git a/kernel/include/paging.h b/kernel/include/paging.h index 5766e65..7082e2e 100644 --- a/kernel/include/paging.h +++ b/kernel/include/paging.h @@ -13,6 +13,8 @@ pagedir_t *get_kernel_pagedir(); void switch_pagedir(pagedir_t *pd); +// The three functions below DO NOT DEPEND on argument pd for +// addresses above K_HIGHHALF_ADDR, so just pass 0 to map/unmap in kernel space. uint32_t pd_get_frame(pagedir_t *pd, size_t vaddr); // get physical frame for virtual address int pd_map_page(pagedir_t *pd, size_t vaddr, uint32_t frame_id, diff --git a/kernel/include/region.h b/kernel/include/region.h index 701e2d9..3c64081 100644 --- a/kernel/include/region.h +++ b/kernel/include/region.h @@ -8,8 +8,7 @@ // Region types #define REGION_T_KERNEL_BASE 0x00000001 // base kernel code & data #define REGION_T_DESCRIPTORS 0x00000002 // contains more region descriptors -#define REGION_T_PAGEDIR 0x00000010 // used to map a page directory -#define REGION_T_PAGETABLE 0x00000020 // used to map a page table +#define REGION_T_PAGETABLE 0x00000010 // used to map a page table/page directory #define REGION_T_CORE_HEAP 0x00000100 // used for the core kernel heap #define REGION_T_PROC_HEAP 0x00000200 // used for a kernel process' heap #define REGION_T_CACHE 0x00001000 // used for cache @@ -30,4 +29,9 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf); // ret region_info_t *find_region(size_t addr); void region_free(size_t addr); +#define N_PAGES_IN_PT_REGION 4 +// special call for use by paging code +// allocates a region of N_PAGES_IN_PT_REGION pages +size_t region_alloc_for_pt(); + void dbg_print_region_stats(); diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c index cd84cf7..f07e243 100644 --- a/kernel/l0/kmain.c +++ b/kernel/l0/kmain.c @@ -26,7 +26,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) { idt_init(); dbg_printf("IDT set up.\n"); idt_set_ex_handler(EX_BREAKPOINT, breakpoint_handler); - asm volatile("int $0x3"); // test breakpoint + 
// asm volatile("int $0x3"); // test breakpoint size_t total_ram = ((mbd->mem_upper + mbd->mem_lower) * 1024); dbg_printf("Total ram: %d Kb\n", total_ram / 1024); diff --git a/kernel/l0/region.c b/kernel/l0/region.c index 421a018..63e464d 100644 --- a/kernel/l0/region.c +++ b/kernel/l0/region.c @@ -1,5 +1,6 @@ #include <region.h> #include <dbglog.h> +#include <frame.h> typedef union region_descriptor { struct { @@ -16,10 +17,13 @@ typedef union region_descriptor { } used; } descriptor_t; +#define N_RESERVE_DESCRIPTORS 3 // always keep at least 3 unused descriptors + #define N_BASE_DESCRIPTORS 12 // pre-allocate memory for 12 descriptors static descriptor_t base_descriptors[N_BASE_DESCRIPTORS]; static descriptor_t *first_unused_descriptor; +uint32_t n_unused_descriptors; static descriptor_t *first_free_region_by_addr, *first_free_region_by_size; static descriptor_t *first_used_region; @@ -28,13 +32,17 @@ static descriptor_t *first_used_region; // ========================================================= // static void add_unused_descriptor(descriptor_t *d) { + n_unused_descriptors++; d->unused_descriptor.next = first_unused_descriptor; first_unused_descriptor = d; } static descriptor_t *get_unused_descriptor() { descriptor_t *r = first_unused_descriptor; - if (r != 0) first_unused_descriptor = r->unused_descriptor.next; + if (r != 0) { + first_unused_descriptor = r->unused_descriptor.next; + n_unused_descriptors--; + } return r; } @@ -207,24 +215,33 @@ void region_allocator_init(void* kernel_data_end) { f0->free.first_bigger = 0; first_free_region_by_size = first_free_region_by_addr = f0; + n_unused_descriptors = 0; first_unused_descriptor = 0; for (int i = 2; i < N_BASE_DESCRIPTORS; i++) { add_unused_descriptor(&base_descriptors[i]); } } -size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { +static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t pf, bool use_reserve) { size = PAGE_ALIGN_UP(size); for (descriptor_t *i 
= first_free_region_by_size; i != 0; i = i->free.first_bigger) { if (i->free.size >= size) { - remove_free_region(i); + // region i is the one we want to allocate in + descriptor_t *x = 0; if (i->free.size > size) { - descriptor_t *x = get_unused_descriptor(); - if (x == 0) { + if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS && !use_reserve) { return 0; - // TODO: allocate more descriptors } + + // this assert is a bit tricky to prove, + // but basically it means that the allocation function + // is called less than N_RESERVE_DESCRIPTORS times with + // the use_reserve flag before more descriptors + // are allocated. + x = get_unused_descriptor(); + ASSERT(x != 0); + x->free.size = i->free.size - size; if (size >= 0x4000) { x->free.addr = i->free.addr + size; @@ -232,18 +249,63 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { x->free.addr = i->free.addr; i->free.addr += x->free.size; } - add_free_region(x); } + // do the allocation + remove_free_region(i); + if (x != 0) add_free_region(x); + size_t addr = i->free.addr; i->used.i.addr = addr; i->used.i.size = size; i->used.i.type = type; i->used.i.pf = pf; add_used_region(i); + return addr; } } - return 0; //Not found + return 0; //No big enough block found +} + +bool region_alloc_for_pt_use_reserve = false; +size_t region_alloc_for_pt() { + if (region_alloc_for_pt_use_reserve) { + return region_alloc_inner( + N_PAGES_IN_PT_REGION * PAGE_SIZE, + REGION_T_PAGETABLE, + 0, true); + } else { + return region_alloc( + N_PAGES_IN_PT_REGION * PAGE_SIZE, + REGION_T_PAGETABLE, 0); + } +} + +size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) { + if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) { + uint32_t frame = frame_alloc(1); + if (frame == 0) return 0; + + size_t descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true); + ASSERT(descriptor_region != 0); + + region_alloc_for_pt_use_reserve = true; + int error = pd_map_page(0, descriptor_region, 
frame, 1); + region_alloc_for_pt_use_reserve = false; + if (error) { + // this can happen if we weren't able to allocate a frame for + // a new pagetable + frame_free(frame, 1); + return 0; + } + + for (descriptor_t *d = (descriptor_t*)descriptor_region; + (size_t)(d+1) <= (descriptor_region + PAGE_SIZE); + d++) { + add_unused_descriptor(d); + } + } + return region_alloc_inner(size, type, pf, false); } region_info_t *find_region(size_t addr) { @@ -280,8 +342,7 @@ void dbg_print_region_stats() { dbg_printf("| 0x%p - 0x%p", d->used.i.addr, d->used.i.addr + d->used.i.size); if (d->used.i.type & REGION_T_KERNEL_BASE) dbg_printf(" Kernel code & base data"); if (d->used.i.type & REGION_T_DESCRIPTORS) dbg_printf(" Region descriptors"); - if (d->used.i.type & REGION_T_PAGEDIR) dbg_printf(" Mapped page directory"); - if (d->used.i.type & REGION_T_PAGETABLE) dbg_printf(" Mapped page table"); + if (d->used.i.type & REGION_T_PAGETABLE) dbg_printf(" Mapped PD/PT"); if (d->used.i.type & REGION_T_CORE_HEAP) dbg_printf(" Core heap"); if (d->used.i.type & REGION_T_PROC_HEAP) dbg_printf(" Kernel process heap"); if (d->used.i.type & REGION_T_CACHE) dbg_printf(" Cache"); |