path: root/src/kernel/core
author    Alex Auvolat <alex@adnab.me>	2015-03-14 16:36:31 +0100
committer Alex Auvolat <alex@adnab.me>	2015-03-14 16:36:31 +0100
commit    3e2a3170501fb02b5b46a342c47d2ba8b1a6e244 (patch)
tree      6cd0a3f3ded016de581e3b3841ab19f1e511dd86 /src/kernel/core
parent    57c5eceb846f3109d017b4e6d4cbd7c507c0d098 (diff)
download  kogata-3e2a3170501fb02b5b46a342c47d2ba8b1a6e244.tar.gz
          kogata-3e2a3170501fb02b5b46a342c47d2ba8b1a6e244.zip
Factorize the region allocator between kernel and user processes (the same code was duplicated in both)
Diffstat (limited to 'src/kernel/core')
-rw-r--r--	src/kernel/core/kmain.c	3
-rw-r--r--	src/kernel/core/region.c	379
2 files changed, 10 insertions, 372 deletions
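
Note: the diff below deletes the kernel-specific allocator implementation from region.c, keeping only a small page-mapping callback. Judging from the new call site in kmain.c, the shared allocator now receives the address range it manages and the mapping function as parameters. What follows is a minimal sketch of that presumed interface; the typedef name map_page_fun_t and the exact prototype are assumptions, the real declarations live in <region.h> and may differ.

    /* Sketch of the factored-out interface, inferred from the kmain.c call
     * site and the new alloc_map_single_frame() helper; the typedef name
     * and parameter names are assumptions, not taken from this commit. */
    #include <stdbool.h>

    typedef bool (*map_page_fun_t)(void* addr); /* back one page at addr, false on failure */

    /* The allocator is told explicitly which range it manages (begin..end),
     * which leading part is already in use (begin..rsvd_end, e.g. kernel
     * code & data), and how to map a page, so the same code can serve both
     * kernel space and user-process address spaces. */
    void region_allocator_init(void* begin, void* rsvd_end, void* end,
                               map_page_fun_t map_page);

    /* Kernel-side usage, as in the kmain.c hunk below:
     *   region_allocator_init((void*)K_HIGHHALF_ADDR,
     *                         (void*)PAGE_ALIGN_UP(kernel_data_end),
     *                         (void*)LAST_KERNEL_ADDR,
     *                         alloc_map_single_frame);
     */
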
diff --git a/src/kernel/core/kmain.c b/src/kernel/core/kmain.c
index 3ee2238..2774e11 100644
--- a/src/kernel/core/kmain.c
+++ b/src/kernel/core/kmain.c
@@ -113,7 +113,8 @@ void kmain(multiboot_info_t *mbd, int32_t mb_magic) {
paging_setup(kernel_data_end);
dbg_printf("Paging seems to be working!\n");
- region_allocator_init(kernel_data_end);
+ region_allocator_init((void*)K_HIGHHALF_ADDR, (void*)PAGE_ALIGN_UP(kernel_data_end),
+ (void*)LAST_KERNEL_ADDR, alloc_map_single_frame);
dbg_printf("Region allocator initialized.\n");
TEST_PLACEHOLDER_AFTER_REGION;
diff --git a/src/kernel/core/region.c b/src/kernel/core/region.c
index 6a4c7b4..2a55fb3 100644
--- a/src/kernel/core/region.c
+++ b/src/kernel/core/region.c
@@ -1,352 +1,23 @@
#include <region.h>
-#include <dbglog.h>
#include <frame.h>
-#include <mutex.h>
+#include <paging.h>
-typedef union region_descriptor {
- struct {
- union region_descriptor *next;
- } unused_descriptor;
- struct {
- void* addr;
- size_t size;
- union region_descriptor *next_by_size, *first_bigger;
- union region_descriptor *next_by_addr;
- } free;
- struct {
- region_info_t i;
- union region_descriptor *next_by_addr;
- } used;
-} descriptor_t;
+bool alloc_map_single_frame(void* addr) {
+ int frame = frame_alloc(1);
+ if (frame == 0) return false;
-#define N_RESERVE_DESCRIPTORS 2 // always keep at least 2 unused descriptors
-
-#define N_BASE_DESCRIPTORS 12 // pre-allocate memory for 12 descriptors
-static descriptor_t base_descriptors[N_BASE_DESCRIPTORS];
-
-static descriptor_t *first_unused_descriptor;
-uint32_t n_unused_descriptors;
-static descriptor_t *first_free_region_by_addr, *first_free_region_by_size;
-static descriptor_t *first_used_region;
-
-STATIC_MUTEX(ra_mutex); // region allocator mutex
-
-// ========================================================= //
-// HELPER FUNCTIONS FOR THE MANIPULATION OF THE REGION LISTS //
-// ========================================================= //
-
-void add_unused_descriptor(descriptor_t *d) {
- n_unused_descriptors++;
- d->unused_descriptor.next = first_unused_descriptor;
- first_unused_descriptor = d;
-}
-
-descriptor_t *get_unused_descriptor() {
- descriptor_t *r = first_unused_descriptor;
- if (r != 0) {
- first_unused_descriptor = r->unused_descriptor.next;
- n_unused_descriptors--;
+ if (!pd_map_page(addr, frame, true)) {
+ frame_free(frame, 1);
+ return false;
}
- return r;
-}
-void remove_free_region(descriptor_t *d) {
- if (first_free_region_by_size == d) {
- first_free_region_by_size = d->free.next_by_size;
- } else {
- for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.next_by_size) {
- if (i->free.first_bigger == d) {
- i->free.first_bigger = d->free.next_by_size;
- }
- if (i->free.next_by_size == d) {
- i->free.next_by_size = d->free.next_by_size;
- break;
- }
- }
- }
- if (first_free_region_by_addr == d) {
- first_free_region_by_addr = d->free.next_by_addr;
- } else {
- for (descriptor_t *i = first_free_region_by_addr; i != 0; i = i->free.next_by_addr) {
- if (i->free.next_by_addr == d) {
- i->free.next_by_addr = d->free.next_by_addr;
- break;
- }
- }
- }
-}
-
-void add_free_region(descriptor_t *d) {
- /*dbg_printf("Add free region 0x%p - 0x%p\n", d->free.addr, d->free.size + d->free.addr);*/
- // Find position of region in address-ordered list
- // Possibly concatenate free region
- descriptor_t *i = first_free_region_by_addr;
- if (i == 0) {
- ASSERT(first_free_region_by_size == 0);
- first_free_region_by_addr = first_free_region_by_size = d;
- d->free.next_by_size = d->free.first_bigger = d->free.next_by_addr = 0;
- return;
- } else if (d->free.addr + d->free.size == i->free.addr) {
- // concatenate d . i
- remove_free_region(i);
- d->free.size += i->free.size;
- add_unused_descriptor(i);
- add_free_region(d);
- return;
- } else if (i->free.addr > d->free.addr) {
- // insert before i
- d->free.next_by_addr = i;
- first_free_region_by_addr = d;
- } else {
- while (i != 0) {
- ASSERT(d->free.addr > i->free.addr);
- if (i->free.addr + i->free.size == d->free.addr) {
- // concatenate i . d
- remove_free_region(i);
- i->free.size += d->free.size;
- add_unused_descriptor(d);
- add_free_region(i);
- return;
- } else if (i->free.next_by_addr == 0 || i->free.next_by_addr->free.addr > d->free.addr) {
- d->free.next_by_addr = i->free.next_by_addr;
- i->free.next_by_addr = d;
- break;
- } else if (d->free.addr + d->free.size == i->free.next_by_addr->free.addr) {
- // concatenate d . i->next_by_addr
- descriptor_t *j = i->free.next_by_addr;
- remove_free_region(j);
- d->free.size += j->free.size;
- add_unused_descriptor(j);
- add_free_region(d);
- return;
- } else {
- // continue
- i = i->free.next_by_addr;
- }
- }
- }
- // Now add it in size-ordered list
- i = first_free_region_by_size;
- ASSERT(i != 0);
- if (d->free.size <= i->free.size) {
- d->free.next_by_size = i;
- d->free.first_bigger = (i->free.size > d->free.size ? i : i->free.first_bigger);
- first_free_region_by_size = d;
- } else {
- while (i != 0) {
- ASSERT(d->free.size > i->free.size);
- if (i->free.next_by_size == 0) {
- d->free.next_by_size = 0;
- d->free.first_bigger = 0;
- i->free.next_by_size = d;
- if (d->free.size > i->free.size) i->free.first_bigger = d;
- break;
- } else if (i->free.next_by_size->free.size >= d->free.size) {
- d->free.next_by_size = i->free.next_by_size;
- d->free.first_bigger =
- (i->free.next_by_size->free.size > d->free.size
- ? i->free.next_by_size
- : i->free.next_by_size->free.first_bigger);
- i->free.next_by_size = d;
- if (d->free.size > i->free.size) i->free.first_bigger = d;
- break;
- } else {
- // continue
- i = i->free.next_by_size;
- }
- }
- }
-}
-
-descriptor_t *find_used_region(void* addr) {
- for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) {
- if (addr >= i->used.i.addr && addr < i->used.i.addr + i->used.i.size) return i;
- if (i->used.i.addr > addr) break;
- }
- return 0;
-}
-
-void add_used_region(descriptor_t *d) {
- descriptor_t *i = first_used_region;
- ASSERT(i != 0);
- ASSERT(i->used.i.addr < d->used.i.addr); // first region by address is never free
-
- while (i != 0) {
- ASSERT(i != d);
- ASSERT(i->used.i.addr != d->used.i.addr);
- ASSERT(i->used.i.addr < d->used.i.addr);
- if (i->used.next_by_addr == 0 || i->used.next_by_addr->used.i.addr > d->used.i.addr) {
- d->used.next_by_addr = i->used.next_by_addr;
- i->used.next_by_addr = d;
- return;
- } else {
- i = i->used.next_by_addr;
- }
- }
- ASSERT(false);
-}
-
-void remove_used_region(descriptor_t *d) {
- if (first_used_region == d) {
- first_used_region = d->used.next_by_addr;
- } else {
- for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) {
- if (i->used.i.addr > d->used.i.addr) break;
- if (i->used.next_by_addr == d) {
- i->used.next_by_addr = d->used.next_by_addr;
- break;
- }
- }
- }
-}
-
-// =============== //
-// THE ACTUAL CODE //
-// =============== //
-
-void region_allocator_init(void* kernel_data_end) {
- n_unused_descriptors = 0;
- first_unused_descriptor = 0;
- for (int i = 0; i < N_BASE_DESCRIPTORS; i++) {
- add_unused_descriptor(&base_descriptors[i]);
- }
-
- descriptor_t *f0 = get_unused_descriptor();
- f0->free.addr = (void*)PAGE_ALIGN_UP(kernel_data_end);
- f0->free.size = ((void*)LAST_KERNEL_ADDR - f0->free.addr);
- f0->free.next_by_size = 0;
- f0->free.first_bigger = 0;
- first_free_region_by_size = first_free_region_by_addr = f0;
-
- descriptor_t *u0 = get_unused_descriptor();
- u0->used.i.addr = (void*)K_HIGHHALF_ADDR;
- u0->used.i.size = PAGE_ALIGN_UP(kernel_data_end) - K_HIGHHALF_ADDR;
- u0->used.i.type = "Kernel code & data";
- u0->used.next_by_addr = 0;
- first_used_region = u0;
-}
-
-void region_free_inner(void* addr) {
- descriptor_t *d = find_used_region(addr);
- if (d == 0) return;
-
- region_info_t i = d->used.i;
-
- remove_used_region(d);
- d->free.addr = i.addr;
- d->free.size = i.size;
- add_free_region(d);
-}
-void region_free(void* addr) {
- mutex_lock(&ra_mutex);
- region_free_inner(addr);
- mutex_unlock(&ra_mutex);
-}
-
-void* region_alloc_inner(size_t size, char* type, bool use_reserve) {
- size = PAGE_ALIGN_UP(size);
-
- for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.first_bigger) {
- if (i->free.size >= size) {
- // region i is the one we want to allocate in
- descriptor_t *x = 0;
- if (i->free.size > size) {
- if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS && !use_reserve) {
- return 0;
- }
-
- // this assert basically means that the allocation function
- // is called less than N_RESERVE_DESCRIPTORS times with
- // the use_reserve flag before more descriptors
- // are allocated.
- x = get_unused_descriptor();
- ASSERT(x != 0);
-
- x->free.size = i->free.size - size;
- if (size >= 0x4000) {
- x->free.addr = i->free.addr + size;
- } else {
- x->free.addr = i->free.addr;
- i->free.addr += x->free.size;
- }
- }
- // do the allocation
- remove_free_region(i);
- if (x != 0) add_free_region(x);
-
- void* addr = i->free.addr;
- i->used.i.addr = addr;
- i->used.i.size = size;
- i->used.i.type = type;
- add_used_region(i);
-
- return addr;
- }
- }
- return 0; //No big enough block found
-}
-
-void* region_alloc(size_t size, char* type) {
- void* result = 0;
- mutex_lock(&ra_mutex);
-
- if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) {
- uint32_t frame = frame_alloc(1);
- if (frame == 0) goto try_anyway;
-
- void* descriptor_region = region_alloc_inner(PAGE_SIZE, "Region descriptors", true);
- ASSERT(descriptor_region != 0);
-
- bool map_ok = pd_map_page(descriptor_region, frame, 1);
- if (!map_ok) {
- // this can happen if we weren't able to allocate a frame for
- // a new pagetable
- frame_free(frame, 1);
- region_free_inner(descriptor_region);
- goto try_anyway;
- }
-
- for (descriptor_t *d = (descriptor_t*)descriptor_region;
- (void*)(d+1) <= (descriptor_region + PAGE_SIZE);
- d++) {
- add_unused_descriptor(d);
- }
- }
- try_anyway:
- // even if we don't have enough unused descriptors, we might find
- // a free region that has exactly the right size and therefore
- // does not require splitting, so we try the allocation in all cases
- result = region_alloc_inner(size, type, false);
-
- mutex_unlock(&ra_mutex);
- return result;
-}
-
-region_info_t *find_region(void* addr) {
- region_info_t *r = 0;
- mutex_lock(&ra_mutex);
-
- descriptor_t *d = find_used_region(addr);
- if (d != 0) r = &d->used.i;
-
- mutex_unlock(&ra_mutex);
- return r;
+ return true;
}
// ========================================================= //
// HELPER FUNCTIONS : SIMPLE PF HANDLERS ; FREEING FUNCTIONS //
// ========================================================= //
-void pf_handler_unexpected(pagedir_t *pd, struct region_info *r, void* addr) {
- dbg_printf("Unexpected page fault at 0x%p\n", addr);
- PANIC("Unexpected page fault");
-}
-
-void pf_handler_stackoverflow(pagedir_t *pd, struct region_info *r, void* addr) {
- dbg_printf("Kernel stack overflow at 0x%p\n", addr);
- PANIC("Kernel stack overflow");
-}
-
void region_free_unmap_free(void* ptr) {
region_info_t *i = find_region(ptr);
ASSERT(i != 0);
@@ -371,38 +42,4 @@ void region_free_unmap(void* ptr) {
region_free(ptr);
}
-// =========================== //
-// DEBUG LOG PRINTING FUNCTION //
-// =========================== //
-
-void dbg_print_region_info() {
- mutex_lock(&ra_mutex);
-
- dbg_printf("/ Free kernel regions, by address:\n");
- for (descriptor_t *d = first_free_region_by_addr; d != 0; d = d->free.next_by_addr) {
- dbg_printf("| 0x%p - 0x%p\n", d->free.addr, d->free.addr + d->free.size);
- ASSERT(d != d->free.next_by_addr);
- }
- dbg_printf("- Free kernel regions, by size:\n");
- for (descriptor_t *d = first_free_region_by_size; d != 0; d = d->free.next_by_size) {
- dbg_printf("| 0x%p - 0x%p ", d->free.addr, d->free.addr + d->free.size);
- dbg_printf("(0x%p, next in size: 0x%p)\n", d->free.size,
- (d->free.first_bigger == 0 ? 0 : d->free.first_bigger->free.addr));
- ASSERT(d != d->free.next_by_size);
- }
- dbg_printf("- Used kernel regions:\n");
- for (descriptor_t *d = first_used_region; d != 0; d = d->used.next_by_addr) {
- dbg_printf("| 0x%p - 0x%p %s\n", d->used.i.addr, d->used.i.addr + d->used.i.size, d->used.i.type);
- ASSERT(d != d->used.next_by_addr);
- }
-
- int nfreerd = 0;
- for (descriptor_t *i = first_unused_descriptor; i != 0; i = i->unused_descriptor.next)
- nfreerd++;
-
- dbg_printf("\\ Free region descriptors: %d (%d)\n", nfreerd, n_unused_descriptors);
-
- mutex_unlock(&ra_mutex);
-}
-
/* vim: set ts=4 sw=4 tw=0 noet :*/