| | | |
|---|---|---|
| author | Alex Auvolat <alex.auvolat@ens.fr> | 2015-03-02 18:16:15 +0100 |
| committer | Alex Auvolat <alex.auvolat@ens.fr> | 2015-03-02 18:16:15 +0100 |
| commit | ceb687b02964197133fd2236cdbc74bf3948d034 (patch) | |
| tree | b34b7aeec70990978c2562aa2d7b00202a223926 /src/kernel/core/kmalloc.c | |
| parent | b68881abc4c50bbc8ee9e81b4e18b0ea011b83b7 (diff) | |
No lazy allocation of kernel memory. Placeholder for pmem freeing routine.
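In short, page_alloc_fun_for_kmalloc no longer leaves kernel-heap pages to be populated lazily by the page-fault handler: every frame is allocated and mapped up front, and any partial work is undone if a frame cannot be obtained or mapped. Below is a minimal host-side sketch of that all-or-nothing mapping pattern; the _mock helpers and the three-frame limit are illustrative stand-ins, not the kernel's real frame_alloc/pd_map_page interfaces.

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/* Mock stand-ins for the kernel's frame allocator and page-directory
 * interface; simplified for illustration only. The mock allocator runs
 * out after three frames so the rollback path is actually exercised. */
static int next_frame = 1;
static int frame_alloc_mock(void) { return next_frame <= 3 ? next_frame++ : 0; }
static void frame_free_mock(int f) { printf("  freed frame %d\n", f); }
static bool pd_map_page_mock(uintptr_t va, int f) {
	printf("  mapped 0x%lx -> frame %d\n", (unsigned long)va, f);
	return true;
}
static void pd_unmap_page_mock(uintptr_t va) {
	printf("  unmapped 0x%lx\n", (unsigned long)va);
}

/* Eagerly back a virtual range with physical frames; if any frame
 * allocation or mapping fails, undo everything done so far so the
 * caller sees an all-or-nothing result. */
static bool map_range(uintptr_t addr, size_t bytes, int mapped[]) {
	size_t n = bytes / PAGE_SIZE;
	for (size_t i = 0; i < n; i++) {
		int f = frame_alloc_mock();
		if (f == 0 || !pd_map_page_mock(addr + i * PAGE_SIZE, f)) {
			while (i-- > 0) {              /* rollback */
				pd_unmap_page_mock(addr + i * PAGE_SIZE);
				frame_free_mock(mapped[i]);
			}
			return false;
		}
		mapped[i] = f;
	}
	return true;
}

int main(void) {
	int frames[4];
	bool ok = map_range(0x10000000UL, 4 * PAGE_SIZE, frames);
	printf("map_range: %s\n", ok ? "fully backed" : "failed, rolled back");
	return 0;
}
```

The real routine in the diff below follows the same shape, except that during rollback it queries the page directory with pd_get_frame() instead of remembering the frames in an array.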
Diffstat (limited to 'src/kernel/core/kmalloc.c')
| | | |
|---|---|---|
| -rw-r--r-- | src/kernel/core/kmalloc.c | 35 |

1 file changed, 33 insertions, 2 deletions
```diff
diff --git a/src/kernel/core/kmalloc.c b/src/kernel/core/kmalloc.c
index e15572a..a8b115b 100644
--- a/src/kernel/core/kmalloc.c
+++ b/src/kernel/core/kmalloc.c
@@ -6,10 +6,30 @@
 #include <frame.h>
 #include <paging.h>
 #include <region.h>
+#include <freemem.h>
 
 static void* page_alloc_fun_for_kmalloc(size_t bytes) {
-	void* addr = region_alloc(bytes, "Core kernel heap", default_allocator_pf_handler);
+	void* addr = region_alloc(bytes, "Core kernel heap", pf_handler_unexpected);
+	if (addr == 0) return 0;
+
+	// Map physical memory
+	for (void* i = addr; i < addr + bytes; i += PAGE_SIZE) {
+		int f = frame_alloc(1);
+		if (f == 0) goto failure;
+		if (!pd_map_page(i, f, true)) goto failure;
+	}
+
 	return addr;
+
+failure:
+	for (void* i = addr; i < addr + bytes; i += PAGE_SIZE) {
+		int f = pd_get_frame(i);
+		if (f != 0) {
+			pd_unmap_page(i);
+			frame_free(f, 1);
+		}
+	}
+	return 0;
 }
 
 static slab_type_t slab_sizes[] = {
@@ -35,7 +55,7 @@ void kmalloc_setup() {
 		region_free_unmap_free);
 }
 
-void* malloc(size_t sz) {
+static void* malloc0(size_t sz) {
 	void* res = 0;
 
 	mutex_lock(&malloc_mutex);
@@ -45,6 +65,17 @@ void* malloc(size_t sz) {
 	return res;
 }
 
+void* malloc(size_t sz) {
+	void* res;
+	int tries = 0;
+
+	while ((res = malloc0(sz)) == 0 && (tries++) < 3) {
+		free_some_memory();
+	}
+
+	return res;
+}
+
 void free(void* ptr) {
 	mutex_lock(&malloc_mutex);
 	slab_free(kernel_allocator, ptr);
```
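The new malloc() wrapper retries the slab allocation up to three times, calling free_some_memory() between attempts; per the commit message, the physical-memory freeing routine behind <freemem.h> is only a placeholder at this stage. Here is a hypothetical sketch of one way such a reclaim hook could eventually be shaped; the callback registry and every name in it are assumptions for illustration, not code from the repository.

```c
#include <stddef.h>

/* Hypothetical reclaim-callback registry; nothing here is taken from
 * the kogata sources. The commit only introduces the entry point. */
#define MAX_RECLAIM_HOOKS 8

typedef void (*reclaim_hook_t)(void);

static reclaim_hook_t reclaim_hooks[MAX_RECLAIM_HOOKS];
static size_t n_reclaim_hooks = 0;

/* Subsystems that cache physical frames (block cache, VFS, ...) could
 * register a hook that drops part of that cache when memory is tight. */
void register_reclaim_hook(reclaim_hook_t hook) {
	if (n_reclaim_hooks < MAX_RECLAIM_HOOKS)
		reclaim_hooks[n_reclaim_hooks++] = hook;
}

/* One possible shape for the placeholder: ask each registered
 * subsystem in turn to give some frames back. */
void free_some_memory(void) {
	for (size_t i = 0; i < n_reclaim_hooks; i++)
		reclaim_hooks[i]();
}
```

With a shape like this, the retry loop degrades gracefully: if no subsystem can give frames back, malloc0() keeps returning 0 and the wrapper gives up after its three attempts.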