aboutsummaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/include/slab_alloc.h34
-rw-r--r--kernel/l0/kmain.c44
-rw-r--r--kernel/lib/slab_alloc.c217
4 files changed, 292 insertions, 5 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 58a6339..2adacbc 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ CFLAGS = -ffreestanding -O2 -std=gnu99 -Wall -Wextra -I . -I ./include -g -Wno-u
LD = i586-elf-gcc
LDFLAGS = -T linker.ld -ffreestanding -O2 -nostdlib -lgcc
-OBJ = lib/string.o lib/printf.o \
+OBJ = lib/string.o lib/printf.o lib/slab_alloc.o \
l0/loader.o l0/kmain.o l0/dbglog.o l0/sys.o \
l0/gdt.o l0/idt.o l0/interrupt.o \
l0/frame.o l0/paging.o l0/region.o
diff --git a/kernel/include/slab_alloc.h b/kernel/include/slab_alloc.h
new file mode 100644
index 0000000..a3bd7de
--- /dev/null
+++ b/kernel/include/slab_alloc.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// Self-contained piece of library : a slab allocator...
+// Depends on page_alloc_fun_t and page_free_fun_t : a couple of functions
+// that can allocate/free multiples of one page at a time
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+#include <sys.h>
+
+// expected format for the array of slab_type_t given to slab_create :
+// an array of slab_type descriptors, with last descriptor full of zeroes
+// and with obj_size increasing (strictly) in the array
+typedef struct slab_type {
+	const char *descr;        // human-readable name of this size class
+	size_t obj_size;          // size in bytes of one object of this class
+	size_t pages_per_cache;   // pages allocated at once for one cache of this class
+} slab_type_t;
+
+// opaque allocator handle ; layout is private to lib/slab_alloc.c
+struct mem_allocator;
+typedef struct mem_allocator mem_allocator_t;
+
+// backend callbacks : page-granularity allocate / free
+typedef void* (*page_alloc_fun_t)(const size_t bytes);
+typedef void (*page_free_fun_t)(const void* ptr);
+
+// create an allocator using the given size classes and page backend ;
+// returns 0 on failure
+mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff);
+// release every region still held by the allocator, then the allocator itself
+void destroy_slab_allocator(mem_allocator_t*);
+
+// allocate sz bytes (from a slab cache if sz fits a size class,
+// otherwise from a dedicated region) ; returns 0 on failure
+void* slab_alloc(mem_allocator_t* a, const size_t sz);
+// free a pointer previously returned by slab_alloc on the same allocator
+void slab_free(mem_allocator_t* a, const void* ptr);
+
+/* vim: set ts=4 sw=4 tw=0 noet :*/
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index 1f290a7..e052eac 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -9,6 +9,8 @@
#include <paging.h>
#include <region.h>
+#include <slab_alloc.h>
+
void breakpoint_handler(registers_t *regs) {
dbg_printf("Breakpoint! (int3)\n");
BOCHS_BREAKPOINT;
@@ -25,6 +27,28 @@ void test_pf_handler(pagedir_t *pd, region_info_t *i, size_t addr) {
if (error) PANIC("Could not map frame (OOM)");
}
+// Glue for the slab allocator : reserve `bytes` of core-heap virtual address
+// space, demand-paged by test_pf_handler on first access.
+// NOTE(review): assumes region_alloc returns an address castable to void* — confirm.
+void* page_alloc_fun_for_kmalloc(const size_t bytes) {
+	return (void*)region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler);
+}
+// Glue for the slab allocator : release a region previously obtained from
+// page_alloc_fun_for_kmalloc.
+void page_free_fun_for_kmalloc(const void* ptr) {
+	region_free((size_t)ptr);
+}
+// Size classes for the kernel heap : powers of two from 8B to 8KB,
+// strictly increasing, terminated by an all-zero descriptor as required
+// by create_slab_allocator.
+slab_type_t slab_sizes[] = {
+	{ "8B obj", 8, 1 },
+	{ "16B obj", 16, 2 },
+	{ "32B obj", 32, 2 },
+	{ "64B obj", 64, 2 },
+	{ "128B obj", 128, 2 },
+	{ "256B obj", 256, 4 },
+	{ "512B obj", 512, 4 },
+	{ "1KB obj", 1024, 8 },
+	{ "2KB obj", 2048, 8 },
+	{ "4KB obj", 4096, 16 },
+	{ "8KB obj", 8192, 32 },
+	{ 0, 0, 0 }
+};
+
+
extern char k_end_addr; // defined in linker script : 0xC0000000 plus kernel stuff
void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
@@ -109,11 +133,23 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
}
region_free(s);
+ // TEST SLAB ALLOCATOR!!!
+ mem_allocator_t *a = create_slab_allocator(slab_sizes,
+ page_alloc_fun_for_kmalloc,
+ page_free_fun_for_kmalloc);
+ dbg_printf("Created slab allocator at 0x%p\n", a);
+ const int m = 100;
+ void* ptr[m];
+ for (int i = 0; i < m; i++) {
+ size_t s = 1 << ((i * 7) % 12);
+ ptr[i] = slab_alloc(a, s);
+ dbg_printf("Alloc %i : 0x%p\n", s, ptr[i]);
+ dbg_print_region_stats();
+ }
+ for (int i = 0; i < m; i++) {
+ slab_free(a, ptr[i]);
+ }
- // TODO:
- // - setup allocator for physical pages (eg: buddy allocator, see OSDev wiki)
- // - setup allocator for virtual memory space
- // - setup paging
PANIC("Reached kmain end! Falling off the edge.");
}
diff --git a/kernel/lib/slab_alloc.c b/kernel/lib/slab_alloc.c
new file mode 100644
index 0000000..e1aa8b5
--- /dev/null
+++ b/kernel/lib/slab_alloc.c
@@ -0,0 +1,217 @@
+#include <slab_alloc.h>
+
+// Free-list node stored in-place inside each unallocated object.
+typedef struct object {
+	struct object *next;
+} object_t;
+
+// Descriptor for one region of memory obtained from the page allocator.
+// Doubles as either a slab cache of small objects (is_a_cache == 1) or the
+// record of a big block allocated on its own (is_a_cache == 0).
+typedef struct cache {
+	struct { // pack this so that it takes less space...
+		uint32_t is_a_cache : 1; // if not, this is a region allocated alone
+		uint32_t n_free_objs : 31;
+	};
+
+	union {
+		struct { // when this is a cache
+			object_t* first_free_obj;	// stack of free objects in this cache
+			struct cache *next_cache;	// next cache of the same size class
+		} c;
+		struct { // when this is a region allocated alone
+			size_t region_size;
+		} sr;
+	};
+
+	size_t region_addr;		// base address of the underlying region
+	struct cache *next_region;	// links all live regions; also reused as the
+					// link of the free-descriptor pool
+} cache_t;
+
+// Head of the cache list for one size class.
+typedef struct slab {
+	cache_t *first_cache; // linked list of caches
+} slab_t;
+
+struct mem_allocator {
+	const slab_type_t *types;	// size-class table (zero-terminated)
+	slab_t *slabs;			// one slab_t per size class
+	cache_t *first_free_region_descriptor;	// pool of unused descriptors
+	cache_t *all_regions;		// every region currently allocated
+
+	page_alloc_fun_t alloc_fun;
+	page_free_fun_t free_fun;
+};
+
+// ============================================== //
+// Helper functions for the manipulation of lists //
+// ============================================== //
+
+// Push region descriptor c back onto the allocator's free-descriptor pool.
+// (static: internal helper, not part of the slab_alloc.h interface — the
+// original had external linkage and leaked into the kernel namespace)
+static void add_free_region_descriptor(mem_allocator_t *a, cache_t *c) {
+	c->next_region = a->first_free_region_descriptor;
+	a->first_free_region_descriptor = c;
+}
+
+// Pop a free region descriptor from the pool, or return 0 if exhausted.
+// (static: internal helper, not part of the slab_alloc.h interface — the
+// original had external linkage and leaked into the kernel namespace)
+static cache_t *take_region_descriptor(mem_allocator_t *a) {
+	if (a->first_free_region_descriptor == 0) {
+		// TODO : allocate more descriptors (not complicated)
+		return 0;
+	}
+	cache_t *x = a->first_free_region_descriptor;
+	a->first_free_region_descriptor = x->next_region;
+	return x;
+}
+
+// ============================== //
+// The actual allocator functions //
+// ============================== //
+
+// Set up a new slab allocator. A single page obtained from af() holds, in
+// order: the mem_allocator_t header, one slab_t per size class, and as many
+// region descriptors (cache_t) as fit in the remainder of the page.
+// Returns 0 if the page cannot be allocated, or if the header + slab array
+// would not fit in one page (the original wrote past the page in that case).
+mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff) {
+	union {
+		size_t addr;
+		mem_allocator_t *a;
+		slab_t *s;
+		cache_t *c;
+	} ptr;
+
+	// count the size classes (array is terminated by an all-zero descriptor)
+	size_t n_types = 0;
+	while (types[n_types].obj_size != 0) n_types++;
+
+	// refuse layouts whose metadata would overflow the single page ;
+	// also require room for at least one region descriptor
+	if (sizeof(mem_allocator_t) + n_types * sizeof(slab_t) + sizeof(cache_t) > PAGE_SIZE)
+		return 0;
+
+	ptr.addr = (size_t)af(PAGE_SIZE);
+	if (ptr.addr == 0) return 0;	// could not allocate
+	const size_t end_addr = ptr.addr + PAGE_SIZE;
+
+	mem_allocator_t *a = ptr.a;
+	ptr.a++;
+
+	a->all_regions = 0;
+	a->alloc_fun = af;
+	a->free_fun = ff;
+
+	// slab_t array, one entry per size class, all initially empty
+	a->types = types;
+	a->slabs = ptr.s;
+	for (size_t i = 0; i < n_types; i++) {
+		ptr.s->first_cache = 0;
+		ptr.s++;
+	}
+
+	// use the rest of the page as the initial pool of region descriptors
+	a->first_free_region_descriptor = 0;
+	while ((size_t)(ptr.c + 1) <= end_addr) {
+		add_free_region_descriptor(a, ptr.c);
+		ptr.c++;
+	}
+
+	return a;
+}
+
+// Recursively walk the region list and free each region on the way back up,
+// so regions are released in reverse list order (oldest allocation first).
+// The address is copied to a local before recursing, in case the descriptor
+// itself lives in a region freed deeper in the recursion.
+// NOTE(review): recursion depth equals the number of live regions — may be a
+// concern on a small kernel stack if many regions exist; confirm.
+static void stack_and_destroy_regions(page_free_fun_t ff, cache_t *r) {
+	if (r == 0) return;
+	size_t addr = r->region_addr;
+	stack_and_destroy_regions(ff, r->next_region);
+	ff((void*)addr);
+}
+// Free every region still owned by the allocator, then the allocator's own
+// metadata page last (it holds the region descriptors used during teardown).
+void destroy_slab_allocator(mem_allocator_t *a) {
+	stack_and_destroy_regions(a->free_fun, a->all_regions);
+	a->free_fun(a);
+}
+
+// Allocate sz bytes. Requests that fit a size class are served from that
+// class's caches (a new cache is created on demand) ; larger requests get a
+// dedicated region straight from a->alloc_fun. Returns 0 on failure.
+void* slab_alloc(mem_allocator_t* a, const size_t sz) {
+	for (int i = 0; a->types[i].obj_size != 0; i++) {
+		const size_t obj_size = a->types[i].obj_size;
+		if (sz <= obj_size) {
+			// find a cache of this class with free space
+			cache_t *fc = a->slabs[i].first_cache;
+			while (fc != 0 && fc->n_free_objs == 0) {
+				ASSERT(fc->c.first_free_obj == 0); // n_free == 0 iff free stack empty
+				fc = fc->c.next_cache;
+			}
+			// if none found, try to allocate a new one
+			if (fc == 0) {
+				fc = take_region_descriptor(a);
+				if (fc == 0) return 0;
+
+				const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE;
+				fc->region_addr = (size_t)a->alloc_fun(cache_size);
+				if (fc->region_addr == 0) {
+					add_free_region_descriptor(a, fc);
+					return 0;
+				}
+
+				// carve the region into objects, all pushed on the free stack
+				// (loop variable renamed : the original's `size_t i` shadowed
+				// the outer size-class index `i`)
+				fc->is_a_cache = 1;
+				fc->n_free_objs = 0;
+				fc->c.first_free_obj = 0;
+				for (size_t obj = fc->region_addr; obj + obj_size <= fc->region_addr + cache_size; obj += obj_size) {
+					object_t *x = (object_t*)obj;
+					x->next = fc->c.first_free_obj;
+					fc->c.first_free_obj = x;
+					fc->n_free_objs++;
+				}
+				ASSERT(fc->n_free_objs == cache_size / obj_size);
+
+				// register the new cache : on the global region list and on
+				// this size class's cache list
+				fc->next_region = a->all_regions;
+				a->all_regions = fc;
+				fc->c.next_cache = a->slabs[i].first_cache;
+				a->slabs[i].first_cache = fc;
+			}
+			// pop one object from fc's free stack
+			ASSERT(fc != 0 && fc->n_free_objs > 0);
+			object_t *x = fc->c.first_free_obj;
+			fc->c.first_free_obj = x->next;
+			fc->n_free_objs--;
+			// TODO : if fc is full, put it at the end
+			return (void*)x;
+		}
+	}
+
+	// sz exceeds the largest size class : allocate a dedicated region
+	cache_t *r = take_region_descriptor(a);
+	if (r == 0) return 0;
+
+	r->region_addr = (size_t)a->alloc_fun(sz);
+	if (r->region_addr == 0) {
+		add_free_region_descriptor(a, r);
+		return 0;
+	} else {
+		r->is_a_cache = 0;
+		r->sr.region_size = sz;
+
+		r->next_region = a->all_regions;
+		a->all_regions = r;
+
+		return (void*)r->region_addr;
+	}
+}
+
+// Return an object or region to the allocator.
+void slab_free(mem_allocator_t* a, const void* ptr) {
+	const size_t addr = (size_t)ptr;
+
+	// is it an object belonging to one of the slab caches?
+	for (int i = 0; a->types[i].obj_size != 0; i++) {
+		const size_t region_size = PAGE_SIZE * a->types[i].pages_per_cache;
+		for (cache_t *r = a->slabs[i].first_cache; r != 0; r = r->c.next_cache) {
+			if (addr >= r->region_addr && addr < r->region_addr + region_size) {
+				ASSERT((addr - r->region_addr) % a->types[i].obj_size == 0);
+				ASSERT(r->is_a_cache);
+				// push the object back on the cache's free stack
+				object_t *o = (object_t*)addr;
+				o->next = r->c.first_free_obj;
+				r->c.first_free_obj = o;
+				r->n_free_objs++;
+				// TODO : if cache is empty, free it
+				return;
+			}
+		}
+	}
+
+	// otherwise the block was directly allocated : find and unlink its
+	// descriptor FIRST, and only free the memory once the pointer is known
+	// to belong to us (the original called free_fun before the lookup, so an
+	// unknown pointer was freed and only then hit ASSERT(false))
+	ASSERT(a->all_regions != 0);
+	cache_t *r = 0;
+	if (a->all_regions->region_addr == addr) {
+		r = a->all_regions;
+		a->all_regions = r->next_region;
+	} else {
+		for (cache_t *p = a->all_regions; p->next_region != 0; p = p->next_region) {
+			if (p->next_region->region_addr == addr) {
+				r = p->next_region;
+				p->next_region = r->next_region;
+				break;
+			}
+		}
+	}
+	ASSERT(r != 0 && r->is_a_cache == 0);
+	a->free_fun(ptr);
+	add_free_region_descriptor(a, r);
+}