author     Alex Auvolat <alex.auvolat@ens.fr>   2014-12-06 19:31:05 +0100
committer  Alex Auvolat <alex.auvolat@ens.fr>   2014-12-06 19:31:05 +0100
commit     a48466109f59d507f9108635a5dc4ec865173f85 (patch)
tree       fea09e9e54d40352a25a885b361d694d40b7417b /kernel
parent     ff27e3d5ca61fb6234b0876dc3368d0e5dfc5a95 (diff)
Clean slab_alloc. Bug still here.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/l0/kmain.c         2
-rw-r--r--  kernel/l0/paging.c        2
-rw-r--r--  kernel/lib/slab_alloc.c   196
3 files changed, 109 insertions, 91 deletions
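
The allocator's public interface is unchanged by this commit; only the internal descriptor layout is reworked. As a point of reference, the sketch below shows how the functions touched here (create_slab_allocator, slab_alloc, slab_free, destroy_slab_allocator) would typically be driven. It is a hypothetical usage example, not code from the repository: the slab_type_t field names (obj_size, pages_per_cache) and the zero-terminated type table are taken from the diff, while the header name and the mock_page_alloc/mock_page_free callbacks (backed by hosted malloc/free instead of the kernel's region_alloc-based functions in kmain.c) are assumptions for illustration only.

#include <stdlib.h>
#include "slab_alloc.h"   /* assumed kernel header declaring the allocator API */

/* Stand-ins for the kernel's page-granularity callbacks; plain malloc/free
 * suffice for a hosted sketch, though they are not page-aligned. */
static void* mock_page_alloc(size_t bytes) { return malloc(bytes); }
static void  mock_page_free(void* ptr)     { free(ptr); }

/* Object-size classes; a zero obj_size terminates the table. */
static const slab_type_t types[] = {
	{ .obj_size = 8,  .pages_per_cache = 1 },
	{ .obj_size = 64, .pages_per_cache = 2 },
	{ .obj_size = 0 },
};

void slab_alloc_usage_sketch(void) {
	mem_allocator_t *a = create_slab_allocator(types, mock_page_alloc, mock_page_free);

	void *small = slab_alloc(a, 8);       /* served from a cache of 8-byte objects */
	void *large = slab_alloc(a, 65536);   /* larger than any class: direct region  */

	slab_free(a, small);
	slab_free(a, large);
	destroy_slab_allocator(a);            /* releases caches, regions, descriptors */
}
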
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index 8cb370a..97b5a97 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -31,10 +31,12 @@ void test_pf_handler(pagedir_t *pd, region_info_t *i, void* addr) {
void* page_alloc_fun_for_kmalloc(size_t bytes) {
void* addr = region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler);
+ dbg_printf("Alloc %p bytes for kmalloc at: %p\n", bytes, addr);
return addr;
}
void page_free_fun_for_kmalloc(void* ptr) {
region_info_t *i = find_region(ptr);
+ ASSERT(i != 0 && i->type == REGION_T_CORE_HEAP);
for (void* x = i->addr; x < i->addr + i->size; x += PAGE_SIZE) {
uint32_t f = pd_get_frame(x);
if (f != 0) {
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index adb076c..5000c71 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -67,7 +67,7 @@ void page_fault_handler(registers_t *regs) {
dbg_printf("For pt 0x%p, page 0x%p -> addr 0x%p\n", pt, page, ((pt * 1024) + page) * PAGE_SIZE);
for (int i = 0; i < N_PAGES_IN_PT; i++) {
- dbg_printf("%i. 0x%p\n", i, kernel_pd.page[i]);
+ //dbg_printf("%i. 0x%p\n", i, kernel_pd.page[i]);
}
dbg_dump_registers(regs);
diff --git a/kernel/lib/slab_alloc.c b/kernel/lib/slab_alloc.c
index d9efc4f..8e8c0a3 100644
--- a/kernel/lib/slab_alloc.c
+++ b/kernel/lib/slab_alloc.c
@@ -5,24 +5,25 @@ typedef struct object {
} object_t;
typedef struct cache {
- struct { // pack this so that it takes less space...
- uint32_t is_a_cache : 1; // if not, this is a region allocated alone
- uint32_t n_free_objs : 31;
- };
+ void* region_addr;
+
+ uint32_t n_free_objs;
+ object_t* first_free_obj;
- union {
- struct { // when this is a cache
- object_t* first_free_obj;
- struct cache *next_cache;
- } c;
- struct { // when this is a region allocated alone
- size_t region_size;
- } sr;
- };
+ struct cache *next_cache; // next cache in this slab
+} cache_t;
+typedef struct region {
void* region_addr;
- struct cache *next_region;
-} cache_t;
+ size_t region_size;
+ struct region *next_region;
+} region_t;
+
+typedef union descriptor {
+ cache_t c;
+ region_t r;
+ union descriptor *next_free;
+} descriptor_t;
typedef struct slab {
cache_t *first_cache; // linked list of caches
@@ -31,8 +32,9 @@ typedef struct slab {
struct mem_allocator {
const slab_type_t *types;
slab_t *slabs;
- cache_t *first_free_region_descriptor;
- cache_t *all_caches, *all_other_regions;;
+
+ descriptor_t *first_free_descriptor;
+ region_t *all_regions;
page_alloc_fun_t alloc_fun;
page_free_fun_t free_fun;
@@ -42,23 +44,36 @@ struct mem_allocator {
// Helper functions for the manipulation of lists //
// ============================================== //
-void add_free_region_descriptor(mem_allocator_t *a, cache_t *c) {
- c->next_region = a->first_free_region_descriptor;
- a->first_free_region_descriptor = c;
+static void add_free_descriptor(mem_allocator_t *a, descriptor_t *c) {
+ c->next_free = a->first_free_descriptor;
+ a->first_free_descriptor = c;
}
-cache_t *take_region_descriptor(mem_allocator_t *a) {
- if (a->first_free_region_descriptor == 0) {
+static descriptor_t *take_descriptor(mem_allocator_t *a) {
+ if (a->first_free_descriptor == 0) {
void* p = a->alloc_fun(PAGE_SIZE);
if (p == 0) return 0;
- void* end = p + PAGE_SIZE;
- for (cache_t *i = (cache_t*)p; i + 1 <= (cache_t*)end; i++) {
- add_free_region_descriptor(a, i);
+ const void* end = p + PAGE_SIZE;
+ for (descriptor_t *i = (descriptor_t*)p; i + 1 <= (descriptor_t*)end; i++) {
+ add_free_descriptor(a, i);
}
+
+ // register the descriptor region
+ descriptor_t *dd = a->first_free_descriptor;
+ ASSERT(dd != 0);
+ a->first_free_descriptor = dd->next_free;
+
+ region_t *drd = &dd->r;
+ drd->region_addr = p;
+ drd->region_size = PAGE_SIZE;
+ drd->next_region = a->all_regions;
+ a->all_regions = drd;
}
- cache_t *x = a->first_free_region_descriptor;
- a->first_free_region_descriptor = x->next_region;
+
+ descriptor_t *x = a->first_free_descriptor;
+ ASSERT(x != 0);
+ a->first_free_descriptor = x->next_free;
return x;
}
@@ -71,7 +86,7 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
void* addr;
mem_allocator_t *a;
slab_t *s;
- cache_t *c;
+ descriptor_t *d;
} ptr;
ptr.addr = af(PAGE_SIZE);
@@ -81,27 +96,28 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
mem_allocator_t *a = ptr.a;
ptr.a++;
- a->all_caches = a->all_other_regions = 0;
+ a->all_regions = 0;
a->alloc_fun = af;
a->free_fun = ff;
a->types = types;
a->slabs = ptr.s;
for (const slab_type_t *t = types; t->obj_size != 0; t++) {
+ ASSERT(t->obj_size >= sizeof(object_t));
ptr.s->first_cache = 0;
ptr.s++;
}
- a->first_free_region_descriptor = 0;
- while ((void*)(ptr.c + 1) <= end_addr) {
- add_free_region_descriptor(a, ptr.c);
- ptr.c++;
+ a->first_free_descriptor = 0;
+ while ((void*)(ptr.d + 1) <= end_addr) {
+ add_free_descriptor(a, ptr.d);
+ ptr.d++;
}
return a;
}
-static void stack_and_destroy_regions(page_free_fun_t ff, cache_t *r) {
+static void stack_and_destroy_regions(page_free_fun_t ff, region_t *r) {
if (r == 0) return;
void* addr = r->region_addr;
ASSERT(r != r->next_region);
@@ -109,8 +125,12 @@ static void stack_and_destroy_regions(page_free_fun_t ff, cache_t *r) {
ff(addr);
}
void destroy_slab_allocator(mem_allocator_t *a) {
- stack_and_destroy_regions(a->free_fun, a->all_caches);
- stack_and_destroy_regions(a->free_fun, a->all_other_regions);
+ for (int i = 0; a->types[i].obj_size != 0; i++) {
+ for (cache_t *c = a->slabs[i].first_cache; c != 0; c = c->next_cache) {
+ a->free_fun(c->region_addr);
+ }
+ }
+ stack_and_destroy_regions(a->free_fun, a->all_regions);
a->free_fun(a);
}
@@ -121,61 +141,66 @@ void* slab_alloc(mem_allocator_t* a, size_t sz) {
// find a cache with free space
cache_t *fc = a->slabs[i].first_cache;
while (fc != 0 && fc->n_free_objs == 0) {
- ASSERT(fc->c.first_free_obj == 0); // make sure n_free == 0 iff no object in the free stack
- fc = fc->c.next_cache;
+ ASSERT(fc->first_free_obj == 0); // make sure n_free == 0 iff no object in the free stack
+ fc = fc->next_cache;
}
// if none found, try to allocate a new one
if (fc == 0) {
- fc = take_region_descriptor(a);
- if (fc == 0) return 0;
+ descriptor_t *fcd = take_descriptor(a);
+ if (fcd == 0) return 0;
+
+ fc = &fcd->c;
+ ASSERT((descriptor_t*)fc == fcd);
const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE;
fc->region_addr = a->alloc_fun(cache_size);
if (fc->region_addr == 0) {
- add_free_region_descriptor(a, fc);
+ add_free_descriptor(a, fcd);
return 0;
}
- fc->is_a_cache = 1;
fc->n_free_objs = 0;
- fc->c.first_free_obj = 0;
- for (void* i = fc->region_addr; i + obj_size <= fc->region_addr + cache_size; i += obj_size) {
- object_t *x = (object_t*)i;
- x->next = fc->c.first_free_obj;
- fc->c.first_free_obj = x;
+ fc->first_free_obj = 0;
+ for (void* p = fc->region_addr; p + obj_size <= fc->region_addr + cache_size; p += obj_size) {
+ object_t *x = (object_t*)p;
+ x->next = fc->first_free_obj;
+ fc->first_free_obj = x;
fc->n_free_objs++;
}
ASSERT(fc->n_free_objs == cache_size / obj_size);
- fc->next_region = a->all_caches;
- a->all_caches = fc;
- fc->c.next_cache = a->slabs[i].first_cache;
+ fc->next_cache = a->slabs[i].first_cache;
a->slabs[i].first_cache = fc;
}
// allocate on fc
ASSERT(fc != 0 && fc->n_free_objs > 0);
- object_t *x = fc->c.first_free_obj;
- fc->c.first_free_obj = x->next;
+
+ object_t *x = fc->first_free_obj;
+ fc->first_free_obj = x->next;
fc->n_free_objs--;
+
+ ASSERT((fc->n_free_objs == 0) == (fc->first_free_obj == 0));
+
// TODO : if fc is full, put it at the end
return x;
}
}
// otherwise directly allocate using a->alloc_fun
- cache_t *r = take_region_descriptor(a);
- if (r == 0) return 0;
+ descriptor_t *rd = take_descriptor(a);
+ if (rd == 0) return 0;
+ region_t *r = &rd->r;
+ ASSERT((descriptor_t*)r == rd);
r->region_addr = a->alloc_fun(sz);
if (r->region_addr == 0) {
- add_free_region_descriptor(a, r);
+ add_free_descriptor(a, rd);
return 0;
} else {
- r->is_a_cache = 0;
- r->sr.region_size = sz;
+ r->region_size = sz;
- r->next_region = a->all_other_regions;
- a->all_other_regions = r;
+ r->next_region = a->all_regions;
+ a->all_regions = r;
return (void*)r->region_addr;
}
@@ -185,39 +210,29 @@ void slab_free(mem_allocator_t* a, void* addr) {
for (int i = 0; a->types[i].obj_size != 0; i++) {
size_t region_size = PAGE_SIZE * a->types[i].pages_per_cache;
- for (cache_t *r = a->slabs[i].first_cache; r != 0; r = r->c.next_cache) {
+ for (cache_t *r = a->slabs[i].first_cache; r != 0; r = r->next_cache) {
if (addr >= r->region_addr && addr < r->region_addr + region_size) {
ASSERT((addr - r->region_addr) % a->types[i].obj_size == 0);
- ASSERT(r->is_a_cache);
+
object_t *o = (object_t*)addr;
- o->next = r->c.first_free_obj;
- r->c.first_free_obj = o;
+ o->next = r->first_free_obj;
+ r->first_free_obj = o;
r->n_free_objs++;
if (r->n_free_objs == region_size / a->types[i].obj_size) {
// region is completely unused, free it.
if (a->slabs[i].first_cache == r) {
- a->slabs[i].first_cache = r->c.next_cache;
- } else {
- for (cache_t *it = a->slabs[i].first_cache; it->c.next_cache != 0; it = it->c.next_cache) {
- if (it->c.next_cache == r) {
- it->c.next_cache = r->c.next_cache;
- break;
- }
- }
- }
- if (a->all_caches == r) {
- a->all_caches = r->next_region;
+ a->slabs[i].first_cache = r->next_cache;
} else {
- for (cache_t *it = a->all_caches; it->next_region != 0; it = it->next_region) {
- if (it->next_region == r) {
- it->next_region = r->next_region;
+ for (cache_t *it = a->slabs[i].first_cache; it->next_cache != 0; it = it->next_cache) {
+ if (it->next_cache == r) {
+ it->next_cache = r->next_cache;
break;
}
}
}
a->free_fun(r->region_addr);
- add_free_region_descriptor(a, r);
+ add_free_descriptor(a, (descriptor_t*)r);
}
return;
}
@@ -225,21 +240,22 @@ void slab_free(mem_allocator_t* a, void* addr) {
}
// otherwise the block was directly allocated : look for it in regions.
- a->free_fun(addr);
- ASSERT(a->all_other_regions != 0);
-
- if (a->all_other_regions->region_addr == addr) {
- cache_t *r = a->all_other_regions;
- ASSERT(r->is_a_cache == 0);
- a->all_other_regions = r->next_region;
- add_free_region_descriptor(a, r);
+ ASSERT(a->all_regions != 0);
+
+ if (a->all_regions->region_addr == addr) {
+ a->free_fun(addr); // found it, free it
+
+ region_t *r = a->all_regions;
+ a->all_regions = r->next_region;
+ add_free_descriptor(a, (descriptor_t*)r);
} else {
- for (cache_t *i = a->all_other_regions; i->next_region != 0; i = i->next_region) {
+ for (region_t *i = a->all_regions; i->next_region != 0; i = i->next_region) {
if (i->next_region->region_addr == addr) {
- cache_t *r = i->next_region;
- ASSERT(r->is_a_cache == 0);
+ a->free_fun(addr); // found it, free it
+
+ region_t *r = i->next_region;
i->next_region = r->next_region;
- add_free_region_descriptor(a, r);
+ add_free_descriptor(a, (descriptor_t*)r);
return;
}
}