about summary refs log tree commit diff
path: root/kernel/lib/slab_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/lib/slab_alloc.c')
-rw-r--r--  kernel/lib/slab_alloc.c  62
1 files changed, 47 insertions, 15 deletions
diff --git a/kernel/lib/slab_alloc.c b/kernel/lib/slab_alloc.c
index 9ec39b0..d9efc4f 100644
--- a/kernel/lib/slab_alloc.c
+++ b/kernel/lib/slab_alloc.c
@@ -32,7 +32,7 @@ struct mem_allocator {
const slab_type_t *types;
slab_t *slabs;
cache_t *first_free_region_descriptor;
- cache_t *all_regions;
+ cache_t *all_caches, *all_other_regions;
page_alloc_fun_t alloc_fun;
page_free_fun_t free_fun;
@@ -49,8 +49,13 @@ void add_free_region_descriptor(mem_allocator_t *a, cache_t *c) {
cache_t *take_region_descriptor(mem_allocator_t *a) {
if (a->first_free_region_descriptor == 0) {
- // TODO : allocate more descriptors (not complicated)
- return 0;
+ void* p = a->alloc_fun(PAGE_SIZE);
+ if (p == 0) return 0;
+
+ void* end = p + PAGE_SIZE;
+ for (cache_t *i = (cache_t*)p; i + 1 <= (cache_t*)end; i++) {
+ add_free_region_descriptor(a, i);
+ }
}
cache_t *x = a->first_free_region_descriptor;
a->first_free_region_descriptor = x->next_region;
@@ -76,7 +81,7 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
mem_allocator_t *a = ptr.a;
ptr.a++;
- a->all_regions = 0;
+ a->all_caches = a->all_other_regions = 0;
a->alloc_fun = af;
a->free_fun = ff;
@@ -99,11 +104,13 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
static void stack_and_destroy_regions(page_free_fun_t ff, cache_t *r) {
if (r == 0) return;
void* addr = r->region_addr;
+ ASSERT(r != r->next_region);
stack_and_destroy_regions(ff, r->next_region);
ff(addr);
}
void destroy_slab_allocator(mem_allocator_t *a) {
- stack_and_destroy_regions(a->free_fun, a->all_regions);
+ stack_and_destroy_regions(a->free_fun, a->all_caches);
+ stack_and_destroy_regions(a->free_fun, a->all_other_regions);
a->free_fun(a);
}
@@ -140,8 +147,8 @@ void* slab_alloc(mem_allocator_t* a, size_t sz) {
}
ASSERT(fc->n_free_objs == cache_size / obj_size);
- fc->next_region = a->all_regions;
- a->all_regions = fc;
+ fc->next_region = a->all_caches;
+ a->all_caches = fc;
fc->c.next_cache = a->slabs[i].first_cache;
a->slabs[i].first_cache = fc;
}
@@ -167,8 +174,8 @@ void* slab_alloc(mem_allocator_t* a, size_t sz) {
r->is_a_cache = 0;
r->sr.region_size = sz;
- r->next_region = a->all_regions;
- a->all_regions = r;
+ r->next_region = a->all_other_regions;
+ a->all_other_regions = r;
return (void*)r->region_addr;
}
@@ -186,7 +193,32 @@ void slab_free(mem_allocator_t* a, void* addr) {
o->next = r->c.first_free_obj;
r->c.first_free_obj = o;
r->n_free_objs++;
- // TODO : if cache is empty, free it
+
+ if (r->n_free_objs == region_size / a->types[i].obj_size) {
+ // region is completely unused, free it.
+ if (a->slabs[i].first_cache == r) {
+ a->slabs[i].first_cache = r->c.next_cache;
+ } else {
+ for (cache_t *it = a->slabs[i].first_cache; it->c.next_cache != 0; it = it->c.next_cache) {
+ if (it->c.next_cache == r) {
+ it->c.next_cache = r->c.next_cache;
+ break;
+ }
+ }
+ }
+ if (a->all_caches == r) {
+ a->all_caches = r->next_region;
+ } else {
+ for (cache_t *it = a->all_caches; it->next_region != 0; it = it->next_region) {
+ if (it->next_region == r) {
+ it->next_region = r->next_region;
+ break;
+ }
+ }
+ }
+ a->free_fun(r->region_addr);
+ add_free_region_descriptor(a, r);
+ }
return;
}
}
@@ -194,15 +226,15 @@ void slab_free(mem_allocator_t* a, void* addr) {
// otherwise the block was directly allocated : look for it in regions.
a->free_fun(addr);
- ASSERT(a->all_regions != 0);
+ ASSERT(a->all_other_regions != 0);
- if (a->all_regions->region_addr == addr) {
- cache_t *r = a->all_regions;
+ if (a->all_other_regions->region_addr == addr) {
+ cache_t *r = a->all_other_regions;
ASSERT(r->is_a_cache == 0);
- a->all_regions = r->next_region;
+ a->all_other_regions = r->next_region;
add_free_region_descriptor(a, r);
} else {
- for (cache_t *i = a->all_regions; i->next_region != 0; i = i->next_region) {
+ for (cache_t *i = a->all_other_regions; i->next_region != 0; i = i->next_region) {
if (i->next_region->region_addr == addr) {
cache_t *r = i->next_region;
ASSERT(r->is_a_cache == 0);