author    Alex Auvolat <alex.auvolat@ens.fr>  2014-12-05 15:06:17 +0100
committer Alex Auvolat <alex.auvolat@ens.fr>  2014-12-05 15:06:17 +0100
commit    274765f7daa3cc1094f9f26196fcf2b9a5289ee2
tree      974b59905b942ded566c5f9c2d512472646288fc /kernel
parent    902eea7a56b38c20bbdca414e58fc6c3f4393025
Corrections:
- replace size_t with void* in many places
- fix a bug in the region freeing code
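Why void* rather than size_t for virtual addresses: the compiler then distinguishes addresses from byte counts, and call sites lose their casts. The cost is a reliance on GCC's void* arithmetic extension (sizeof(void) == 1), which this codebase already uses (e.g. p0 + i * PAGE_SIZE in kmain.c below). A minimal illustrative sketch, not code from this commit:

    size_t nbytes = 0x2000;

    /* With size_t addresses, an address/size mix-up compiles silently: */
    size_t vaddr_old = 0x1000;
    size_t sum = vaddr_old + nbytes;    /* address? size? no way to tell */

    /* With void* addresses, the mix-up becomes a type error, while      */
    /* offsetting still works through GCC's void* arithmetic extension:  */
    void* vaddr = (void*)0x1000;
    void* next  = vaddr + nbytes;       /* GNU C: advances by nbytes bytes */
    /* size_t bad = vaddr;                 error: pointer -> integer */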
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/include/paging.h      |  8
-rw-r--r--  kernel/include/region.h      | 11
-rw-r--r--  kernel/include/slab_alloc.h  |  8
-rw-r--r--  kernel/include/sys.h         |  2
-rw-r--r--  kernel/l0/kmain.c            | 50
-rw-r--r--  kernel/l0/paging.c           | 34
-rw-r--r--  kernel/l0/region.c           | 42
-rw-r--r--  kernel/lib/slab_alloc.c      | 29
8 files changed, 95 insertions, 89 deletions
diff --git a/kernel/include/paging.h b/kernel/include/paging.h
index 2e1f844..720d6b3 100644
--- a/kernel/include/paging.h
+++ b/kernel/include/paging.h
@@ -1,6 +1,7 @@
#pragma once
#include <sys.h>
+#include <stdbool.h>
struct page_directory;
typedef struct page_directory pagedir_t;
@@ -14,10 +15,9 @@ pagedir_t *get_kernel_pagedir();
void switch_pagedir(pagedir_t *pd);
// these functions are always relative to the currently mapped page directory
-uint32_t pd_get_frame(size_t vaddr); // get physical frame for virtual address
-int pd_map_page(size_t vaddr, uint32_t frame_id,
- uint32_t rw); // returns nonzero on error
-void pd_unmap_page(size_t vaddr); // does nothing if page not mapped
+uint32_t pd_get_frame(void* vaddr); // get physical frame for virtual address
+int pd_map_page(void* vaddr, uint32_t frame_id, bool rw); // returns nonzero on error
+void pd_unmap_page(void* vaddr); // does nothing if page not mapped
pagedir_t *create_pagedir(); // returns zero on error
void delete_pagedir(pagedir_t *pd);
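A hedged usage sketch of the updated paging API (frame_alloc, frame_free and ASSERT are the kernel's own, all seen later in this diff; the address and error handling are illustrative):

    void* va = (void*)0xD0000000;        /* hypothetical kernel-space address */
    uint32_t frame = frame_alloc(1);     /* allocate one physical frame */
    if (frame == 0 || pd_map_page(va, frame, true) != 0)
        PANIC("out of memory");          /* pd_map_page returns nonzero on error */
    ASSERT(pd_get_frame(va) == frame);
    pd_unmap_page(va);
    frame_free(frame, 1);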
diff --git a/kernel/include/region.h b/kernel/include/region.h
index 1628370..bc26bfe 100644
--- a/kernel/include/region.h
+++ b/kernel/include/region.h
@@ -15,18 +15,19 @@
#define REGION_T_HW 0x00002000 // used for hardware access
struct region_info;
-typedef void (*page_fault_handler_t)(pagedir_t *pd, struct region_info *r, size_t addr);
+typedef void (*page_fault_handler_t)(pagedir_t *pd, struct region_info *r, void* addr);
typedef struct region_info {
- size_t addr, size;
+ void* addr;
+ size_t size;
uint32_t type;
page_fault_handler_t pf;
} region_info_t;
void region_allocator_init(void* kernel_data_end);
-size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf); // returns 0 on error
-region_info_t *find_region(size_t addr);
-void region_free(size_t addr);
+void* region_alloc(size_t size, uint32_t type, page_fault_handler_t pf); // returns 0 on error
+region_info_t *find_region(void* addr);
+void region_free(void* addr);
void dbg_print_region_stats();
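Callers of the reworked interface test the returned pointer directly; a short sketch modeled on the kmain.c tests below (REGION_T_HW and the null page-fault handler are used the same way there):

    void* p = region_alloc(0x1000, REGION_T_HW, 0);  /* 0 (NULL) on failure */
    if (p == 0) PANIC("could not allocate region");
    region_info_t *info = find_region(p);
    ASSERT(info != 0 && info->addr == p && info->size == 0x1000);
    region_free(p);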
diff --git a/kernel/include/slab_alloc.h b/kernel/include/slab_alloc.h
index a3bd7de..5c575ba 100644
--- a/kernel/include/slab_alloc.h
+++ b/kernel/include/slab_alloc.h
@@ -22,13 +22,13 @@ typedef struct slab_type {
struct mem_allocator;
typedef struct mem_allocator mem_allocator_t;
-typedef void* (*page_alloc_fun_t)(const size_t bytes);
-typedef void (*page_free_fun_t)(const void* ptr);
+typedef void* (*page_alloc_fun_t)(size_t bytes);
+typedef void (*page_free_fun_t)(void* ptr);
mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff);
void destroy_slab_allocator(mem_allocator_t*);
-void* slab_alloc(mem_allocator_t* a, const size_t sz);
-void slab_free(mem_allocator_t* a, const void* ptr);
+void* slab_alloc(mem_allocator_t* a, size_t sz);
+void slab_free(mem_allocator_t* a, void* ptr);
/* vim: set ts=4 sw=4 tw=0 noet :*/
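The slab allocator stays decoupled from the VM layer: it only sees the two page-granularity callbacks. A sketch of the round trip, using the kmain.c callbacks defined later in this diff (the slab table follows the zero-terminated convention used there):

    static slab_type_t sizes[] = {
        { "16B obj", 16, 1 },       /* description, object size, pages per cache */
        { 0, 0, 0 }                 /* terminator */
    };
    mem_allocator_t *a = create_slab_allocator(sizes,
            page_alloc_fun_for_kmalloc, page_free_fun_for_kmalloc);
    void* x = slab_alloc(a, 12);    /* served from the 16-byte slab */
    slab_free(a, x);
    destroy_slab_allocator(a);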
diff --git a/kernel/include/sys.h b/kernel/include/sys.h
index ff98c60..c8721ec 100644
--- a/kernel/include/sys.h
+++ b/kernel/include/sys.h
@@ -22,7 +22,7 @@ static inline uint16_t inw(uint16_t port) {
return ret;
}
-static inline void invlpg(size_t addr) {
+static inline void invlpg(void* addr) {
asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
}
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index e052eac..a935c9c 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -16,7 +16,7 @@ void breakpoint_handler(registers_t *regs) {
BOCHS_BREAKPOINT;
}
-void test_pf_handler(pagedir_t *pd, region_info_t *i, size_t addr) {
+void test_pf_handler(pagedir_t *pd, region_info_t *i, void* addr) {
dbg_printf("0x%p", addr);
uint32_t f = frame_alloc(1);
@@ -27,11 +27,11 @@ void test_pf_handler(pagedir_t *pd, region_info_t *i, size_t addr) {
if (error) PANIC("Could not map frame (OOM)");
}
-void* page_alloc_fun_for_kmalloc(const size_t bytes) {
- return (void*)region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler);
+void* page_alloc_fun_for_kmalloc(size_t bytes) {
+ return region_alloc(bytes, REGION_T_CORE_HEAP, test_pf_handler);
}
-void page_free_fun_for_kmalloc(const void* ptr) {
- region_free((size_t)ptr);
+void page_free_fun_for_kmalloc(void* ptr) {
+ region_free(ptr);
}
slab_type_t slab_sizes[] = {
{ "8B obj", 8, 1 },
@@ -44,7 +44,6 @@ slab_type_t slab_sizes[] = {
{ "1KB obj", 1024, 8 },
{ "2KB obj", 2048, 8 },
{ "4KB obj", 4096, 16 },
- { "8KB obj", 8192, 32 },
{ 0, 0, 0 }
};
@@ -72,7 +71,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
// used for allocation of data structures before malloc is set up
// a pointer to this pointer is passed to the functions that might have
// to allocate memory; they just increment it by the allocated amount
- void* kernel_data_end = (void*)&k_end_addr;
+ void* kernel_data_end = &k_end_addr;
frame_init_allocator(total_ram, &kernel_data_end);
dbg_printf("kernel_data_end: 0x%p\n", kernel_data_end);
@@ -84,16 +83,16 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
region_allocator_init(kernel_data_end);
dbg_print_region_stats();
- size_t p = region_alloc(0x1000, REGION_T_HW, 0);
+ void* p = region_alloc(0x1000, REGION_T_HW, 0);
dbg_printf("Allocated one-page region: 0x%p\n", p);
dbg_print_region_stats();
- size_t q = region_alloc(0x1000, REGION_T_HW, 0);
+ void* q = region_alloc(0x1000, REGION_T_HW, 0);
dbg_printf("Allocated one-page region: 0x%p\n", q);
dbg_print_region_stats();
- size_t r = region_alloc(0x2000, REGION_T_HW, 0);
+ void* r = region_alloc(0x2000, REGION_T_HW, 0);
dbg_printf("Allocated two-page region: 0x%p\n", r);
dbg_print_region_stats();
- size_t s = region_alloc(0x10000, REGION_T_CORE_HEAP, 0);
+ void* s = region_alloc(0x10000, REGION_T_CORE_HEAP, 0);
dbg_printf("Allocated 16-page region: 0x%p\n", s);
dbg_print_region_stats();
region_free(p);
@@ -111,7 +110,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
// allocate a big region and try to write into it
const size_t n = 1000;
- size_t p0 = region_alloc(n * PAGE_SIZE, REGION_T_HW, test_pf_handler);
+ void* p0 = region_alloc(n * PAGE_SIZE, REGION_T_HW, test_pf_handler);
for (size_t i = 0; i < n; i++) {
uint32_t *x = (uint32_t*)(p0 + i * PAGE_SIZE);
dbg_printf("[%i : ", i);
@@ -125,30 +124,43 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
uint32_t *x = (uint32_t*)(p0 + i * PAGE_SIZE);
ASSERT(x[1] == (i * 20422) % 122);
- size_t f = pd_get_frame((size_t)x);
+ uint32_t f = pd_get_frame(x);
ASSERT(f != 0);
- pd_unmap_page((size_t)x);
+ pd_unmap_page(x);
frame_free(f, 1);
}
region_free(s);
// TEST SLAB ALLOCATOR!!!
- mem_allocator_t *a = create_slab_allocator(slab_sizes,
- page_alloc_fun_for_kmalloc,
- page_free_fun_for_kmalloc);
+ mem_allocator_t *a =
+ create_slab_allocator(slab_sizes, page_alloc_fun_for_kmalloc,
+ page_free_fun_for_kmalloc);
dbg_printf("Created slab allocator at 0x%p\n", a);
const int m = 100;
void* ptr[m];
for (int i = 0; i < m; i++) {
- size_t s = 1 << ((i * 7) % 12);
+ size_t s = 1 << ((i * 7) % 12 + 1);
ptr[i] = slab_alloc(a, s);
dbg_printf("Alloc %i : 0x%p\n", s, ptr[i]);
dbg_print_region_stats();
}
for (int i = 0; i < m; i++) {
- slab_free(a, ptr[i]);
+ slab_free(a, ptr[m - i - 1]);
}
+ dbg_print_region_stats();
+ for (int i = 0; i < m; i++) {
+ size_t s = 1 << ((i * 7) % 12 + 1);
+ ASSERT(slab_alloc(a, s) == ptr[i]);
+ }
+ dbg_print_region_stats();
+ for (int i = 0; i < m; i++) {
+ slab_free(a, ptr[m - i - 1]);
+ }
+ dbg_print_region_stats();
+ dbg_printf("Destroying slab allocator.\n");
+ destroy_slab_allocator(a);
+ dbg_print_region_stats();
PANIC("Reached kmain end! Falling off the edge.");
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index 0104d5e..ca217c6 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -23,7 +23,7 @@ typedef struct page_table {
} pagetable_t;
struct page_directory {
- size_t phys_addr; // physical address of page directory
+ uint32_t phys_addr; // physical address of page directory
// to modify a page directory, we first map it
// then we can use mirroring to edit it
// (the last 4M of the address space are mapped to the PD itself)
@@ -46,17 +46,15 @@ static pagedir_t *current_pd_d;
#define current_pd ((pagetable_t*)(PD_MIRROR_ADDR + (N_PAGES_IN_PT-1)*PAGE_SIZE))
void page_fault_handler(registers_t *regs) {
- size_t vaddr;
+ void* vaddr;
asm volatile("movl %%cr2, %0":"=r"(vaddr));
- if (vaddr >= K_HIGHHALF_ADDR) {
+ if ((size_t)vaddr >= K_HIGHHALF_ADDR) {
uint32_t pt = PT_OF_ADDR(vaddr);
- if (current_pd != &kernel_pd && vaddr >= K_HIGHHALF_ADDR
- && current_pd->page[pt] != kernel_pd.page[pt])
- {
+ if (current_pd != &kernel_pd && current_pd->page[pt] != kernel_pd.page[pt]) {
current_pd->page[pt] = kernel_pd.page[pt];
- invlpg((size_t)(&current_pt[pt]));
+ invlpg(&current_pt[pt]);
return;
}
@@ -104,7 +102,7 @@ void paging_setup(void* kernel_data_end) {
kernel_pd.page[N_PAGES_IN_PT-1] =
(((size_t)&kernel_pd - K_HIGHHALF_ADDR) & PAGE_MASK) | PTE_PRESENT | PTE_RW;
- invlpg(K_HIGHHALF_ADDR);
+ invlpg((void*)K_HIGHHALF_ADDR);
// paging already enabled in loader, nothing to do.
switch_pagedir(&kernel_pd_d);
@@ -128,7 +126,7 @@ pagedir_t *get_kernel_pagedir() {
void switch_pagedir(pagedir_t *pd) {
asm volatile("movl %0, %%cr3":: "r"(pd->phys_addr));
- invlpg((size_t)current_pd);
+ invlpg(current_pd);
current_pd_d = pd;
}
@@ -136,46 +134,46 @@ void switch_pagedir(pagedir_t *pd) {
// Mapping and unmapping of pages //
// ============================== //
-uint32_t pd_get_frame(size_t vaddr) {
+uint32_t pd_get_frame(void* vaddr) {
uint32_t pt = PT_OF_ADDR(vaddr);
uint32_t page = PAGE_OF_ADDR(vaddr);
- pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+ pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
if (!pd->page[pt] & PTE_PRESENT) return 0;
if (!current_pt[pt].page[page] & PTE_PRESENT) return 0;
return current_pt[pt].page[page] >> PTE_FRAME_SHIFT;
}
-int pd_map_page(size_t vaddr, uint32_t frame_id, uint32_t rw) {
+int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
uint32_t pt = PT_OF_ADDR(vaddr);
uint32_t page = PAGE_OF_ADDR(vaddr);
- pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+ pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
if (!pd->page[pt] & PTE_PRESENT) {
- size_t new_pt_frame = frame_alloc(1);
+ uint32_t new_pt_frame = frame_alloc(1);
if (new_pt_frame == 0) return 1; // OOM
current_pd->page[pt] = pd->page[pt] =
(new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;
- invlpg((size_t)&current_pt[pt]);
+ invlpg(&current_pt[pt]);
}
current_pt[pt].page[page] =
frame_id << PTE_FRAME_SHIFT
| PTE_PRESENT
- | (vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL)
+ | ((size_t)vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL)
| (rw ? PTE_RW : 0);
return 0;
}
-void pd_unmap_page(size_t vaddr) {
+void pd_unmap_page(void* vaddr) {
uint32_t pt = PT_OF_ADDR(vaddr);
uint32_t page = PAGE_OF_ADDR(vaddr);
- pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+ pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
if (!pd->page[pt] & PTE_PRESENT) return;
if (!current_pt[pt].page[page] & PTE_PRESENT) return;
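The mirroring scheme from the page_directory comment makes the current address space's page tables visible at PD_MIRROR_ADDR: the last PD entry points back at the PD's own frame. Assuming PT_OF_ADDR and PAGE_OF_ADDR are the standard 32-bit x86 index extractions (their definitions are not part of this diff), the layout works out as:

    /* Assumed, consistent with standard two-level x86 paging:            */
    #define PT_OF_ADDR(a)   ((size_t)(a) >> 22)           /* PD index     */
    #define PAGE_OF_ADDR(a) (((size_t)(a) >> 12) & 0x3FF) /* PT index     */
    /* With PD entry N_PAGES_IN_PT-1 mapping the PD frame itself:         */
    /*   page table n is visible at PD_MIRROR_ADDR + n * PAGE_SIZE        */
    /*   (current_pt[n]), and the PD occupies the last of those pages     */
    /*   (current_pd, as #defined above).                                 */

Note in passing that the tests of the form !pd->page[pt] & PTE_PRESENT parse as (!pd->page[pt]) & PTE_PRESENT; they match the intended !(pd->page[pt] & PTE_PRESENT) only because non-present entries here are entirely zero.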
diff --git a/kernel/l0/region.c b/kernel/l0/region.c
index 5673f83..7b1d138 100644
--- a/kernel/l0/region.c
+++ b/kernel/l0/region.c
@@ -7,7 +7,8 @@ typedef union region_descriptor {
union region_descriptor *next;
} unused_descriptor;
struct {
- size_t addr, size;
+ void* addr;
+ size_t size;
union region_descriptor *next_by_size, *first_bigger;
union region_descriptor *next_by_addr;
} free;
@@ -100,9 +101,9 @@ static void add_free_region(descriptor_t *d) {
add_unused_descriptor(d);
add_free_region(i);
return;
- } else if (i->free.next_by_addr == 0) {
+ } else if (i->free.next_by_addr == 0 || i->free.next_by_addr->free.addr > d->free.addr) {
+ d->free.next_by_addr = i->free.next_by_addr;
i->free.next_by_addr = d;
- d->free.next_by_addr = 0;
break;
} else if (d->free.addr + d->free.size == i->free.next_by_addr->free.addr) {
// concatenate d . i->next_by_addr
@@ -112,11 +113,6 @@ static void add_free_region(descriptor_t *d) {
add_unused_descriptor(j);
add_free_region(d);
return;
- } else if (i->free.next_by_addr->free.addr > d->free.addr) {
- // insert between i and i->next_by_addr
- d->free.next_by_addr = i->free.next_by_addr;
- i->free.next_by_addr = d;
- break;
} else {
// continue
i = i->free.next_by_addr;
@@ -134,19 +130,19 @@ static void add_free_region(descriptor_t *d) {
while (i != 0) {
ASSERT(d->free.size > i->free.size);
if (i->free.next_by_size == 0) {
- i->free.next_by_size = d;
- if (d->free.size > i->free.size) i->free.first_bigger = d;
d->free.next_by_size = 0;
d->free.first_bigger = 0;
- break;
- } else if (i->free.next_by_size->free.size >= d->free.size) {
i->free.next_by_size = d;
if (d->free.size > i->free.size) i->free.first_bigger = d;
+ break;
+ } else if (i->free.next_by_size->free.size >= d->free.size) {
d->free.next_by_size = i->free.next_by_size;
d->free.first_bigger =
(i->free.next_by_size->free.size > d->free.size
? i->free.next_by_size
: i->free.next_by_size->free.first_bigger);
+ i->free.next_by_size = d;
+ if (d->free.size > i->free.size) i->free.first_bigger = d;
break;
} else {
// continue
@@ -156,7 +152,7 @@ static void add_free_region(descriptor_t *d) {
}
}
-static descriptor_t *find_used_region(size_t addr) {
+static descriptor_t *find_used_region(void* addr) {
for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) {
if (addr >= i->used.i.addr && addr < i->used.i.addr + i->used.i.size) return i;
if (i->used.i.addr > addr) break;
@@ -201,7 +197,7 @@ static void remove_used_region(descriptor_t *d) {
void region_allocator_init(void* kernel_data_end) {
descriptor_t *u0 = &base_descriptors[0];
- u0->used.i.addr = K_HIGHHALF_ADDR;
+ u0->used.i.addr = (void*)K_HIGHHALF_ADDR;
u0->used.i.size = PAGE_ALIGN_UP(kernel_data_end) - K_HIGHHALF_ADDR;
u0->used.i.type = REGION_T_KERNEL_BASE;
u0->used.i.pf = 0;
@@ -209,8 +205,8 @@ void region_allocator_init(void* kernel_data_end) {
first_used_region = u0;
descriptor_t *f0 = &base_descriptors[1];
- f0->free.addr = PAGE_ALIGN_UP(kernel_data_end);
- f0->free.size = (LAST_KERNEL_ADDR-f0->free.addr);
+ f0->free.addr = (void*)PAGE_ALIGN_UP(kernel_data_end);
+ f0->free.size = ((void*)LAST_KERNEL_ADDR - f0->free.addr);
f0->free.next_by_size = 0;
f0->free.first_bigger = 0;
first_free_region_by_size = first_free_region_by_addr = f0;
@@ -222,7 +218,7 @@ void region_allocator_init(void* kernel_data_end) {
}
}
-static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t pf, bool use_reserve) {
+static void* region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t pf, bool use_reserve) {
size = PAGE_ALIGN_UP(size);
for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.first_bigger) {
@@ -253,7 +249,7 @@ static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_
remove_free_region(i);
if (x != 0) add_free_region(x);
- size_t addr = i->free.addr;
+ void* addr = i->free.addr;
i->used.i.addr = addr;
i->used.i.size = size;
i->used.i.type = type;
@@ -266,12 +262,12 @@ static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_
return 0; //No big enough block found
}
-size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
+void* region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) {
uint32_t frame = frame_alloc(1);
if (frame == 0) return 0;
- size_t descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true);
+ void* descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true);
ASSERT(descriptor_region != 0);
int error = pd_map_page(descriptor_region, frame, 1);
@@ -283,7 +279,7 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
}
for (descriptor_t *d = (descriptor_t*)descriptor_region;
- (size_t)(d+1) <= (descriptor_region + PAGE_SIZE);
+ (void*)(d+1) <= (descriptor_region + PAGE_SIZE);
d++) {
add_unused_descriptor(d);
}
@@ -291,13 +287,13 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
return region_alloc_inner(size, type, pf, false);
}
-region_info_t *find_region(size_t addr) {
+region_info_t *find_region(void* addr) {
descriptor_t *d = find_used_region(addr);
if (d == 0) return 0;
return &d->used.i;
}
-void region_free(size_t addr) {
+void region_free(void* addr) {
descriptor_t *d = find_used_region(addr);
if (d == 0) return;
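The "bug in region freeing code" from the commit message is an ordering bug in add_free_region's sorted-list insertions: when splicing d into the size-ordered list, the old code wrote i->free.next_by_size = d before reading that same field into d->free.next_by_size, leaving d pointing at itself and corrupting the list. The fix above sets d's links from the old values first. The general pattern, as a minimal generic sketch:

    /* Generic sorted singly-linked-list insertion (illustrative struct). */
    struct node { struct node *next; };

    void insert_after(struct node *i, struct node *d) {
        d->next = i->next;   /* 1: copy the successor before clobbering it */
        i->next = d;         /* 2: only then splice d in                   */
    }                        /* doing 2 before 1 yields d->next == d       */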
diff --git a/kernel/lib/slab_alloc.c b/kernel/lib/slab_alloc.c
index e1aa8b5..9ec39b0 100644
--- a/kernel/lib/slab_alloc.c
+++ b/kernel/lib/slab_alloc.c
@@ -20,7 +20,7 @@ typedef struct cache {
} sr;
};
- size_t region_addr;
+ void* region_addr;
struct cache *next_region;
} cache_t;
@@ -63,15 +63,15 @@ cache_t *take_region_descriptor(mem_allocator_t *a) {
mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff) {
union {
- size_t addr;
+ void* addr;
mem_allocator_t *a;
slab_t *s;
cache_t *c;
} ptr;
- ptr.addr = (size_t)af(PAGE_SIZE);
+ ptr.addr = af(PAGE_SIZE);
if (ptr.addr == 0) return 0; // could not allocate
- size_t end_addr = ptr.addr + PAGE_SIZE;
+ void* end_addr = ptr.addr + PAGE_SIZE;
mem_allocator_t *a = ptr.a;
ptr.a++;
@@ -88,7 +88,7 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
}
a->first_free_region_descriptor = 0;
- while ((size_t)(ptr.c + 1) <= end_addr) {
+ while ((void*)(ptr.c + 1) <= end_addr) {
add_free_region_descriptor(a, ptr.c);
ptr.c++;
}
@@ -98,16 +98,16 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
static void stack_and_destroy_regions(page_free_fun_t ff, cache_t *r) {
if (r == 0) return;
- size_t addr = r->region_addr;
+ void* addr = r->region_addr;
stack_and_destroy_regions(ff, r->next_region);
- ff((void*)addr);
+ ff(addr);
}
void destroy_slab_allocator(mem_allocator_t *a) {
stack_and_destroy_regions(a->free_fun, a->all_regions);
a->free_fun(a);
}
-void* slab_alloc(mem_allocator_t* a, const size_t sz) {
+void* slab_alloc(mem_allocator_t* a, size_t sz) {
for (int i = 0; a->types[i].obj_size != 0; i++) {
const size_t obj_size = a->types[i].obj_size;
if (sz <= obj_size) {
@@ -123,7 +123,7 @@ void* slab_alloc(mem_allocator_t* a, const size_t sz) {
if (fc == 0) return 0;
const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE;
- fc->region_addr = (size_t)a->alloc_fun(cache_size);
+ fc->region_addr = a->alloc_fun(cache_size);
if (fc->region_addr == 0) {
add_free_region_descriptor(a, fc);
return 0;
@@ -132,7 +132,7 @@ void* slab_alloc(mem_allocator_t* a, const size_t sz) {
fc->is_a_cache = 1;
fc->n_free_objs = 0;
fc->c.first_free_obj = 0;
- for (size_t i = fc->region_addr; i + obj_size <= fc->region_addr + cache_size; i += obj_size) {
+ for (void* i = fc->region_addr; i + obj_size <= fc->region_addr + cache_size; i += obj_size) {
object_t *x = (object_t*)i;
x->next = fc->c.first_free_obj;
fc->c.first_free_obj = x;
@@ -151,7 +151,7 @@ void* slab_alloc(mem_allocator_t* a, const size_t sz) {
fc->c.first_free_obj = x->next;
fc->n_free_objs--;
// TODO: if fc is full, put it at the end
- return (void*)x;
+ return x;
}
}
@@ -159,7 +159,7 @@ void* slab_alloc(mem_allocator_t* a, const size_t sz) {
cache_t *r = take_region_descriptor(a);
if (r == 0) return 0;
- r->region_addr = (size_t)a->alloc_fun(sz);
+ r->region_addr = a->alloc_fun(sz);
if (r->region_addr == 0) {
add_free_region_descriptor(a, r);
return 0;
@@ -174,8 +174,7 @@ void* slab_alloc(mem_allocator_t* a, const size_t sz) {
}
}
-void slab_free(mem_allocator_t* a, const void* ptr) {
- const size_t addr = (size_t)ptr;
+void slab_free(mem_allocator_t* a, void* addr) {
for (int i = 0; a->types[i].obj_size != 0; i++) {
size_t region_size = PAGE_SIZE * a->types[i].pages_per_cache;
@@ -194,7 +193,7 @@ void slab_free(mem_allocator_t* a, const void* ptr) {
}
// otherwise the block was directly allocated: look for it in regions.
- a->free_fun(ptr);
+ a->free_fun(addr);
ASSERT(a->all_regions != 0);
if (a->all_regions->region_addr == addr) {