From cf0b8a52287ee7c747b1d5a7d77abdef1fb46f94 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Fri, 13 Feb 2015 20:28:54 +0100 Subject: Reorganize code in preparation for user apps. --- src/common/Makefile | 11 -- src/common/README | 9 +- src/common/hashtbl.c | 172 ----------------------- src/common/libalgo/Makefile | 11 ++ src/common/libalgo/hashtbl.c | 172 +++++++++++++++++++++++ src/common/libc/Makefile | 11 ++ src/common/libc/printf.c | 120 ++++++++++++++++ src/common/libc/string.c | 126 +++++++++++++++++ src/common/libkogata/Makefile | 11 ++ src/common/libkogata/mutex.c | 27 ++++ src/common/libkogata/slab_alloc.c | 283 ++++++++++++++++++++++++++++++++++++++ src/common/mutex.c | 27 ---- src/common/printf.c | 120 ---------------- src/common/slab_alloc.c | 283 -------------------------------------- src/common/string.c | 126 ----------------- 15 files changed, 769 insertions(+), 740 deletions(-) delete mode 100644 src/common/Makefile delete mode 100644 src/common/hashtbl.c create mode 100644 src/common/libalgo/Makefile create mode 100644 src/common/libalgo/hashtbl.c create mode 100644 src/common/libc/Makefile create mode 100644 src/common/libc/printf.c create mode 100644 src/common/libc/string.c create mode 100644 src/common/libkogata/Makefile create mode 100644 src/common/libkogata/mutex.c create mode 100644 src/common/libkogata/slab_alloc.c delete mode 100644 src/common/mutex.c delete mode 100644 src/common/printf.c delete mode 100644 src/common/slab_alloc.c delete mode 100644 src/common/string.c (limited to 'src/common') diff --git a/src/common/Makefile b/src/common/Makefile deleted file mode 100644 index 40b8ba9..0000000 --- a/src/common/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -OBJ = string.o printf.o slab_alloc.o mutex.o hashtbl.o - -LIB = - -CFLAGS = -I ./include - -LDFLAGS = - -OUT = common.lib - -include ../rules.make diff --git a/src/common/README b/src/common/README index e40df40..7e3e1f7 100644 --- a/src/common/README +++ b/src/common/README @@ -1,5 +1,9 @@ This directory contains the library functions common to userland -and kernel code. +and kernel code, for the three basic libraries of the system : + +- libkogata : memory allocation, system functions +- libc : (partial) implementation of standard libc functions +- libalgo : useful data structures It relies on a few functions being exported : @@ -14,6 +18,9 @@ These function are supposed to be defined in the code that calls the common functions. The headers for these functions are to be found in `assert.h` and `malloc.h`. +In kernel code, these functions are defined somewhere in the kernel. +In user code, these functions are defined in the user part of libkogata. + Panic and panic_assert end the execution of the current program (or of the kernel when in kernel-mode). 
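To illustrate the contract described in the README above, here is a minimal sketch of the hook functions the common libraries expect the embedding code to export (the kernel on one side, the user part of libkogata on the other). The exact prototypes are assumptions; the authoritative declarations live in the project's `assert.h` and `malloc.h`, which are not part of this diff.

    /* Hypothetical prototypes -- assumed shapes, not copied from assert.h / malloc.h. */
    #include <stddef.h>   /* size_t */

    void  panic(const char* message);                 /* ends execution of the program / kernel */
    void  panic_assert(const char* assertion,
                       const char* file, int line);   /* assumed failed-assertion variant */

    void* malloc(size_t size);                        /* memory allocation hooks used by the */
    void  free(void* ptr);                            /* common code (hashtbl, strdup, ...)  */
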
diff --git a/src/common/hashtbl.c b/src/common/hashtbl.c deleted file mode 100644 index b87bcef..0000000 --- a/src/common/hashtbl.c +++ /dev/null @@ -1,172 +0,0 @@ -#include -#include -#include - -#define DEFAULT_INIT_SIZE 16 -#define SLOT_OF_HASH(k, nslots) (((size_t)(k)*21179)%(size_t)(nslots)) - -typedef struct hashtbl_item { - void* key; - void* val; - struct hashtbl_item *next; -} hashtbl_item_t; - -// When nitems > size * 3/4, size is multiplied by two -// When nitems < size * 1/4, size is divided by two -struct hashtbl { - key_eq_fun_t ef; - hash_fun_t hf; - free_fun_t kff; - size_t size, nitems; - hashtbl_item_t **items; -}; - -hashtbl_t *create_hashtbl(key_eq_fun_t ef, hash_fun_t hf, free_fun_t kff, size_t initial_size) { - hashtbl_t *ht = (hashtbl_t*)malloc(sizeof(hashtbl_t)); - if (ht == 0) return 0; - - ht->ef = ef; - ht->hf = hf; - ht->kff = kff; - - ht->size = (initial_size == 0 ? DEFAULT_INIT_SIZE : initial_size); - ht->nitems = 0; - - ht->items = (hashtbl_item_t**)malloc(ht->size * sizeof(hashtbl_item_t*)); - if (ht->items == 0) { - free(ht); - return 0; - } - - for (size_t i = 0; i < ht->size; i++) ht->items[i] = 0; - - return ht; -} - -void delete_hashtbl(hashtbl_t *ht, free_fun_t data_free_fun) { - // Free items - for (size_t i = 0; i < ht->size; i++) { - while (ht->items[i] != 0) { - hashtbl_item_t *n = ht->items[i]->next; - if (data_free_fun) data_free_fun(ht->items[i]->val); - if (ht->kff) ht->kff(ht->items[i]->key); - free(ht->items[i]); - ht->items[i] = n; - } - } - - // Free table - free(ht->items); - free(ht); -} - -static void hashtbl_check_size(hashtbl_t *ht) { - size_t nsize = 0; - if (4 * ht->nitems < ht->size) nsize = ht->size / 2; - if (4 * ht->nitems > 3 * ht->size) nsize = ht->size * 2; - - if (nsize != 0) { - hashtbl_item_t **nitems = (hashtbl_item_t**)malloc(nsize * sizeof(hashtbl_item_t*)); - if (nitems == 0) return; // if we can't realloc, too bad, we just lose space - - for (size_t i = 0; i < nsize; i++) nitems[i] = 0; - - // rehash - for (size_t i = 0; i < ht->size; i++) { - while (ht->items[i] != 0) { - hashtbl_item_t *x = ht->items[i]; - ht->items[i] = x->next; - - size_t slot = SLOT_OF_HASH(ht->hf(x->key), nsize); - x->next = nitems[slot]; - nitems[slot] = x; - } - } - free(ht->items); - ht->size = nsize; - ht->items = nitems; - } -} - -bool hashtbl_add(hashtbl_t *ht, void* key, void* v) { - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - hashtbl_item_t *i = (hashtbl_item_t*)malloc(sizeof(hashtbl_item_t)); - if (i == 0) return false; // OOM - - // make sure item is not already present - hashtbl_remove(ht, key); - - i->key = key; - i->val = v; - i->next = ht->items[slot]; - ht->items[slot] = i; - ht->nitems++; - - hashtbl_check_size(ht); - - return true; -} - -void* hashtbl_find(hashtbl_t* ht, const void* key) { - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - for (hashtbl_item_t *i = ht->items[slot]; i != 0; i = i->next) { - if (ht->ef(i->key, key)) return i->val; - } - - return 0; -} - -void hashtbl_remove(hashtbl_t* ht, const void* key) { - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - if (ht->items[slot] == 0) return; - - if (ht->ef(ht->items[slot]->key, key)) { - hashtbl_item_t *x = ht->items[slot]; - ht->items[slot] = x->next; - if (ht->kff) ht->kff(x->key); - free(x); - ht->nitems--; - } else { - for (hashtbl_item_t *i = ht->items[slot]; i->next != 0; i = i->next) { - if (ht->ef(i->next->key, key)) { - hashtbl_item_t *x = i->next; - i->next = x->next; - if (ht->kff) ht->kff(x->key); - free(x); - ht->nitems--; - break; 
- } - } - } - - hashtbl_check_size(ht); -} - -size_t hashtbl_count(hashtbl_t* ht) { - return ht->nitems; -} - -hash_t id_hash_fun(const void* v) { - return (hash_t)v; -} - -hash_t str_hash_fun(const void* v) { - hash_t h = 712; - for (char* s = (char*)v; *s != 0; s++) { - h = h * 101 + *s; - } - return h; -} - -bool id_key_eq_fun(const void* a, const void* b) { - return a == b; -} - -bool str_key_eq_fun(const void* a, const void* b) { - return strcmp((const char*)a, (const char*)b) == 0; -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libalgo/Makefile b/src/common/libalgo/Makefile new file mode 100644 index 0000000..3e021c4 --- /dev/null +++ b/src/common/libalgo/Makefile @@ -0,0 +1,11 @@ +OBJ = hashtbl.o + +LIB = + +CFLAGS = -I ../include + +LDFLAGS = + +OUT = libalgo.lib + +include ../../rules.make diff --git a/src/common/libalgo/hashtbl.c b/src/common/libalgo/hashtbl.c new file mode 100644 index 0000000..b87bcef --- /dev/null +++ b/src/common/libalgo/hashtbl.c @@ -0,0 +1,172 @@ +#include +#include +#include + +#define DEFAULT_INIT_SIZE 16 +#define SLOT_OF_HASH(k, nslots) (((size_t)(k)*21179)%(size_t)(nslots)) + +typedef struct hashtbl_item { + void* key; + void* val; + struct hashtbl_item *next; +} hashtbl_item_t; + +// When nitems > size * 3/4, size is multiplied by two +// When nitems < size * 1/4, size is divided by two +struct hashtbl { + key_eq_fun_t ef; + hash_fun_t hf; + free_fun_t kff; + size_t size, nitems; + hashtbl_item_t **items; +}; + +hashtbl_t *create_hashtbl(key_eq_fun_t ef, hash_fun_t hf, free_fun_t kff, size_t initial_size) { + hashtbl_t *ht = (hashtbl_t*)malloc(sizeof(hashtbl_t)); + if (ht == 0) return 0; + + ht->ef = ef; + ht->hf = hf; + ht->kff = kff; + + ht->size = (initial_size == 0 ? DEFAULT_INIT_SIZE : initial_size); + ht->nitems = 0; + + ht->items = (hashtbl_item_t**)malloc(ht->size * sizeof(hashtbl_item_t*)); + if (ht->items == 0) { + free(ht); + return 0; + } + + for (size_t i = 0; i < ht->size; i++) ht->items[i] = 0; + + return ht; +} + +void delete_hashtbl(hashtbl_t *ht, free_fun_t data_free_fun) { + // Free items + for (size_t i = 0; i < ht->size; i++) { + while (ht->items[i] != 0) { + hashtbl_item_t *n = ht->items[i]->next; + if (data_free_fun) data_free_fun(ht->items[i]->val); + if (ht->kff) ht->kff(ht->items[i]->key); + free(ht->items[i]); + ht->items[i] = n; + } + } + + // Free table + free(ht->items); + free(ht); +} + +static void hashtbl_check_size(hashtbl_t *ht) { + size_t nsize = 0; + if (4 * ht->nitems < ht->size) nsize = ht->size / 2; + if (4 * ht->nitems > 3 * ht->size) nsize = ht->size * 2; + + if (nsize != 0) { + hashtbl_item_t **nitems = (hashtbl_item_t**)malloc(nsize * sizeof(hashtbl_item_t*)); + if (nitems == 0) return; // if we can't realloc, too bad, we just lose space + + for (size_t i = 0; i < nsize; i++) nitems[i] = 0; + + // rehash + for (size_t i = 0; i < ht->size; i++) { + while (ht->items[i] != 0) { + hashtbl_item_t *x = ht->items[i]; + ht->items[i] = x->next; + + size_t slot = SLOT_OF_HASH(ht->hf(x->key), nsize); + x->next = nitems[slot]; + nitems[slot] = x; + } + } + free(ht->items); + ht->size = nsize; + ht->items = nitems; + } +} + +bool hashtbl_add(hashtbl_t *ht, void* key, void* v) { + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + hashtbl_item_t *i = (hashtbl_item_t*)malloc(sizeof(hashtbl_item_t)); + if (i == 0) return false; // OOM + + // make sure item is not already present + hashtbl_remove(ht, key); + + i->key = key; + i->val = v; + i->next = ht->items[slot]; + ht->items[slot] = i; + 
ht->nitems++; + + hashtbl_check_size(ht); + + return true; +} + +void* hashtbl_find(hashtbl_t* ht, const void* key) { + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + for (hashtbl_item_t *i = ht->items[slot]; i != 0; i = i->next) { + if (ht->ef(i->key, key)) return i->val; + } + + return 0; +} + +void hashtbl_remove(hashtbl_t* ht, const void* key) { + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + if (ht->items[slot] == 0) return; + + if (ht->ef(ht->items[slot]->key, key)) { + hashtbl_item_t *x = ht->items[slot]; + ht->items[slot] = x->next; + if (ht->kff) ht->kff(x->key); + free(x); + ht->nitems--; + } else { + for (hashtbl_item_t *i = ht->items[slot]; i->next != 0; i = i->next) { + if (ht->ef(i->next->key, key)) { + hashtbl_item_t *x = i->next; + i->next = x->next; + if (ht->kff) ht->kff(x->key); + free(x); + ht->nitems--; + break; + } + } + } + + hashtbl_check_size(ht); +} + +size_t hashtbl_count(hashtbl_t* ht) { + return ht->nitems; +} + +hash_t id_hash_fun(const void* v) { + return (hash_t)v; +} + +hash_t str_hash_fun(const void* v) { + hash_t h = 712; + for (char* s = (char*)v; *s != 0; s++) { + h = h * 101 + *s; + } + return h; +} + +bool id_key_eq_fun(const void* a, const void* b) { + return a == b; +} + +bool str_key_eq_fun(const void* a, const void* b) { + return strcmp((const char*)a, (const char*)b) == 0; +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libc/Makefile b/src/common/libc/Makefile new file mode 100644 index 0000000..54fa9f1 --- /dev/null +++ b/src/common/libc/Makefile @@ -0,0 +1,11 @@ +OBJ = string.o printf.o + +LIB = + +CFLAGS = -I ../include + +LDFLAGS = + +OUT = libc.lib + +include ../../rules.make diff --git a/src/common/libc/printf.c b/src/common/libc/printf.c new file mode 100644 index 0000000..68e08d8 --- /dev/null +++ b/src/common/libc/printf.c @@ -0,0 +1,120 @@ +#include +#include + +int snprintf(char * buff, size_t len, const char *format, ...) { + va_list ap; + + va_start(ap, format); + len = vsnprintf(buff, len, format, ap); + va_end(ap); + + return len; +} + +int vsnprintf(char *buff, size_t len, const char* format, va_list ap){ + size_t i, result; + + if (!buff || !format) + return -1; + +#define PUTCHAR(thechar) \ + do { \ + if (result < len-1) \ + *buff++ = (thechar); \ + result++; \ + } while (0) + + result = 0; + for(i = 0; format[i] != '\0' ; i++) { + if (format[i] == '%') { + i++; + switch(format[i]) { + case '%': + PUTCHAR('%'); + break; + + case 'i':; + case 'd': { + int integer = va_arg(ap,int); + int cpt2 = 0; + char buff_int[16]; + + if (integer<0) + PUTCHAR('-'); + + do { + int m10 = integer%10; + m10 = (m10 < 0)? 
-m10:m10; + buff_int[cpt2++] = (char)('0'+ m10); + integer = integer/10; + } while(integer != 0); + + for(cpt2 = cpt2 - 1; cpt2 >= 0; cpt2--) + PUTCHAR(buff_int[cpt2]); + + break; + } + case 'c': { + int value = va_arg(ap,int); + PUTCHAR((char)value); + break; + } + case 's': { + char *string = va_arg(ap,char *); + if (!string) + string = "(null)"; + for(; *string != '\0' ; string++) + PUTCHAR(*string); + break; + } + case 'x': { + unsigned int hexa = va_arg(ap,int); + unsigned int nb; + int j, had_nonzero = 0; + for(j = 0; j < 8; j++) { + nb = (unsigned int)(hexa << (j*4)); + nb = (nb >> 28) & 0xf; + // Skip the leading zeros + if (nb == 0) { + if (had_nonzero) + PUTCHAR('0'); + } else { + had_nonzero = 1; + if (nb < 10) + PUTCHAR('0'+nb); + else + PUTCHAR('a'+(nb-10)); + } + } + if (!had_nonzero) + PUTCHAR('0'); + break; + } + case 'p': { + unsigned int hexa = va_arg(ap,int); + unsigned int nb; + int j; + for (j = 0; j < 8; j++) { + nb = (unsigned int)(hexa << (j*4)); + nb = (nb >> 28) & 0xf; + if (nb < 10) + PUTCHAR('0'+nb); + else + PUTCHAR('a'+(nb-10)); + } + break; + } + default: + PUTCHAR('%'); + PUTCHAR(format[i]); + } + } else { + PUTCHAR(format[i]); + } + } + + *buff = '\0'; + return result; +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libc/string.c b/src/common/libc/string.c new file mode 100644 index 0000000..f6c27b4 --- /dev/null +++ b/src/common/libc/string.c @@ -0,0 +1,126 @@ +#include +#include + + +size_t strlen(const char* str) { + size_t ret = 0; + while (str[ret] != 0) + ret++; + return ret; +} + +char *strchr(const char *str, char c) { + do { + if (*str == c) return (char*)str; + } while (*(str++)); + return NULL; +} + +char *strrchr(const char *str, char c) { + char* ret = NULL; + do { + if (*str == c) ret = (char*)str; + } while (*(str++)); + return ret; +} + +char *strcpy(char *dest, const char *src) { + memcpy(dest, src, strlen(src) + 1); + return (char*)dest; +} + +char *strncpy(char *dest, const char *src, size_t n) { + size_t x = strlen(src + 1); + if (n < x) x = n; + memcpy(dest, src, x); + if (n > x) memset(dest + n, 0, n - x); + return (char*)dest; +} + +char *strcat(char *dest, const char *src) { + char *dest2 = dest; + dest2 += strlen(dest) - 1; + while (*src) { + *dest2 = *src; + src++; + dest2++; + } + *dest2 = 0; + return dest; +} + +int strcmp(const char *s1, const char *s2) { + while ((*s1) && (*s1 == *s2)) { + s1++; + s2++; + } + return (*(unsigned char*)s1 - *(unsigned char*)s2); +} + +int strncmp(const char *s1, const char *s2, size_t n) { + size_t i = 0; + while ((*s1) && (*s1 == *s2) && i != n) { + s1++; + s2++; + i++; + } + return (*(unsigned char*)s1 - *(unsigned char*)s2); +} + +void *memcpy(void *vd, const void *vs, size_t count) { + uint8_t *dest = (uint8_t*)vd, *src = (uint8_t*)vs; + size_t f = count % 4, n = count / 4, i; + const uint32_t* s = (uint32_t*)src; + uint32_t* d = (uint32_t*)dest; + for (i = 0; i < n; i++) { + d[i] = s[i]; + } + if (f != 0) { + for (i = count - f; i < count; i++) { + dest[i] = src[i]; + } + } + return vd; +} + +void *memmove(void *vd, const void *vs, size_t count) { + uint8_t *dest = (uint8_t*)vd, *src = (uint8_t*)vs; + + if (vd < vs) { + for (size_t i = 0; i < count; i++) + dest[i] = src[i]; + } else { + for (size_t i = 0; i < count; i++) + dest[count - i] = src[count - i]; + } + return vd; +} + +int memcmp(const void *va, const void *vb, size_t count) { + uint8_t *a = (uint8_t*)va; + uint8_t *b = (uint8_t*)vb; + for (size_t i = 0; i < count; i++) { + if (a[i] != b[i]) return (int)a[i] - 
(int)b[i]; + } + return 0; +} + +void *memset(void *dest, int val, size_t count) { + uint8_t *dest_c = (uint8_t*)dest; + for (size_t i = 0; i < count; i++) { + dest_c[i] = val; + } + return dest; +} + +char *strdup(const char* str) { + int len = strlen(str) + 1; + + char* ret = (char*)malloc(len); + if (ret == 0) return 0; + + memcpy(ret, str, len); + return ret; +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libkogata/Makefile b/src/common/libkogata/Makefile new file mode 100644 index 0000000..ea70be5 --- /dev/null +++ b/src/common/libkogata/Makefile @@ -0,0 +1,11 @@ +OBJ = slab_alloc.o mutex.o + +LIB = + +CFLAGS = -I ../include + +LDFLAGS = + +OUT = libkogata.lib + +include ../../rules.make diff --git a/src/common/libkogata/mutex.c b/src/common/libkogata/mutex.c new file mode 100644 index 0000000..b345ee5 --- /dev/null +++ b/src/common/libkogata/mutex.c @@ -0,0 +1,27 @@ +#include + +/* Internal use only. This function is atomic, meaning it cannot be interrupted by a system task switch. */ +static uint32_t atomic_exchange(uint32_t* ptr, uint32_t newval) { + uint32_t r; + asm volatile("xchg (%%ecx), %%eax" : "=a"(r) : "c"(ptr), "a"(newval)); + return r; +} + +void mutex_lock(uint32_t* mutex) { + while (atomic_exchange(mutex, MUTEX_LOCKED) == MUTEX_LOCKED) { + yield(); + } +} + +bool mutex_try_lock(uint32_t* mutex) { + if (atomic_exchange(mutex, MUTEX_LOCKED) == MUTEX_LOCKED) { + return false; + } + return true; +} + +void mutex_unlock(uint32_t* mutex) { + *mutex = MUTEX_UNLOCKED; +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libkogata/slab_alloc.c b/src/common/libkogata/slab_alloc.c new file mode 100644 index 0000000..714c49f --- /dev/null +++ b/src/common/libkogata/slab_alloc.c @@ -0,0 +1,283 @@ +#include + +typedef struct object { + struct object *next; +} object_t; + +typedef struct cache { + void* region_addr; + + uint32_t n_free_objs; + object_t* first_free_obj; + + struct cache *next_cache; // next cache in this slab +} cache_t; + +typedef struct region { + void* region_addr; + size_t region_size; + struct region *next_region; + bool contains_descriptors; +} region_t; + +typedef union descriptor { + cache_t c; + region_t r; + union descriptor *next_free; +} descriptor_t; + +typedef struct slab { + cache_t *first_cache; // linked list of caches +} slab_t; + +struct mem_allocator { + const slab_type_t *types; + slab_t *slabs; + + descriptor_t *first_free_descriptor; + region_t *all_regions; + + page_alloc_fun_t alloc_fun; + page_free_fun_t free_fun; +}; + +// ============================================== // +// Helper functions for the manipulation of lists // +// ============================================== // + +static void add_free_descriptor(mem_allocator_t *a, descriptor_t *c) { + c->next_free = a->first_free_descriptor; + a->first_free_descriptor = c; +} + +static descriptor_t *take_descriptor(mem_allocator_t *a) { + if (a->first_free_descriptor == 0) { + void* p = a->alloc_fun(PAGE_SIZE); + if (p == 0) return 0; + + const void* end = p + PAGE_SIZE; + for (descriptor_t *i = (descriptor_t*)p; i + 1 <= (descriptor_t*)end; i++) { + add_free_descriptor(a, i); + } + + // register the descriptor region + descriptor_t *dd = a->first_free_descriptor; + ASSERT(dd != 0); + a->first_free_descriptor = dd->next_free; + + region_t *drd = &dd->r; + drd->region_addr = p; + drd->region_size = PAGE_SIZE; + drd->contains_descriptors = true; + drd->next_region = a->all_regions; + a->all_regions = drd; + } + + descriptor_t *x = a->first_free_descriptor; + 
ASSERT(x != 0); + a->first_free_descriptor = x->next_free; + return x; +} + +// ============================== // +// The actual allocator functions // +// ============================== // + +mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff) { + union { + void* addr; + mem_allocator_t *a; + slab_t *s; + descriptor_t *d; + } ptr; + + ptr.addr = af(PAGE_SIZE); + if (ptr.addr == 0) return 0; // could not allocate + const void* end_addr = ptr.addr + PAGE_SIZE; + + mem_allocator_t *a = ptr.a; + ptr.a++; + + a->all_regions = 0; + a->alloc_fun = af; + a->free_fun = ff; + + a->types = types; + a->slabs = ptr.s; + for (const slab_type_t *t = types; t->obj_size != 0; t++) { + ASSERT(t->obj_size >= sizeof(object_t)); + ptr.s->first_cache = 0; + ptr.s++; + } + + a->first_free_descriptor = 0; + while (ptr.d + 1 <= (descriptor_t*)end_addr) { + add_free_descriptor(a, ptr.d); + ptr.d++; + } + + return a; +} + +static void stack_and_destroy_regions(page_free_fun_t ff, region_t *r) { + if (r == 0) return; + void* addr = r->region_addr; + ASSERT(r != r->next_region); + stack_and_destroy_regions(ff, r->next_region); + ff(addr); +} +void destroy_slab_allocator(mem_allocator_t *a) { + for (int i = 0; a->types[i].obj_size != 0; i++) { + for (cache_t *c = a->slabs[i].first_cache; c != 0; c = c->next_cache) { + a->free_fun(c->region_addr); + } + } + region_t *dr = 0; + region_t *i = a->all_regions; + while (i != 0) { + region_t *r = i; + i = r->next_region; + if (r->contains_descriptors) { + r->next_region = dr; + dr = r; + } else { + a->free_fun(r->region_addr); + } + } + stack_and_destroy_regions(a->free_fun, dr); + a->free_fun(a); +} + +void* slab_alloc(mem_allocator_t* a, size_t sz) { + for (int i = 0; a->types[i].obj_size != 0; i++) { + const size_t obj_size = a->types[i].obj_size; + if (sz <= obj_size) { + // find a cache with free space + cache_t *fc = a->slabs[i].first_cache; + while (fc != 0 && fc->n_free_objs == 0) { + ASSERT(fc->first_free_obj == 0); // make sure n_free == 0 iff no object in the free stack + fc = fc->next_cache; + } + // if none found, try to allocate a new one + if (fc == 0) { + descriptor_t *fcd = take_descriptor(a); + if (fcd == 0) return 0; + + fc = &fcd->c; + ASSERT((descriptor_t*)fc == fcd); + + const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE; + fc->region_addr = a->alloc_fun(cache_size); + if (fc->region_addr == 0) { + add_free_descriptor(a, fcd); + return 0; + } + + fc->n_free_objs = 0; + fc->first_free_obj = 0; + for (void* p = fc->region_addr; p + obj_size <= fc->region_addr + cache_size; p += obj_size) { + object_t *x = (object_t*)p; + x->next = fc->first_free_obj; + fc->first_free_obj = x; + fc->n_free_objs++; + } + ASSERT(fc->n_free_objs == cache_size / obj_size); + + fc->next_cache = a->slabs[i].first_cache; + a->slabs[i].first_cache = fc; + } + // allocate on fc + ASSERT(fc != 0 && fc->n_free_objs > 0); + + object_t *x = fc->first_free_obj; + fc->first_free_obj = x->next; + fc->n_free_objs--; + + ASSERT((fc->n_free_objs == 0) == (fc->first_free_obj == 0)); + + // TODO : if fc is full, put it at the end + return x; + } + } + + // otherwise directly allocate using a->alloc_fun + descriptor_t *rd = take_descriptor(a); + if (rd == 0) return 0; + region_t *r = &rd->r; + ASSERT((descriptor_t*)r == rd); + + r->region_addr = a->alloc_fun(sz); + if (r->region_addr == 0) { + add_free_descriptor(a, rd); + return 0; + } else { + r->region_size = sz; + r->contains_descriptors = false; + + r->next_region = 
a->all_regions; + a->all_regions = r; + + return (void*)r->region_addr; + } +} + +void slab_free(mem_allocator_t* a, void* addr) { + + for (int i = 0; a->types[i].obj_size != 0; i++) { + size_t region_size = PAGE_SIZE * a->types[i].pages_per_cache; + for (cache_t *r = a->slabs[i].first_cache; r != 0; r = r->next_cache) { + if (addr >= r->region_addr && addr < r->region_addr + region_size) { + ASSERT((addr - r->region_addr) % a->types[i].obj_size == 0); + + object_t *o = (object_t*)addr; + o->next = r->first_free_obj; + r->first_free_obj = o; + r->n_free_objs++; + + if (r->n_free_objs == region_size / a->types[i].obj_size) { + // region is completely unused, free it. + if (a->slabs[i].first_cache == r) { + a->slabs[i].first_cache = r->next_cache; + } else { + for (cache_t *it = a->slabs[i].first_cache; it->next_cache != 0; it = it->next_cache) { + if (it->next_cache == r) { + it->next_cache = r->next_cache; + break; + } + } + } + a->free_fun(r->region_addr); + add_free_descriptor(a, (descriptor_t*)r); + } + return; + } + } + } + + // otherwise the block was directly allocated : look for it in regions. + ASSERT(a->all_regions != 0); + + if (a->all_regions->region_addr == addr) { + a->free_fun(addr); // found it, free it + + region_t *r = a->all_regions; + a->all_regions = r->next_region; + add_free_descriptor(a, (descriptor_t*)r); + } else { + for (region_t *i = a->all_regions; i->next_region != 0; i = i->next_region) { + if (i->next_region->region_addr == addr) { + a->free_fun(addr); // found it, free it + + region_t *r = i->next_region; + ASSERT(!r->contains_descriptors); + i->next_region = r->next_region; + add_free_descriptor(a, (descriptor_t*)r); + return; + } + } + ASSERT(false); + } +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ + diff --git a/src/common/mutex.c b/src/common/mutex.c deleted file mode 100644 index b345ee5..0000000 --- a/src/common/mutex.c +++ /dev/null @@ -1,27 +0,0 @@ -#include - -/* Internal use only. This function is atomic, meaning it cannot be interrupted by a system task switch. */ -static uint32_t atomic_exchange(uint32_t* ptr, uint32_t newval) { - uint32_t r; - asm volatile("xchg (%%ecx), %%eax" : "=a"(r) : "c"(ptr), "a"(newval)); - return r; -} - -void mutex_lock(uint32_t* mutex) { - while (atomic_exchange(mutex, MUTEX_LOCKED) == MUTEX_LOCKED) { - yield(); - } -} - -bool mutex_try_lock(uint32_t* mutex) { - if (atomic_exchange(mutex, MUTEX_LOCKED) == MUTEX_LOCKED) { - return false; - } - return true; -} - -void mutex_unlock(uint32_t* mutex) { - *mutex = MUTEX_UNLOCKED; -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/printf.c b/src/common/printf.c deleted file mode 100644 index 68e08d8..0000000 --- a/src/common/printf.c +++ /dev/null @@ -1,120 +0,0 @@ -#include -#include - -int snprintf(char * buff, size_t len, const char *format, ...) { - va_list ap; - - va_start(ap, format); - len = vsnprintf(buff, len, format, ap); - va_end(ap); - - return len; -} - -int vsnprintf(char *buff, size_t len, const char* format, va_list ap){ - size_t i, result; - - if (!buff || !format) - return -1; - -#define PUTCHAR(thechar) \ - do { \ - if (result < len-1) \ - *buff++ = (thechar); \ - result++; \ - } while (0) - - result = 0; - for(i = 0; format[i] != '\0' ; i++) { - if (format[i] == '%') { - i++; - switch(format[i]) { - case '%': - PUTCHAR('%'); - break; - - case 'i':; - case 'd': { - int integer = va_arg(ap,int); - int cpt2 = 0; - char buff_int[16]; - - if (integer<0) - PUTCHAR('-'); - - do { - int m10 = integer%10; - m10 = (m10 < 0)? 
-m10:m10; - buff_int[cpt2++] = (char)('0'+ m10); - integer = integer/10; - } while(integer != 0); - - for(cpt2 = cpt2 - 1; cpt2 >= 0; cpt2--) - PUTCHAR(buff_int[cpt2]); - - break; - } - case 'c': { - int value = va_arg(ap,int); - PUTCHAR((char)value); - break; - } - case 's': { - char *string = va_arg(ap,char *); - if (!string) - string = "(null)"; - for(; *string != '\0' ; string++) - PUTCHAR(*string); - break; - } - case 'x': { - unsigned int hexa = va_arg(ap,int); - unsigned int nb; - int j, had_nonzero = 0; - for(j = 0; j < 8; j++) { - nb = (unsigned int)(hexa << (j*4)); - nb = (nb >> 28) & 0xf; - // Skip the leading zeros - if (nb == 0) { - if (had_nonzero) - PUTCHAR('0'); - } else { - had_nonzero = 1; - if (nb < 10) - PUTCHAR('0'+nb); - else - PUTCHAR('a'+(nb-10)); - } - } - if (!had_nonzero) - PUTCHAR('0'); - break; - } - case 'p': { - unsigned int hexa = va_arg(ap,int); - unsigned int nb; - int j; - for (j = 0; j < 8; j++) { - nb = (unsigned int)(hexa << (j*4)); - nb = (nb >> 28) & 0xf; - if (nb < 10) - PUTCHAR('0'+nb); - else - PUTCHAR('a'+(nb-10)); - } - break; - } - default: - PUTCHAR('%'); - PUTCHAR(format[i]); - } - } else { - PUTCHAR(format[i]); - } - } - - *buff = '\0'; - return result; -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/slab_alloc.c b/src/common/slab_alloc.c deleted file mode 100644 index 714c49f..0000000 --- a/src/common/slab_alloc.c +++ /dev/null @@ -1,283 +0,0 @@ -#include - -typedef struct object { - struct object *next; -} object_t; - -typedef struct cache { - void* region_addr; - - uint32_t n_free_objs; - object_t* first_free_obj; - - struct cache *next_cache; // next cache in this slab -} cache_t; - -typedef struct region { - void* region_addr; - size_t region_size; - struct region *next_region; - bool contains_descriptors; -} region_t; - -typedef union descriptor { - cache_t c; - region_t r; - union descriptor *next_free; -} descriptor_t; - -typedef struct slab { - cache_t *first_cache; // linked list of caches -} slab_t; - -struct mem_allocator { - const slab_type_t *types; - slab_t *slabs; - - descriptor_t *first_free_descriptor; - region_t *all_regions; - - page_alloc_fun_t alloc_fun; - page_free_fun_t free_fun; -}; - -// ============================================== // -// Helper functions for the manipulation of lists // -// ============================================== // - -static void add_free_descriptor(mem_allocator_t *a, descriptor_t *c) { - c->next_free = a->first_free_descriptor; - a->first_free_descriptor = c; -} - -static descriptor_t *take_descriptor(mem_allocator_t *a) { - if (a->first_free_descriptor == 0) { - void* p = a->alloc_fun(PAGE_SIZE); - if (p == 0) return 0; - - const void* end = p + PAGE_SIZE; - for (descriptor_t *i = (descriptor_t*)p; i + 1 <= (descriptor_t*)end; i++) { - add_free_descriptor(a, i); - } - - // register the descriptor region - descriptor_t *dd = a->first_free_descriptor; - ASSERT(dd != 0); - a->first_free_descriptor = dd->next_free; - - region_t *drd = &dd->r; - drd->region_addr = p; - drd->region_size = PAGE_SIZE; - drd->contains_descriptors = true; - drd->next_region = a->all_regions; - a->all_regions = drd; - } - - descriptor_t *x = a->first_free_descriptor; - ASSERT(x != 0); - a->first_free_descriptor = x->next_free; - return x; -} - -// ============================== // -// The actual allocator functions // -// ============================== // - -mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff) { - union { - void* addr; - 
mem_allocator_t *a; - slab_t *s; - descriptor_t *d; - } ptr; - - ptr.addr = af(PAGE_SIZE); - if (ptr.addr == 0) return 0; // could not allocate - const void* end_addr = ptr.addr + PAGE_SIZE; - - mem_allocator_t *a = ptr.a; - ptr.a++; - - a->all_regions = 0; - a->alloc_fun = af; - a->free_fun = ff; - - a->types = types; - a->slabs = ptr.s; - for (const slab_type_t *t = types; t->obj_size != 0; t++) { - ASSERT(t->obj_size >= sizeof(object_t)); - ptr.s->first_cache = 0; - ptr.s++; - } - - a->first_free_descriptor = 0; - while (ptr.d + 1 <= (descriptor_t*)end_addr) { - add_free_descriptor(a, ptr.d); - ptr.d++; - } - - return a; -} - -static void stack_and_destroy_regions(page_free_fun_t ff, region_t *r) { - if (r == 0) return; - void* addr = r->region_addr; - ASSERT(r != r->next_region); - stack_and_destroy_regions(ff, r->next_region); - ff(addr); -} -void destroy_slab_allocator(mem_allocator_t *a) { - for (int i = 0; a->types[i].obj_size != 0; i++) { - for (cache_t *c = a->slabs[i].first_cache; c != 0; c = c->next_cache) { - a->free_fun(c->region_addr); - } - } - region_t *dr = 0; - region_t *i = a->all_regions; - while (i != 0) { - region_t *r = i; - i = r->next_region; - if (r->contains_descriptors) { - r->next_region = dr; - dr = r; - } else { - a->free_fun(r->region_addr); - } - } - stack_and_destroy_regions(a->free_fun, dr); - a->free_fun(a); -} - -void* slab_alloc(mem_allocator_t* a, size_t sz) { - for (int i = 0; a->types[i].obj_size != 0; i++) { - const size_t obj_size = a->types[i].obj_size; - if (sz <= obj_size) { - // find a cache with free space - cache_t *fc = a->slabs[i].first_cache; - while (fc != 0 && fc->n_free_objs == 0) { - ASSERT(fc->first_free_obj == 0); // make sure n_free == 0 iff no object in the free stack - fc = fc->next_cache; - } - // if none found, try to allocate a new one - if (fc == 0) { - descriptor_t *fcd = take_descriptor(a); - if (fcd == 0) return 0; - - fc = &fcd->c; - ASSERT((descriptor_t*)fc == fcd); - - const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE; - fc->region_addr = a->alloc_fun(cache_size); - if (fc->region_addr == 0) { - add_free_descriptor(a, fcd); - return 0; - } - - fc->n_free_objs = 0; - fc->first_free_obj = 0; - for (void* p = fc->region_addr; p + obj_size <= fc->region_addr + cache_size; p += obj_size) { - object_t *x = (object_t*)p; - x->next = fc->first_free_obj; - fc->first_free_obj = x; - fc->n_free_objs++; - } - ASSERT(fc->n_free_objs == cache_size / obj_size); - - fc->next_cache = a->slabs[i].first_cache; - a->slabs[i].first_cache = fc; - } - // allocate on fc - ASSERT(fc != 0 && fc->n_free_objs > 0); - - object_t *x = fc->first_free_obj; - fc->first_free_obj = x->next; - fc->n_free_objs--; - - ASSERT((fc->n_free_objs == 0) == (fc->first_free_obj == 0)); - - // TODO : if fc is full, put it at the end - return x; - } - } - - // otherwise directly allocate using a->alloc_fun - descriptor_t *rd = take_descriptor(a); - if (rd == 0) return 0; - region_t *r = &rd->r; - ASSERT((descriptor_t*)r == rd); - - r->region_addr = a->alloc_fun(sz); - if (r->region_addr == 0) { - add_free_descriptor(a, rd); - return 0; - } else { - r->region_size = sz; - r->contains_descriptors = false; - - r->next_region = a->all_regions; - a->all_regions = r; - - return (void*)r->region_addr; - } -} - -void slab_free(mem_allocator_t* a, void* addr) { - - for (int i = 0; a->types[i].obj_size != 0; i++) { - size_t region_size = PAGE_SIZE * a->types[i].pages_per_cache; - for (cache_t *r = a->slabs[i].first_cache; r != 0; r = r->next_cache) { - if 
(addr >= r->region_addr && addr < r->region_addr + region_size) { - ASSERT((addr - r->region_addr) % a->types[i].obj_size == 0); - - object_t *o = (object_t*)addr; - o->next = r->first_free_obj; - r->first_free_obj = o; - r->n_free_objs++; - - if (r->n_free_objs == region_size / a->types[i].obj_size) { - // region is completely unused, free it. - if (a->slabs[i].first_cache == r) { - a->slabs[i].first_cache = r->next_cache; - } else { - for (cache_t *it = a->slabs[i].first_cache; it->next_cache != 0; it = it->next_cache) { - if (it->next_cache == r) { - it->next_cache = r->next_cache; - break; - } - } - } - a->free_fun(r->region_addr); - add_free_descriptor(a, (descriptor_t*)r); - } - return; - } - } - } - - // otherwise the block was directly allocated : look for it in regions. - ASSERT(a->all_regions != 0); - - if (a->all_regions->region_addr == addr) { - a->free_fun(addr); // found it, free it - - region_t *r = a->all_regions; - a->all_regions = r->next_region; - add_free_descriptor(a, (descriptor_t*)r); - } else { - for (region_t *i = a->all_regions; i->next_region != 0; i = i->next_region) { - if (i->next_region->region_addr == addr) { - a->free_fun(addr); // found it, free it - - region_t *r = i->next_region; - ASSERT(!r->contains_descriptors); - i->next_region = r->next_region; - add_free_descriptor(a, (descriptor_t*)r); - return; - } - } - ASSERT(false); - } -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ - diff --git a/src/common/string.c b/src/common/string.c deleted file mode 100644 index f6c27b4..0000000 --- a/src/common/string.c +++ /dev/null @@ -1,126 +0,0 @@ -#include -#include - - -size_t strlen(const char* str) { - size_t ret = 0; - while (str[ret] != 0) - ret++; - return ret; -} - -char *strchr(const char *str, char c) { - do { - if (*str == c) return (char*)str; - } while (*(str++)); - return NULL; -} - -char *strrchr(const char *str, char c) { - char* ret = NULL; - do { - if (*str == c) ret = (char*)str; - } while (*(str++)); - return ret; -} - -char *strcpy(char *dest, const char *src) { - memcpy(dest, src, strlen(src) + 1); - return (char*)dest; -} - -char *strncpy(char *dest, const char *src, size_t n) { - size_t x = strlen(src + 1); - if (n < x) x = n; - memcpy(dest, src, x); - if (n > x) memset(dest + n, 0, n - x); - return (char*)dest; -} - -char *strcat(char *dest, const char *src) { - char *dest2 = dest; - dest2 += strlen(dest) - 1; - while (*src) { - *dest2 = *src; - src++; - dest2++; - } - *dest2 = 0; - return dest; -} - -int strcmp(const char *s1, const char *s2) { - while ((*s1) && (*s1 == *s2)) { - s1++; - s2++; - } - return (*(unsigned char*)s1 - *(unsigned char*)s2); -} - -int strncmp(const char *s1, const char *s2, size_t n) { - size_t i = 0; - while ((*s1) && (*s1 == *s2) && i != n) { - s1++; - s2++; - i++; - } - return (*(unsigned char*)s1 - *(unsigned char*)s2); -} - -void *memcpy(void *vd, const void *vs, size_t count) { - uint8_t *dest = (uint8_t*)vd, *src = (uint8_t*)vs; - size_t f = count % 4, n = count / 4, i; - const uint32_t* s = (uint32_t*)src; - uint32_t* d = (uint32_t*)dest; - for (i = 0; i < n; i++) { - d[i] = s[i]; - } - if (f != 0) { - for (i = count - f; i < count; i++) { - dest[i] = src[i]; - } - } - return vd; -} - -void *memmove(void *vd, const void *vs, size_t count) { - uint8_t *dest = (uint8_t*)vd, *src = (uint8_t*)vs; - - if (vd < vs) { - for (size_t i = 0; i < count; i++) - dest[i] = src[i]; - } else { - for (size_t i = 0; i < count; i++) - dest[count - i] = src[count - i]; - } - return vd; -} - -int memcmp(const void *va, const void 
*vb, size_t count) { - uint8_t *a = (uint8_t*)va; - uint8_t *b = (uint8_t*)vb; - for (size_t i = 0; i < count; i++) { - if (a[i] != b[i]) return (int)a[i] - (int)b[i]; - } - return 0; -} - -void *memset(void *dest, int val, size_t count) { - uint8_t *dest_c = (uint8_t*)dest; - for (size_t i = 0; i < count; i++) { - dest_c[i] = val; - } - return dest; -} - -char *strdup(const char* str) { - int len = strlen(str) + 1; - - char* ret = (char*)malloc(len); - if (ret == 0) return 0; - - memcpy(ret, str, len); - return ret; -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ -- cgit v1.2.3
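
As a closing illustration, a short usage sketch for the hash table that now lives in libalgo, written against the signatures visible in the diff above. The header name `hashtbl.h` is an assumption (the include names were stripped in this rendering), and it presumes a working malloc/free from the embedding environment.

    #include <hashtbl.h>   /* assumed header name for the declarations shown above */

    void hashtbl_example(void) {
        /* String keys: use the provided str_* helpers. Pass 0 for the key-free
         * function (the keys here are literals) and 0 for the initial size
         * (which selects the default of 16 slots). */
        hashtbl_t *ht = create_hashtbl(str_key_eq_fun, str_hash_fun, 0, 0);
        if (ht == 0) return;                              /* out of memory */

        hashtbl_add(ht, "forty-two", "42");               /* returns false on OOM */

        char *v = (char*)hashtbl_find(ht, "forty-two");   /* -> "42" */
        (void)v;

        hashtbl_remove(ht, "forty-two");                  /* count drops back to 0 */
        delete_hashtbl(ht, 0);                            /* 0: values need no freeing */
    }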