From 32407e728971006ed3d0885e01c22fb66c8adc57 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Fri, 15 Jul 2016 23:12:14 +0200 Subject: Move stuff around, again --- src/common/bam.lua | 1 - src/common/include/algo.h | 37 ---- src/common/include/btree.h | 33 --- src/common/include/debug.h | 18 -- src/common/include/hashtbl.h | 35 ---- src/common/include/kogata/algo.h | 37 ++++ src/common/include/kogata/btree.h | 33 +++ src/common/include/kogata/debug.h | 18 ++ src/common/include/kogata/hashtbl.h | 35 ++++ src/common/include/kogata/malloc.h | 13 ++ src/common/include/kogata/mutex.h | 22 ++ src/common/include/kogata/printf.h | 10 + src/common/include/kogata/region_alloc.h | 32 +++ src/common/include/kogata/slab_alloc.h | 45 ++++ src/common/include/malloc.h | 13 -- src/common/include/mutex.h | 22 -- src/common/include/printf.h | 10 - src/common/include/region_alloc.h | 32 --- src/common/include/slab_alloc.h | 45 ---- src/common/libalgo/btree.c | 343 ------------------------------- src/common/libalgo/hashtbl.c | 177 ---------------- src/common/libalgo/keyval.c | 51 ----- src/common/libc/printf.c | 3 +- src/common/libc/string.c | 2 +- src/common/libkogata/btree.c | 343 +++++++++++++++++++++++++++++++ src/common/libkogata/hashtbl.c | 177 ++++++++++++++++ src/common/libkogata/keyval.c | 51 +++++ src/common/libkogata/mutex.c | 2 +- src/common/libkogata/region_alloc.c | 6 +- src/common/libkogata/slab_alloc.c | 2 +- 30 files changed, 824 insertions(+), 824 deletions(-) delete mode 100644 src/common/include/algo.h delete mode 100644 src/common/include/btree.h delete mode 100644 src/common/include/debug.h delete mode 100644 src/common/include/hashtbl.h create mode 100644 src/common/include/kogata/algo.h create mode 100644 src/common/include/kogata/btree.h create mode 100644 src/common/include/kogata/debug.h create mode 100644 src/common/include/kogata/hashtbl.h create mode 100644 src/common/include/kogata/malloc.h create mode 100644 src/common/include/kogata/mutex.h create mode 100644 src/common/include/kogata/printf.h create mode 100644 src/common/include/kogata/region_alloc.h create mode 100644 src/common/include/kogata/slab_alloc.h delete mode 100644 src/common/include/malloc.h delete mode 100644 src/common/include/mutex.h delete mode 100644 src/common/include/printf.h delete mode 100644 src/common/include/region_alloc.h delete mode 100644 src/common/include/slab_alloc.h delete mode 100644 src/common/libalgo/btree.c delete mode 100644 src/common/libalgo/hashtbl.c delete mode 100644 src/common/libalgo/keyval.c create mode 100644 src/common/libkogata/btree.c create mode 100644 src/common/libkogata/hashtbl.c create mode 100644 src/common/libkogata/keyval.c (limited to 'src/common') diff --git a/src/common/bam.lua b/src/common/bam.lua index 8b302fa..6d4a29c 100644 --- a/src/common/bam.lua +++ b/src/common/bam.lua @@ -3,6 +3,5 @@ local function lib(name) return Compile(common_settings, source) end -common_libalgo = lib('libalgo') common_libc = lib('libc') common_libkogata = lib('libkogata') diff --git a/src/common/include/algo.h b/src/common/include/algo.h deleted file mode 100644 index 80af052..0000000 --- a/src/common/include/algo.h +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include -#include -#include - -#define MIN(a, b) ((a)<(b)?(a):(b)) -#define MAX(a, b) ((a)>(b)?(a):(b)) -#define ABS(a) ((a)<0?-(a):(a)) - -// ============================================================= // -// FUNCTION TYPES FOR KEY-VALUE DATA STRUCTURES (HASHTBL, BTREE) // - -typedef uint32_t hash_t; -typedef hash_t 
(*hash_fun_t)(const void*); - -typedef int (*key_cmp_fun_t)(const void*, const void*); -typedef bool (*key_eq_fun_t)(const void*, const void*); - -typedef void (*kv_iter_fun_t)(void* key, void* value); - -// void* is considered as an unsigned integer (and not a pointer) -hash_t id_hash_fun(const void* v); -bool id_key_eq_fun(const void* a, const void* b); -int id_key_cmp_fun(const void* a, const void* b); - -// void* considered as char* -hash_t str_hash_fun(const void* v); -bool str_key_eq_fun(const void* a, const void* b); -int str_key_cmp_fun(const void* a, const void* b); - -// Freeing functions (they are of type kv_iter_fun_t) -void free_key(void* key, void* val); -void free_val(void* key, void* val); -void free_key_val(void* key, void* val); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/btree.h b/src/common/include/btree.h deleted file mode 100644 index a645d66..0000000 --- a/src/common/include/btree.h +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -#include - -// A btree may contain several bindings for the same key (in that case they are not ordered) -// - btree_find returns any item with matching key, or null if none exists -// - btree_lower returns any item with matching key, or if none returns last item with smaller key -// - btree_upper returns any item with matching key, or if none returns first item with bigger key -// - btree_remove removes *all bindings* with matching key -// - btree_remove_v removes bindings with matching *key and value* -// - btree_iter_on calls iterator function on all bindings with matching key - -// Memory management is same as for hashtbl (a kv_iter_fun_t is called when an item is released) - -struct btree; -typedef struct btree btree_t; - -btree_t* create_btree(key_cmp_fun_t cf, kv_iter_fun_t on_release); -void delete_btree(btree_t *t); - -bool btree_add(btree_t *t, void* key, void* val); -void btree_remove(btree_t *t, const void* key); -void btree_remove_v(btree_t *t, const void* key, const void* value); - -void* btree_find(btree_t *i, const void* key); -void* btree_lower(btree_t *i, const void* key, void** actual_key); -void* btree_upper(btree_t *i, const void* key, void** actual_key); -void btree_iter(btree_t *i, kv_iter_fun_t f); -void btree_iter_on(btree_t *i, const void* key, kv_iter_fun_t f); - -size_t btree_count(btree_t *i); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/debug.h b/src/common/include/debug.h deleted file mode 100644 index 2db5a80..0000000 --- a/src/common/include/debug.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once - -#include -#include - -void sys_panic(const char* message, const char* file, int line) -__attribute__((__noreturn__)); - -void sys_panic_assert(const char* assertion, const char* file, int line) -__attribute__((__noreturn__)); - -#define PANIC(s) sys_panic(s, __FILE__, __LINE__); -#define ASSERT(s) { if (!(s)) sys_panic_assert(#s, __FILE__, __LINE__); } - -void dbg_print(const char* str); -void dbg_printf(const char* format, ...); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/hashtbl.h b/src/common/include/hashtbl.h deleted file mode 100644 index b9c7178..0000000 --- a/src/common/include/hashtbl.h +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once - -#include - -// Simple hashtable structure (key -> void*) -// Supports adding, seeking, removing, iterating -// When adding a binding to the table, the previous binding for same key (if exists) is removed - -// The hashtbl is allocated with malloc/free -// The keys/values are not copied by the hastbl, 
but when a key/value pair is removed from the -// table some operations may be required (freeing memory), so a kv_iter_fun_t is passed when the -// table is created which will be called whenever a k/v is released (on hashtbl_remove and delete_hashtbl) - -// A hashtbl may have only one binding for a given key (on add, previous binding is removed if necessary) - -struct hashtbl; -typedef struct hashtbl hashtbl_t; - -hashtbl_t* create_hashtbl(key_eq_fun_t ef, hash_fun_t hf, kv_iter_fun_t on_release); -void delete_hashtbl(hashtbl_t* ht); - -bool hashtbl_add(hashtbl_t* ht, void* key, void* v); // true = ok, false on error (OOM for instance) -void hashtbl_remove(hashtbl_t* ht, const void* key); - -void* hashtbl_find(hashtbl_t* ht, const void* key); // null when not found -void hashtbl_iter(hashtbl_t* ht, kv_iter_fun_t f); - -// hashtbl_change is particular : -// - it does NOT call malloc and uses the existing hashtbl cell -// - it does NOT call the on_release fun on the previous element -bool hashtbl_change(hashtbl_t* ht, void* key, void* v); - -size_t hashtbl_count(hashtbl_t* ht); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/algo.h b/src/common/include/kogata/algo.h new file mode 100644 index 0000000..80af052 --- /dev/null +++ b/src/common/include/kogata/algo.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include + +#define MIN(a, b) ((a)<(b)?(a):(b)) +#define MAX(a, b) ((a)>(b)?(a):(b)) +#define ABS(a) ((a)<0?-(a):(a)) + +// ============================================================= // +// FUNCTION TYPES FOR KEY-VALUE DATA STRUCTURES (HASHTBL, BTREE) // + +typedef uint32_t hash_t; +typedef hash_t (*hash_fun_t)(const void*); + +typedef int (*key_cmp_fun_t)(const void*, const void*); +typedef bool (*key_eq_fun_t)(const void*, const void*); + +typedef void (*kv_iter_fun_t)(void* key, void* value); + +// void* is considered as an unsigned integer (and not a pointer) +hash_t id_hash_fun(const void* v); +bool id_key_eq_fun(const void* a, const void* b); +int id_key_cmp_fun(const void* a, const void* b); + +// void* considered as char* +hash_t str_hash_fun(const void* v); +bool str_key_eq_fun(const void* a, const void* b); +int str_key_cmp_fun(const void* a, const void* b); + +// Freeing functions (they are of type kv_iter_fun_t) +void free_key(void* key, void* val); +void free_val(void* key, void* val); +void free_key_val(void* key, void* val); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/btree.h b/src/common/include/kogata/btree.h new file mode 100644 index 0000000..e796a2d --- /dev/null +++ b/src/common/include/kogata/btree.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +// A btree may contain several bindings for the same key (in that case they are not ordered) +// - btree_find returns any item with matching key, or null if none exists +// - btree_lower returns any item with matching key, or if none returns last item with smaller key +// - btree_upper returns any item with matching key, or if none returns first item with bigger key +// - btree_remove removes *all bindings* with matching key +// - btree_remove_v removes bindings with matching *key and value* +// - btree_iter_on calls iterator function on all bindings with matching key + +// Memory management is same as for hashtbl (a kv_iter_fun_t is called when an item is released) + +struct btree; +typedef struct btree btree_t; + +btree_t* create_btree(key_cmp_fun_t cf, kv_iter_fun_t on_release); +void delete_btree(btree_t *t); + +bool btree_add(btree_t *t, void* key, 
void* val); +void btree_remove(btree_t *t, const void* key); +void btree_remove_v(btree_t *t, const void* key, const void* value); + +void* btree_find(btree_t *i, const void* key); +void* btree_lower(btree_t *i, const void* key, void** actual_key); +void* btree_upper(btree_t *i, const void* key, void** actual_key); +void btree_iter(btree_t *i, kv_iter_fun_t f); +void btree_iter_on(btree_t *i, const void* key, kv_iter_fun_t f); + +size_t btree_count(btree_t *i); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/debug.h b/src/common/include/kogata/debug.h new file mode 100644 index 0000000..2db5a80 --- /dev/null +++ b/src/common/include/kogata/debug.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include + +void sys_panic(const char* message, const char* file, int line) +__attribute__((__noreturn__)); + +void sys_panic_assert(const char* assertion, const char* file, int line) +__attribute__((__noreturn__)); + +#define PANIC(s) sys_panic(s, __FILE__, __LINE__); +#define ASSERT(s) { if (!(s)) sys_panic_assert(#s, __FILE__, __LINE__); } + +void dbg_print(const char* str); +void dbg_printf(const char* format, ...); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/hashtbl.h b/src/common/include/kogata/hashtbl.h new file mode 100644 index 0000000..e8f327c --- /dev/null +++ b/src/common/include/kogata/hashtbl.h @@ -0,0 +1,35 @@ +#pragma once + +#include + +// Simple hashtable structure (key -> void*) +// Supports adding, seeking, removing, iterating +// When adding a binding to the table, the previous binding for same key (if exists) is removed + +// The hashtbl is allocated with malloc/free +// The keys/values are not copied by the hastbl, but when a key/value pair is removed from the +// table some operations may be required (freeing memory), so a kv_iter_fun_t is passed when the +// table is created which will be called whenever a k/v is released (on hashtbl_remove and delete_hashtbl) + +// A hashtbl may have only one binding for a given key (on add, previous binding is removed if necessary) + +struct hashtbl; +typedef struct hashtbl hashtbl_t; + +hashtbl_t* create_hashtbl(key_eq_fun_t ef, hash_fun_t hf, kv_iter_fun_t on_release); +void delete_hashtbl(hashtbl_t* ht); + +bool hashtbl_add(hashtbl_t* ht, void* key, void* v); // true = ok, false on error (OOM for instance) +void hashtbl_remove(hashtbl_t* ht, const void* key); + +void* hashtbl_find(hashtbl_t* ht, const void* key); // null when not found +void hashtbl_iter(hashtbl_t* ht, kv_iter_fun_t f); + +// hashtbl_change is particular : +// - it does NOT call malloc and uses the existing hashtbl cell +// - it does NOT call the on_release fun on the previous element +bool hashtbl_change(hashtbl_t* ht, void* key, void* v); + +size_t hashtbl_count(hashtbl_t* ht); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/malloc.h b/src/common/include/kogata/malloc.h new file mode 100644 index 0000000..e55c25e --- /dev/null +++ b/src/common/include/kogata/malloc.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include + +// Header is in common/, but implementation is not. 
+ +void* malloc(size_t sz); +void free(void* ptr); +void* calloc(size_t nmemb, size_t sz); +void* realloc(void* ptr, size_t sz); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/mutex.h b/src/common/include/kogata/mutex.h new file mode 100644 index 0000000..88c077e --- /dev/null +++ b/src/common/include/kogata/mutex.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include + +#define MUTEX_LOCKED 1 +#define MUTEX_UNLOCKED 0 + + +typedef uint32_t mutex_t; + +void mutex_lock(mutex_t* mutex); //wait for mutex to be free +bool mutex_try_lock(mutex_t* mutex); //lock mutex only if free, returns true when locked, false when was busy +void mutex_unlock(mutex_t* mutex); + +// the mutex code assumes a yield() function is defined somewhere +void yield(); + +#define STATIC_MUTEX(name) static mutex_t name __attribute__((section("locks"))) = MUTEX_UNLOCKED; + + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/printf.h b/src/common/include/kogata/printf.h new file mode 100644 index 0000000..b4e1c1b --- /dev/null +++ b/src/common/include/kogata/printf.h @@ -0,0 +1,10 @@ +#pragma once + +#include +#include +#include + +int snprintf(char* s, size_t n, const char* format, ...); +int vsnprintf(char* s, size_t n, const char* format, va_list arg); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/region_alloc.h b/src/common/include/kogata/region_alloc.h new file mode 100644 index 0000000..d314bd5 --- /dev/null +++ b/src/common/include/kogata/region_alloc.h @@ -0,0 +1,32 @@ +#pragma once + +// Virtual memory region allocator + +// This is entirely thread-safe + +#include +#include +#include + +struct region_info; + +typedef bool (*map_page_fun_t)(void* addr); // map a single page (used by region allocator) + +typedef struct region_info { + void* addr; + size_t size; + char* type; +} region_info_t; + +// rsvd_end : when used for kernel memory region management, a reserved region +// exists between begin (=K_HIGHHALF_ADDR) and the end of kernel static data +// for user processes, use rsvd_end = begin (no reserved region) +void region_allocator_init(void* begin, void* rsvd_end, void* end, map_page_fun_t map); + +void* region_alloc(size_t size, char* type); // returns 0 on error +region_info_t *find_region(void* addr); +void region_free(void* addr); + +void dbg_print_region_info(); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/kogata/slab_alloc.h b/src/common/include/kogata/slab_alloc.h new file mode 100644 index 0000000..1191057 --- /dev/null +++ b/src/common/include/kogata/slab_alloc.h @@ -0,0 +1,45 @@ +#pragma once + +// Self-contained piece of library : a slab allocator... 
+// Depends on page_alloc_fun_t and page_free_fun_t : a couple of functions +// that can allocate/free multiples of one page at a time + +#include +#include +#include + + +#if defined(__linux__) +//redefine necessary stuff +#include // standard linux assert.h +#define ASSERT assert +#include +#define dbg_printf printf +#else +#include +#endif + +#define PAGE_SIZE 0x1000 + +// expected format for the array of slab_type_t given to slab_create : +// an array of slab_type descriptors, with last descriptor full of zeroes +// and with obj_size increasing (strictly) in the array +typedef struct slab_type { + const char *descr; + size_t obj_size; + size_t pages_per_cache; +} slab_type_t; + +struct mem_allocator; +typedef struct mem_allocator mem_allocator_t; + +typedef void* (*page_alloc_fun_t)(size_t bytes); +typedef void (*page_free_fun_t)(void* ptr); + +mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff); +void destroy_slab_allocator(mem_allocator_t*); + +void* slab_alloc(mem_allocator_t* a, size_t sz); +void slab_free(mem_allocator_t* a, void* ptr); + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/malloc.h b/src/common/include/malloc.h deleted file mode 100644 index e55c25e..0000000 --- a/src/common/include/malloc.h +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -#include -#include - -// Header is in common/, but implementation is not. - -void* malloc(size_t sz); -void free(void* ptr); -void* calloc(size_t nmemb, size_t sz); -void* realloc(void* ptr, size_t sz); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/mutex.h b/src/common/include/mutex.h deleted file mode 100644 index 88c077e..0000000 --- a/src/common/include/mutex.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once - -#include -#include - -#define MUTEX_LOCKED 1 -#define MUTEX_UNLOCKED 0 - - -typedef uint32_t mutex_t; - -void mutex_lock(mutex_t* mutex); //wait for mutex to be free -bool mutex_try_lock(mutex_t* mutex); //lock mutex only if free, returns true when locked, false when was busy -void mutex_unlock(mutex_t* mutex); - -// the mutex code assumes a yield() function is defined somewhere -void yield(); - -#define STATIC_MUTEX(name) static mutex_t name __attribute__((section("locks"))) = MUTEX_UNLOCKED; - - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/printf.h b/src/common/include/printf.h deleted file mode 100644 index b4e1c1b..0000000 --- a/src/common/include/printf.h +++ /dev/null @@ -1,10 +0,0 @@ -#pragma once - -#include -#include -#include - -int snprintf(char* s, size_t n, const char* format, ...); -int vsnprintf(char* s, size_t n, const char* format, va_list arg); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/region_alloc.h b/src/common/include/region_alloc.h deleted file mode 100644 index d314bd5..0000000 --- a/src/common/include/region_alloc.h +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -// Virtual memory region allocator - -// This is entirely thread-safe - -#include -#include -#include - -struct region_info; - -typedef bool (*map_page_fun_t)(void* addr); // map a single page (used by region allocator) - -typedef struct region_info { - void* addr; - size_t size; - char* type; -} region_info_t; - -// rsvd_end : when used for kernel memory region management, a reserved region -// exists between begin (=K_HIGHHALF_ADDR) and the end of kernel static data -// for user processes, use rsvd_end = begin (no reserved region) -void region_allocator_init(void* begin, void* rsvd_end, void* 
end, map_page_fun_t map); - -void* region_alloc(size_t size, char* type); // returns 0 on error -region_info_t *find_region(void* addr); -void region_free(void* addr); - -void dbg_print_region_info(); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/include/slab_alloc.h b/src/common/include/slab_alloc.h deleted file mode 100644 index c8d5d6c..0000000 --- a/src/common/include/slab_alloc.h +++ /dev/null @@ -1,45 +0,0 @@ -#pragma once - -// Self-contained piece of library : a slab allocator... -// Depends on page_alloc_fun_t and page_free_fun_t : a couple of functions -// that can allocate/free multiples of one page at a time - -#include -#include -#include - - -#if defined(__linux__) -//redefine necessary stuff -#include // standard linux assert.h -#define ASSERT assert -#include -#define dbg_printf printf -#else -#include -#endif - -#define PAGE_SIZE 0x1000 - -// expected format for the array of slab_type_t given to slab_create : -// an array of slab_type descriptors, with last descriptor full of zeroes -// and with obj_size increasing (strictly) in the array -typedef struct slab_type { - const char *descr; - size_t obj_size; - size_t pages_per_cache; -} slab_type_t; - -struct mem_allocator; -typedef struct mem_allocator mem_allocator_t; - -typedef void* (*page_alloc_fun_t)(size_t bytes); -typedef void (*page_free_fun_t)(void* ptr); - -mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_t af, page_free_fun_t ff); -void destroy_slab_allocator(mem_allocator_t*); - -void* slab_alloc(mem_allocator_t* a, size_t sz); -void slab_free(mem_allocator_t* a, void* ptr); - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libalgo/btree.c b/src/common/libalgo/btree.c deleted file mode 100644 index d1bfd18..0000000 --- a/src/common/libalgo/btree.c +++ /dev/null @@ -1,343 +0,0 @@ -#include -#include - -#include - -typedef struct btree_item { - void *key, *val; - - int height; - struct btree_item *left, *right; -} btree_item_t; - -struct btree { - key_cmp_fun_t cf; - kv_iter_fun_t releasef; - - btree_item_t *root; - int nitems; -}; - -btree_t* create_btree(key_cmp_fun_t cf, kv_iter_fun_t relf) { - btree_t* t = (btree_t*)malloc(sizeof(btree_t)); - - if (t == 0) return 0; - - t->cf = cf; - t->releasef = relf; - - t->root = 0; - t->nitems = 0; - - return t; -} - -void _btree_delete_item_rec(btree_item_t *i,kv_iter_fun_t relf) { - if (i == 0) return; - - _btree_delete_item_rec(i->left, relf); - _btree_delete_item_rec(i->right, relf); - - if (relf) relf(i->key, i->val); - free(i); -} -void delete_btree(btree_t *t) { - _btree_delete_item_rec(t->root, t->releasef); - - free(t); -} - -size_t btree_count(btree_t *i) { - return i->nitems; -} - -// ============== // -// TREE ROTATIONS // -// ============== // - -static inline int height(btree_item_t *i) { - if (i == 0) return 0; - return i->height; -} - -void btree_recalc_height(btree_item_t *i) { - if (i == 0) return; - - i->height = MAX(height(i->left), height(i->right)) + 1; -} - -btree_item_t* btree_rotate_left(btree_item_t *i) { - // I(X(a, b), Y) -> a X b I Y -> X(a, I(b, Y)) - - btree_item_t *x = i->left; - if (x == 0) return i; - btree_item_t *b = x->right; - - x->right = i; - i->left = b; - - btree_recalc_height(i); - btree_recalc_height(x); - - return x; -} - -btree_item_t* btree_rotate_right(btree_item_t *i) { - // I(X, Y(a,b)) -> X I a Y b -> Y(I(X, a), b) - - btree_item_t *y = i->right; - if (y == 0) return i; - btree_item_t *a = y->left; - - y->left = i; - i->right = a; - - btree_recalc_height(i); - 
btree_recalc_height(y); - - return y; -} - -btree_item_t* btree_equilibrate(btree_item_t *i) { - if (i == 0) return 0; - - while (height(i->left) - height(i->right) >= 2) - i = btree_rotate_left(i); - - while (height(i->right) - height(i->left) >= 2) - i = btree_rotate_right(i); - - return i; -} - -// =================== // -// ADDING AND DELETING // -// =================== // - -btree_item_t* _btree_insert(btree_t *t, btree_item_t *root, btree_item_t *i) { - if (root == 0) return i; - - if (t->cf(i->key, root->key) <= 0) { - root->left = _btree_insert(t, root->left, i); - } else { - root->right = _btree_insert(t, root->right, i); - } - btree_recalc_height(root); - - return btree_equilibrate(root); -} -bool btree_add(btree_t *t, void* key, void* val) { - btree_item_t *x = (btree_item_t*)malloc(sizeof(btree_item_t)); - if (x == 0) return false; - - x->key = key; - x->val = val; - x->left = x->right = 0; - btree_recalc_height(x); - - t->root = _btree_insert(t, t->root, x); - t->nitems++; - - return true; -} - -btree_item_t *_btree_extract_smallest(btree_item_t *i, btree_item_t **out_smallest) { - ASSERT(i != 0); - if (i->left == 0) { - *out_smallest = i; - return i->right; - } else { - i->left = _btree_extract_smallest(i->left, out_smallest); - btree_recalc_height(i); - return btree_equilibrate(i); - } -} -btree_item_t *_btree_remove_aux(btree_t *t, btree_item_t *i, const void* key) { - if (i == 0) return 0; - - int c = t->cf(key, i->key); - if (c < 0) { - i->left = _btree_remove_aux(t, i->left, key); - return btree_equilibrate(i); - } else if (c > 0) { - i->right = _btree_remove_aux(t, i->right, key); - return btree_equilibrate(i); - } else { - // remove item i - - btree_item_t *new_i; - if (i->right == 0 || i->left == 0) { - new_i = (i->right == 0 ? i->left : i->right); - } else { - btree_item_t *new_i_right = _btree_extract_smallest(i->right, &new_i); - - new_i->left = i->left; - new_i->right = new_i_right; - - btree_recalc_height(new_i); - new_i = btree_equilibrate(new_i); - } - - if (t->releasef) t->releasef(i->key, i->val); - free(i); - t->nitems--; - - return _btree_remove_aux(t, new_i, key); // loop because several elements may correspond - } -} -void btree_remove(btree_t *t, const void* key) { - t->root = _btree_remove_aux(t, t->root, key); -} - -btree_item_t *_btree_extract_smallest_v(btree_item_t *i, btree_item_t **out_smallest) { - ASSERT(i != 0); - if (i->left == 0) { - *out_smallest = i; - return i->right; - } else { - i->left = _btree_extract_smallest_v(i->left, out_smallest); - btree_recalc_height(i); - return btree_equilibrate(i); - } -} -btree_item_t *_btree_remove_aux_v(btree_t *t, btree_item_t *i, const void* key, const void* val) { - if (i == 0) return 0; - - int c = t->cf(key, i->key); - if (c < 0) { - i->left = _btree_remove_aux_v(t, i->left, key, val); - return btree_equilibrate(i); - } else if (c > 0) { - i->right = _btree_remove_aux_v(t, i->right, key, val); - return btree_equilibrate(i); - } else if (i->val == val) { - // remove item i - - btree_item_t *new_i; - if (i->right == 0 || i->left == 0) { - new_i = (i->right == 0 ? 
i->left : i->right); - } else { - btree_item_t *new_i_right = _btree_extract_smallest_v(i->right, &new_i); - - new_i->left = i->left; - new_i->right = new_i_right; - - btree_recalc_height(new_i); - new_i = btree_equilibrate(new_i); - } - - if (t->releasef) t->releasef(i->key, i->val); - free(i); - t->nitems--; - - return _btree_remove_aux_v(t, new_i, key, val); // loop because several elements may correspond - } else { - i->left = _btree_remove_aux_v(t, i->left, key, val); - i->right = _btree_remove_aux_v(t, i->right, key, val); - btree_recalc_height(i); - return btree_equilibrate(i); - } -} -void btree_remove_v(btree_t *t, const void* key, const void* val) { - t->root = _btree_remove_aux_v(t, t->root, key, val); -} - -// ======================== // -// LOOKING UP AND ITERATING // -// ======================== // - -btree_item_t *_btree_find_aux(btree_t *t, btree_item_t *i, const void* key) { - if (i == 0) return 0; - - int c = t->cf(key, i->key); - if (c == 0) { - return i; - } else if (c < 0) { - return _btree_find_aux(t, i->left, key); - } else { - return _btree_find_aux(t, i->right, key); - } -} -void* btree_find(btree_t *t, const void* key) { - - btree_item_t *i = _btree_find_aux(t, t->root, key); - - if (i == 0) return 0; - return i->val; -} - -btree_item_t *_btree_lower_aux(btree_t *t, btree_item_t *i, const void* key) { - if (i == 0) return 0; - - int c = t->cf(key, i->key); - if (c == 0) { - return i; - } else if (c < 0) { - return _btree_lower_aux(t, i->left, key); - } else { - btree_item_t *r = _btree_lower_aux(t, i->right, key); - if (r == 0) r = i; - return r; - } -} -void* btree_lower(btree_t *t, const void* key, void** actual_key) { - btree_item_t *i = _btree_lower_aux(t, t->root, key); - - if (i == 0) return 0; - if (actual_key != 0) *actual_key = i->key; - return i->val; -} - -btree_item_t *_btree_upper_aux(btree_t *t, btree_item_t *i, const void* key) { - if (i == 0) return 0; - - int c = t->cf(key, i->key); - if (c == 0) { - return i; - } else if (c < 0) { - btree_item_t *r = _btree_upper_aux(t, i->left, key); - if (r == 0) r = i; - return r; - } else { - return _btree_upper_aux(t, i->right, key); - } -} -void* btree_upper(btree_t *t, const void* key, void** actual_key) { - btree_item_t *i = _btree_upper_aux(t, t->root, key); - - if (i == 0) return 0; - if (actual_key != 0) *actual_key = i->key; - return i->val; -} - - -void _btree_iter_aux(btree_item_t *i, kv_iter_fun_t f) { - if (i == 0) return; - - _btree_iter_aux(i->left, f); - f(i->key, i->val); - _btree_iter_aux(i->right, f); -} -void btree_iter(btree_t *t, kv_iter_fun_t f) { - _btree_iter_aux(t->root, f); -} - -void _btree_iter_on_aux(btree_t *t, btree_item_t *i, const void* key, kv_iter_fun_t f) { - if (i == 0) return; - - int c = t->cf(key, i->key); - if (c == 0) { - _btree_iter_on_aux(t, i->left, key, f); - f(i->key, i->val); - _btree_iter_on_aux(t, i->right, key, f); - } else if (c < 0) { - _btree_iter_on_aux(t, i->left, key, f); - } else { - _btree_iter_on_aux(t, i->right, key, f); - } -} -void btree_iter_on(btree_t *t, const void* key, kv_iter_fun_t f) { - _btree_iter_on_aux(t, t->root, key, f); -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libalgo/hashtbl.c b/src/common/libalgo/hashtbl.c deleted file mode 100644 index 0284d75..0000000 --- a/src/common/libalgo/hashtbl.c +++ /dev/null @@ -1,177 +0,0 @@ -#include -#include - -#include - -#define DEFAULT_HT_INIT_SIZE 16 -#define SLOT_OF_HASH(k, nslots) (((size_t)(k)*21179)%(size_t)(nslots)) - -typedef struct hashtbl_item { - void* key; - 
void* val; - struct hashtbl_item *next; -} hashtbl_item_t; - -// When nitems > size * 3/4, size is multiplied by two -// When nitems < size * 1/4, size is divided by two -struct hashtbl { - key_eq_fun_t ef; - hash_fun_t hf; - kv_iter_fun_t releasef; - size_t size, nitems; - hashtbl_item_t **items; -}; - -hashtbl_t *create_hashtbl(key_eq_fun_t ef, hash_fun_t hf, kv_iter_fun_t on_release) { - hashtbl_t *ht = (hashtbl_t*)malloc(sizeof(hashtbl_t)); - if (ht == 0) return 0; - - ht->ef = ef; - ht->hf = hf; - ht->releasef = on_release; - - ht->size = DEFAULT_HT_INIT_SIZE; - ht->nitems = 0; - - ht->items = (hashtbl_item_t**)malloc(ht->size * sizeof(hashtbl_item_t*)); - if (ht->items == 0) { - free(ht); - return 0; - } - - for (size_t i = 0; i < ht->size; i++) ht->items[i] = 0; - - return ht; -} - -void delete_hashtbl(hashtbl_t *ht) { - // Free items - for (size_t i = 0; i < ht->size; i++) { - while (ht->items[i] != 0) { - hashtbl_item_t *x = ht->items[i]; - ht->items[i] = x->next; - - if (ht->releasef) ht->releasef(x->key, x->val); - - free(x); - } - } - - // Free table - free(ht->items); - free(ht); -} - -void hashtbl_check_size(hashtbl_t *ht) { - size_t nsize = 0; - if (4 * ht->nitems < ht->size) nsize = ht->size / 2; - if (4 * ht->nitems > 3 * ht->size) nsize = ht->size * 2; - - if (nsize != 0 && nsize >= DEFAULT_HT_INIT_SIZE) { - hashtbl_item_t **nitems = (hashtbl_item_t**)malloc(nsize * sizeof(hashtbl_item_t*)); - if (nitems == 0) return; // if we can't realloc, too bad, we just lose space/efficienty - - for (size_t i = 0; i < nsize; i++) nitems[i] = 0; - - // rehash - for (size_t i = 0; i < ht->size; i++) { - while (ht->items[i] != 0) { - hashtbl_item_t *x = ht->items[i]; - ht->items[i] = x->next; - - size_t slot = SLOT_OF_HASH(ht->hf(x->key), nsize); - x->next = nitems[slot]; - nitems[slot] = x; - } - } - free(ht->items); - ht->size = nsize; - ht->items = nitems; - } -} - -bool hashtbl_add(hashtbl_t *ht, void* key, void* v) { - hashtbl_item_t *i = (hashtbl_item_t*)malloc(sizeof(hashtbl_item_t)); - if (i == 0) return false; // OOM - - // make sure item is not already present - hashtbl_remove(ht, key); - - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - i->key = key; - i->val = v; - i->next = ht->items[slot]; - ht->items[slot] = i; - ht->nitems++; - - hashtbl_check_size(ht); - - return true; -} - -void hashtbl_remove(hashtbl_t* ht, const void* key) { - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - if (ht->items[slot] == 0) return; - - hashtbl_item_t *x = 0; - - if (ht->ef(ht->items[slot]->key, key)) { - x = ht->items[slot]; - ht->items[slot] = x->next; - } else { - for (hashtbl_item_t *i = ht->items[slot]; i->next != 0; i = i->next) { - if (ht->ef(i->next->key, key)) { - x = i->next; - i->next = x->next; - break; - } - } - } - - if (x != 0) { - ht->nitems--; - if (ht->releasef) ht->releasef(x->key, x->val); - free(x); - } - - hashtbl_check_size(ht); -} - -void* hashtbl_find(hashtbl_t* ht, const void* key) { - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - for (hashtbl_item_t *i = ht->items[slot]; i != 0; i = i->next) { - if (ht->ef(i->key, key)) return i->val; - } - - return 0; -} - -void hashtbl_iter(hashtbl_t *ht, kv_iter_fun_t f) { - for (size_t s = 0; s < ht->size; s++) { - for (hashtbl_item_t *i = ht->items[s]; i != 0; i = i->next) { - f(i->key, i->val); - } - } -} - -size_t hashtbl_count(hashtbl_t* ht) { - return ht->nitems; -} - -bool hashtbl_change(hashtbl_t* ht, void* key, void* newval) { - size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); - - for 
(hashtbl_item_t *i = ht->items[slot]; i != 0; i = i->next) { - if (ht->ef(i->key, key)) { - i->val = newval; - return true; - } - } - - return false; -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libalgo/keyval.c b/src/common/libalgo/keyval.c deleted file mode 100644 index 528fa90..0000000 --- a/src/common/libalgo/keyval.c +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include - -#include - -// Hashing and comparing - -hash_t id_hash_fun(const void* v) { - return (hash_t)v; -} - -hash_t str_hash_fun(const void* v) { - hash_t h = 712; - for (char* s = (char*)v; *s != 0; s++) { - h = h * 101 + *s; - } - return h; -} - -bool id_key_eq_fun(const void* a, const void* b) { - return a == b; -} - -bool str_key_eq_fun(const void* a, const void* b) { - return strcmp((const char*)a, (const char*)b) == 0; -} - -int id_key_cmp_fun(const void* a, const void* b) { - return (a == b ? 0 : (a < b ? -1 : 1)); -} - -int str_key_cmp_fun(const void* a, const void* b) { - return strcmp((const char*)a, (const char*)b); -} - -// Freeing functions - -void free_key(void* key, void* val) { - free(key); -} - -void free_val(void* key, void* val) { - free(val); -} - -void free_key_val(void* key, void* val) { - free(key); - free(val); -} - -/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libc/printf.c b/src/common/libc/printf.c index 8618741..d1671c3 100644 --- a/src/common/libc/printf.c +++ b/src/common/libc/printf.c @@ -1,6 +1,7 @@ -#include #include +#include + int snprintf(char * buff, size_t len, const char *format, ...) { va_list ap; diff --git a/src/common/libc/string.c b/src/common/libc/string.c index 90cea34..e1ed21e 100644 --- a/src/common/libc/string.c +++ b/src/common/libc/string.c @@ -1,6 +1,6 @@ #include -#include +#include size_t strlen(const char* str) { size_t ret = 0; diff --git a/src/common/libkogata/btree.c b/src/common/libkogata/btree.c new file mode 100644 index 0000000..dc5f11f --- /dev/null +++ b/src/common/libkogata/btree.c @@ -0,0 +1,343 @@ +#include +#include + +#include + +typedef struct btree_item { + void *key, *val; + + int height; + struct btree_item *left, *right; +} btree_item_t; + +struct btree { + key_cmp_fun_t cf; + kv_iter_fun_t releasef; + + btree_item_t *root; + int nitems; +}; + +btree_t* create_btree(key_cmp_fun_t cf, kv_iter_fun_t relf) { + btree_t* t = (btree_t*)malloc(sizeof(btree_t)); + + if (t == 0) return 0; + + t->cf = cf; + t->releasef = relf; + + t->root = 0; + t->nitems = 0; + + return t; +} + +void _btree_delete_item_rec(btree_item_t *i,kv_iter_fun_t relf) { + if (i == 0) return; + + _btree_delete_item_rec(i->left, relf); + _btree_delete_item_rec(i->right, relf); + + if (relf) relf(i->key, i->val); + free(i); +} +void delete_btree(btree_t *t) { + _btree_delete_item_rec(t->root, t->releasef); + + free(t); +} + +size_t btree_count(btree_t *i) { + return i->nitems; +} + +// ============== // +// TREE ROTATIONS // +// ============== // + +static inline int height(btree_item_t *i) { + if (i == 0) return 0; + return i->height; +} + +void btree_recalc_height(btree_item_t *i) { + if (i == 0) return; + + i->height = MAX(height(i->left), height(i->right)) + 1; +} + +btree_item_t* btree_rotate_left(btree_item_t *i) { + // I(X(a, b), Y) -> a X b I Y -> X(a, I(b, Y)) + + btree_item_t *x = i->left; + if (x == 0) return i; + btree_item_t *b = x->right; + + x->right = i; + i->left = b; + + btree_recalc_height(i); + btree_recalc_height(x); + + return x; +} + +btree_item_t* btree_rotate_right(btree_item_t *i) { + // I(X, Y(a,b)) -> X I a Y b -> Y(I(X, a), b) 
+ + btree_item_t *y = i->right; + if (y == 0) return i; + btree_item_t *a = y->left; + + y->left = i; + i->right = a; + + btree_recalc_height(i); + btree_recalc_height(y); + + return y; +} + +btree_item_t* btree_equilibrate(btree_item_t *i) { + if (i == 0) return 0; + + while (height(i->left) - height(i->right) >= 2) + i = btree_rotate_left(i); + + while (height(i->right) - height(i->left) >= 2) + i = btree_rotate_right(i); + + return i; +} + +// =================== // +// ADDING AND DELETING // +// =================== // + +btree_item_t* _btree_insert(btree_t *t, btree_item_t *root, btree_item_t *i) { + if (root == 0) return i; + + if (t->cf(i->key, root->key) <= 0) { + root->left = _btree_insert(t, root->left, i); + } else { + root->right = _btree_insert(t, root->right, i); + } + btree_recalc_height(root); + + return btree_equilibrate(root); +} +bool btree_add(btree_t *t, void* key, void* val) { + btree_item_t *x = (btree_item_t*)malloc(sizeof(btree_item_t)); + if (x == 0) return false; + + x->key = key; + x->val = val; + x->left = x->right = 0; + btree_recalc_height(x); + + t->root = _btree_insert(t, t->root, x); + t->nitems++; + + return true; +} + +btree_item_t *_btree_extract_smallest(btree_item_t *i, btree_item_t **out_smallest) { + ASSERT(i != 0); + if (i->left == 0) { + *out_smallest = i; + return i->right; + } else { + i->left = _btree_extract_smallest(i->left, out_smallest); + btree_recalc_height(i); + return btree_equilibrate(i); + } +} +btree_item_t *_btree_remove_aux(btree_t *t, btree_item_t *i, const void* key) { + if (i == 0) return 0; + + int c = t->cf(key, i->key); + if (c < 0) { + i->left = _btree_remove_aux(t, i->left, key); + return btree_equilibrate(i); + } else if (c > 0) { + i->right = _btree_remove_aux(t, i->right, key); + return btree_equilibrate(i); + } else { + // remove item i + + btree_item_t *new_i; + if (i->right == 0 || i->left == 0) { + new_i = (i->right == 0 ? i->left : i->right); + } else { + btree_item_t *new_i_right = _btree_extract_smallest(i->right, &new_i); + + new_i->left = i->left; + new_i->right = new_i_right; + + btree_recalc_height(new_i); + new_i = btree_equilibrate(new_i); + } + + if (t->releasef) t->releasef(i->key, i->val); + free(i); + t->nitems--; + + return _btree_remove_aux(t, new_i, key); // loop because several elements may correspond + } +} +void btree_remove(btree_t *t, const void* key) { + t->root = _btree_remove_aux(t, t->root, key); +} + +btree_item_t *_btree_extract_smallest_v(btree_item_t *i, btree_item_t **out_smallest) { + ASSERT(i != 0); + if (i->left == 0) { + *out_smallest = i; + return i->right; + } else { + i->left = _btree_extract_smallest_v(i->left, out_smallest); + btree_recalc_height(i); + return btree_equilibrate(i); + } +} +btree_item_t *_btree_remove_aux_v(btree_t *t, btree_item_t *i, const void* key, const void* val) { + if (i == 0) return 0; + + int c = t->cf(key, i->key); + if (c < 0) { + i->left = _btree_remove_aux_v(t, i->left, key, val); + return btree_equilibrate(i); + } else if (c > 0) { + i->right = _btree_remove_aux_v(t, i->right, key, val); + return btree_equilibrate(i); + } else if (i->val == val) { + // remove item i + + btree_item_t *new_i; + if (i->right == 0 || i->left == 0) { + new_i = (i->right == 0 ? 
i->left : i->right); + } else { + btree_item_t *new_i_right = _btree_extract_smallest_v(i->right, &new_i); + + new_i->left = i->left; + new_i->right = new_i_right; + + btree_recalc_height(new_i); + new_i = btree_equilibrate(new_i); + } + + if (t->releasef) t->releasef(i->key, i->val); + free(i); + t->nitems--; + + return _btree_remove_aux_v(t, new_i, key, val); // loop because several elements may correspond + } else { + i->left = _btree_remove_aux_v(t, i->left, key, val); + i->right = _btree_remove_aux_v(t, i->right, key, val); + btree_recalc_height(i); + return btree_equilibrate(i); + } +} +void btree_remove_v(btree_t *t, const void* key, const void* val) { + t->root = _btree_remove_aux_v(t, t->root, key, val); +} + +// ======================== // +// LOOKING UP AND ITERATING // +// ======================== // + +btree_item_t *_btree_find_aux(btree_t *t, btree_item_t *i, const void* key) { + if (i == 0) return 0; + + int c = t->cf(key, i->key); + if (c == 0) { + return i; + } else if (c < 0) { + return _btree_find_aux(t, i->left, key); + } else { + return _btree_find_aux(t, i->right, key); + } +} +void* btree_find(btree_t *t, const void* key) { + + btree_item_t *i = _btree_find_aux(t, t->root, key); + + if (i == 0) return 0; + return i->val; +} + +btree_item_t *_btree_lower_aux(btree_t *t, btree_item_t *i, const void* key) { + if (i == 0) return 0; + + int c = t->cf(key, i->key); + if (c == 0) { + return i; + } else if (c < 0) { + return _btree_lower_aux(t, i->left, key); + } else { + btree_item_t *r = _btree_lower_aux(t, i->right, key); + if (r == 0) r = i; + return r; + } +} +void* btree_lower(btree_t *t, const void* key, void** actual_key) { + btree_item_t *i = _btree_lower_aux(t, t->root, key); + + if (i == 0) return 0; + if (actual_key != 0) *actual_key = i->key; + return i->val; +} + +btree_item_t *_btree_upper_aux(btree_t *t, btree_item_t *i, const void* key) { + if (i == 0) return 0; + + int c = t->cf(key, i->key); + if (c == 0) { + return i; + } else if (c < 0) { + btree_item_t *r = _btree_upper_aux(t, i->left, key); + if (r == 0) r = i; + return r; + } else { + return _btree_upper_aux(t, i->right, key); + } +} +void* btree_upper(btree_t *t, const void* key, void** actual_key) { + btree_item_t *i = _btree_upper_aux(t, t->root, key); + + if (i == 0) return 0; + if (actual_key != 0) *actual_key = i->key; + return i->val; +} + + +void _btree_iter_aux(btree_item_t *i, kv_iter_fun_t f) { + if (i == 0) return; + + _btree_iter_aux(i->left, f); + f(i->key, i->val); + _btree_iter_aux(i->right, f); +} +void btree_iter(btree_t *t, kv_iter_fun_t f) { + _btree_iter_aux(t->root, f); +} + +void _btree_iter_on_aux(btree_t *t, btree_item_t *i, const void* key, kv_iter_fun_t f) { + if (i == 0) return; + + int c = t->cf(key, i->key); + if (c == 0) { + _btree_iter_on_aux(t, i->left, key, f); + f(i->key, i->val); + _btree_iter_on_aux(t, i->right, key, f); + } else if (c < 0) { + _btree_iter_on_aux(t, i->left, key, f); + } else { + _btree_iter_on_aux(t, i->right, key, f); + } +} +void btree_iter_on(btree_t *t, const void* key, kv_iter_fun_t f) { + _btree_iter_on_aux(t, t->root, key, f); +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libkogata/hashtbl.c b/src/common/libkogata/hashtbl.c new file mode 100644 index 0000000..b0097cd --- /dev/null +++ b/src/common/libkogata/hashtbl.c @@ -0,0 +1,177 @@ +#include +#include + +#include + +#define DEFAULT_HT_INIT_SIZE 16 +#define SLOT_OF_HASH(k, nslots) (((size_t)(k)*21179)%(size_t)(nslots)) + +typedef struct hashtbl_item { + void* key; + 
void* val; + struct hashtbl_item *next; +} hashtbl_item_t; + +// When nitems > size * 3/4, size is multiplied by two +// When nitems < size * 1/4, size is divided by two +struct hashtbl { + key_eq_fun_t ef; + hash_fun_t hf; + kv_iter_fun_t releasef; + size_t size, nitems; + hashtbl_item_t **items; +}; + +hashtbl_t *create_hashtbl(key_eq_fun_t ef, hash_fun_t hf, kv_iter_fun_t on_release) { + hashtbl_t *ht = (hashtbl_t*)malloc(sizeof(hashtbl_t)); + if (ht == 0) return 0; + + ht->ef = ef; + ht->hf = hf; + ht->releasef = on_release; + + ht->size = DEFAULT_HT_INIT_SIZE; + ht->nitems = 0; + + ht->items = (hashtbl_item_t**)malloc(ht->size * sizeof(hashtbl_item_t*)); + if (ht->items == 0) { + free(ht); + return 0; + } + + for (size_t i = 0; i < ht->size; i++) ht->items[i] = 0; + + return ht; +} + +void delete_hashtbl(hashtbl_t *ht) { + // Free items + for (size_t i = 0; i < ht->size; i++) { + while (ht->items[i] != 0) { + hashtbl_item_t *x = ht->items[i]; + ht->items[i] = x->next; + + if (ht->releasef) ht->releasef(x->key, x->val); + + free(x); + } + } + + // Free table + free(ht->items); + free(ht); +} + +void hashtbl_check_size(hashtbl_t *ht) { + size_t nsize = 0; + if (4 * ht->nitems < ht->size) nsize = ht->size / 2; + if (4 * ht->nitems > 3 * ht->size) nsize = ht->size * 2; + + if (nsize != 0 && nsize >= DEFAULT_HT_INIT_SIZE) { + hashtbl_item_t **nitems = (hashtbl_item_t**)malloc(nsize * sizeof(hashtbl_item_t*)); + if (nitems == 0) return; // if we can't realloc, too bad, we just lose space/efficienty + + for (size_t i = 0; i < nsize; i++) nitems[i] = 0; + + // rehash + for (size_t i = 0; i < ht->size; i++) { + while (ht->items[i] != 0) { + hashtbl_item_t *x = ht->items[i]; + ht->items[i] = x->next; + + size_t slot = SLOT_OF_HASH(ht->hf(x->key), nsize); + x->next = nitems[slot]; + nitems[slot] = x; + } + } + free(ht->items); + ht->size = nsize; + ht->items = nitems; + } +} + +bool hashtbl_add(hashtbl_t *ht, void* key, void* v) { + hashtbl_item_t *i = (hashtbl_item_t*)malloc(sizeof(hashtbl_item_t)); + if (i == 0) return false; // OOM + + // make sure item is not already present + hashtbl_remove(ht, key); + + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + i->key = key; + i->val = v; + i->next = ht->items[slot]; + ht->items[slot] = i; + ht->nitems++; + + hashtbl_check_size(ht); + + return true; +} + +void hashtbl_remove(hashtbl_t* ht, const void* key) { + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + if (ht->items[slot] == 0) return; + + hashtbl_item_t *x = 0; + + if (ht->ef(ht->items[slot]->key, key)) { + x = ht->items[slot]; + ht->items[slot] = x->next; + } else { + for (hashtbl_item_t *i = ht->items[slot]; i->next != 0; i = i->next) { + if (ht->ef(i->next->key, key)) { + x = i->next; + i->next = x->next; + break; + } + } + } + + if (x != 0) { + ht->nitems--; + if (ht->releasef) ht->releasef(x->key, x->val); + free(x); + } + + hashtbl_check_size(ht); +} + +void* hashtbl_find(hashtbl_t* ht, const void* key) { + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + for (hashtbl_item_t *i = ht->items[slot]; i != 0; i = i->next) { + if (ht->ef(i->key, key)) return i->val; + } + + return 0; +} + +void hashtbl_iter(hashtbl_t *ht, kv_iter_fun_t f) { + for (size_t s = 0; s < ht->size; s++) { + for (hashtbl_item_t *i = ht->items[s]; i != 0; i = i->next) { + f(i->key, i->val); + } + } +} + +size_t hashtbl_count(hashtbl_t* ht) { + return ht->nitems; +} + +bool hashtbl_change(hashtbl_t* ht, void* key, void* newval) { + size_t slot = SLOT_OF_HASH(ht->hf(key), ht->size); + + for 
(hashtbl_item_t *i = ht->items[slot]; i != 0; i = i->next) { + if (ht->ef(i->key, key)) { + i->val = newval; + return true; + } + } + + return false; +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libkogata/keyval.c b/src/common/libkogata/keyval.c new file mode 100644 index 0000000..9541c72 --- /dev/null +++ b/src/common/libkogata/keyval.c @@ -0,0 +1,51 @@ +#include + +#include +#include + +// Hashing and comparing + +hash_t id_hash_fun(const void* v) { + return (hash_t)v; +} + +hash_t str_hash_fun(const void* v) { + hash_t h = 712; + for (char* s = (char*)v; *s != 0; s++) { + h = h * 101 + *s; + } + return h; +} + +bool id_key_eq_fun(const void* a, const void* b) { + return a == b; +} + +bool str_key_eq_fun(const void* a, const void* b) { + return strcmp((const char*)a, (const char*)b) == 0; +} + +int id_key_cmp_fun(const void* a, const void* b) { + return (a == b ? 0 : (a < b ? -1 : 1)); +} + +int str_key_cmp_fun(const void* a, const void* b) { + return strcmp((const char*)a, (const char*)b); +} + +// Freeing functions + +void free_key(void* key, void* val) { + free(key); +} + +void free_val(void* key, void* val) { + free(val); +} + +void free_key_val(void* key, void* val) { + free(key); + free(val); +} + +/* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/src/common/libkogata/mutex.c b/src/common/libkogata/mutex.c index b345ee5..e521330 100644 --- a/src/common/libkogata/mutex.c +++ b/src/common/libkogata/mutex.c @@ -1,4 +1,4 @@ -#include +#include /* Internal use only. This function is atomic, meaning it cannot be interrupted by a system task switch. */ static uint32_t atomic_exchange(uint32_t* ptr, uint32_t newval) { diff --git a/src/common/libkogata/region_alloc.c b/src/common/libkogata/region_alloc.c index 1f2fe0f..85c437a 100644 --- a/src/common/libkogata/region_alloc.c +++ b/src/common/libkogata/region_alloc.c @@ -1,6 +1,6 @@ -#include -#include -#include +#include +#include +#include // we cannot include sys... diff --git a/src/common/libkogata/slab_alloc.c b/src/common/libkogata/slab_alloc.c index 0736655..6362207 100644 --- a/src/common/libkogata/slab_alloc.c +++ b/src/common/libkogata/slab_alloc.c @@ -1,4 +1,4 @@ -#include +#include typedef struct object { struct object *next; -- cgit v1.2.3
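Editorial note (not part of the patch): the commit above only relocates files, so after it lands, consumers pick the headers up under the new kogata/ prefix (e.g. kogata/hashtbl.h instead of hashtbl.h) and link against common_libkogata instead of common_libalgo. The sketch below is a hypothetical illustration of that, using only the hashtbl API declared in the moved headers; the function name lookup_example, the "vga"/"kbd" keys and the stand-in driver objects are invented for the example, and it assumes malloc/free (declared in kogata/malloc.h) are provided by the surrounding environment.

/* Minimal sketch of a consumer after the move: headers now live under
 * the kogata/ include prefix. */
#include <kogata/algo.h>
#include <kogata/hashtbl.h>

/* Map string keys to opaque pointers. Keys and values are NOT copied by
 * the hashtbl, so the string literals used here must outlive the table;
 * passing 0 as on_release means nothing is freed when a binding goes away. */
static void* lookup_example(void) {
	hashtbl_t *ht = create_hashtbl(str_key_eq_fun, str_hash_fun, 0);
	if (ht == 0) return 0;                    /* allocation failed */

	static int vga = 1, kbd = 2;              /* stand-in driver objects */
	hashtbl_add(ht, "vga", &vga);             /* replaces any previous "vga" binding */
	hashtbl_add(ht, "kbd", &kbd);

	void *drv = hashtbl_find(ht, "kbd");      /* -> &kbd, or 0 if not present */

	delete_hashtbl(ht);                       /* frees the table structure itself */
	return drv;
}

As the hashtbl.h header notes, a table holds at most one binding per key (hashtbl_add drops the previous one), whereas the btree API moved in the same commit allows several bindings per key and removes them all in btree_remove.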