Diffstat (limited to 'src/lib/libkogata')
-rw-r--r--  src/lib/libkogata/Makefile      |   2
-rw-r--r--  src/lib/libkogata/debug.c       |   8
-rw-r--r--  src/lib/libkogata/malloc.c      |  50
-rw-r--r--  src/lib/libkogata/start.c       |   8
-rw-r--r--  src/lib/libkogata/syscall.c     |  36
-rw-r--r--  src/lib/libkogata/user_region.c | 353
6 files changed, 446 insertions(+), 11 deletions(-)
diff --git a/src/lib/libkogata/Makefile b/src/lib/libkogata/Makefile
index 2649e25..ee00fa3 100644
--- a/src/lib/libkogata/Makefile
+++ b/src/lib/libkogata/Makefile
@@ -1,4 +1,4 @@
-OBJ = start.o malloc.o debug.o syscall.o
+OBJ = start.o malloc.o debug.o syscall.o user_region.o
LIB = ../../common/libkogata/libkogata.lib
diff --git a/src/lib/libkogata/debug.c b/src/lib/libkogata/debug.c
index 9688877..a8b9414 100644
--- a/src/lib/libkogata/debug.c
+++ b/src/lib/libkogata/debug.c
@@ -2,13 +2,17 @@
#include <debug.h>
+#include <syscall.h>
+
void panic(const char* msg, const char* file, int line) {
- // TODO
+ dbg_printf("Panic '%s', at %s:%d\n", msg, file, line);
+ exit(-1);
while(true);
}
void panic_assert(const char* assert, const char* file, int line) {
- // TODO
+ dbg_printf("Assert failed '%s', at %s:%d\n", assert, file, line);
+ exit(-1);
while(true);
}
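The panic handlers above back the ASSERT() checks used throughout the rest of this diff. debug.h itself is not part of this commit; as a plausible sketch only (the exact definitions are an assumption, not taken from the source), the macros it provides would look like:

/* Assumed shape of the debug.h macros that route into panic()/panic_assert(). */
#define PANIC(msg)  panic((msg), __FILE__, __LINE__)
#define ASSERT(x)   do { if (!(x)) panic_assert(#x, __FILE__, __LINE__); } while (0)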
diff --git a/src/lib/libkogata/malloc.c b/src/lib/libkogata/malloc.c
index 272dd7d..56a5397 100644
--- a/src/lib/libkogata/malloc.c
+++ b/src/lib/libkogata/malloc.c
@@ -1,12 +1,56 @@
#include <malloc.h>
+#include <slab_alloc.h>
+
+#include <syscall.h>
+#include <user_region.h>
+
+static void* heap_alloc_pages(size_t s) {
+ void* addr = region_alloc(s, "Heap");
+ if (addr == 0) return 0;
+
+ bool map_ok = mmap(addr, s, FM_READ | FM_WRITE);
+ if (!map_ok) {
+ region_free(addr);
+ return 0;
+ }
+
+ return addr;
+}
+
+static void heap_free_pages(void* addr) {
+ munmap(addr);
+ region_free(addr);
+}
+
+static mem_allocator_t *mem_allocator;
+static slab_type_t slab_sizes[] = {
+ { "8B malloc objects", 8, 2 },
+ { "16B malloc objects", 16, 2 },
+ { "32B malloc objects", 32, 2 },
+ { "64B malloc objects", 64, 4 },
+ { "128B malloc objects", 128, 4 },
+ { "256B malloc objects", 256, 4 },
+ { "512B malloc objects", 512, 8 },
+ { "1KB malloc objects", 1024, 8 },
+ { "2KB malloc objects", 2048, 16 },
+ { "4KB malloc objects", 4096, 16 },
+ { 0, 0, 0 }
+};
+
+void malloc_setup() {
+ region_allocator_init((void*)0x40000000, (void*)0xB0000000);
+
+ mem_allocator = create_slab_allocator(slab_sizes, heap_alloc_pages, heap_free_pages);
+
+ ASSERT(mem_allocator != 0);
+}
void* malloc(size_t size) {
- // TODO
- return 0;
+ return slab_alloc(mem_allocator, size);
}
void free(void* ptr) {
- // TODO
+ slab_free(mem_allocator, ptr);
}
/* vim: set ts=4 sw=4 tw=0 noet :*/
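With this change malloc() and free() become thin wrappers around the slab allocator, which grows the heap on demand through region_alloc() + mmap() in heap_alloc_pages(). A minimal usage sketch, assuming malloc_setup() has already run (as __libkogata_start now guarantees below); the function name is illustrative only:

void example_heap_use() {
	char* buf = (char*)malloc(100);   /* presumably served by the 128B slab cache */
	if (buf != 0) {
		buf[0] = 'x';
		free(buf);                    /* returns the object to its slab */
	}
}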
diff --git a/src/lib/libkogata/start.c b/src/lib/libkogata/start.c
index 984a3cd..bd22d7a 100644
--- a/src/lib/libkogata/start.c
+++ b/src/lib/libkogata/start.c
@@ -1,9 +1,13 @@
#include <syscall.h>
-extern int main(int, char**);
+void malloc_setup();
+
+int main(int, char**);
void __libkogata_start() {
- // TODO : setup
+ malloc_setup();
+
+ // TODO : more setup ?
int ret = main(0, 0);
diff --git a/src/lib/libkogata/syscall.c b/src/lib/libkogata/syscall.c
index b9cefa7..8d24628 100644
--- a/src/lib/libkogata/syscall.c
+++ b/src/lib/libkogata/syscall.c
@@ -2,17 +2,47 @@
#include <syscall.h>
#include <string.h>
+#include <printf.h>
+
+static uint32_t call(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t ss, uint32_t dd) {
+ uint32_t ret;
+ asm volatile("int $0x40"
+ :"=a"(ret)
+ :"a"(a),"b"(b),"c"(c),"d"(d),"S"(ss),"D"(dd));
+ return ret;
+}
void dbg_print(const char* str) {
- asm volatile("int $0x40"::"a"(SC_DBG_PRINT),"b"(str),"c"(strlen(str)));
+ call(SC_DBG_PRINT, (uint32_t)str, strlen(str), 0, 0, 0);
+}
+
+void dbg_printf(const char* fmt, ...) {
+ va_list ap;
+ char buffer[256];
+
+ va_start(ap, fmt);
+ vsnprintf(buffer, 256, fmt, ap);
+ va_end(ap);
+
+ dbg_print(buffer);
}
void yield() {
- asm volatile("int $0x40"::"a"(SC_YIELD));
+ call(SC_YIELD, 0, 0, 0, 0, 0);
}
void exit(int code) {
- asm volatile("int $0x40"::"a"(SC_EXIT),"b"(code));
+ call(SC_EXIT, code, 0, 0, 0, 0);
+}
+
+bool mmap(void* addr, size_t size, int mode) {
+ return call(SC_MMAP, (uint32_t)addr, size, mode, 0, 0);
+}
+bool mchmap(void* addr, int mode) {
+ return call(SC_MCHMAP, (uint32_t)addr, mode, 0, 0, 0);
+}
+bool munmap(void* addr) {
+ return call(SC_MUNMAP, (uint32_t)addr, 0, 0, 0, 0);
}
/* vim: set ts=4 sw=4 tw=0 noet :*/
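The new call() helper centralizes the int $0x40 convention: EAX carries the syscall number, EBX/ECX/EDX/ESI/EDI carry up to five arguments, and the result comes back in EAX. As an illustration only (a hypothetical helper, not code from this commit), mmap(addr, size, mode) spelled out by hand against that convention would be:

/* Hypothetical helper, for illustration: equivalent to the mmap() wrapper above. */
static bool mmap_by_hand(void* addr, size_t size, int mode) {
	uint32_t ret;
	asm volatile("int $0x40"
		: "=a"(ret)
		: "a"(SC_MMAP), "b"((uint32_t)addr), "c"(size), "d"(mode), "S"(0), "D"(0));
	return ret;   /* nonzero on success, matching the bool wrappers above */
}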
diff --git a/src/lib/libkogata/user_region.c b/src/lib/libkogata/user_region.c
new file mode 100644
index 0000000..516259b
--- /dev/null
+++ b/src/lib/libkogata/user_region.c
@@ -0,0 +1,353 @@
+#include <user_region.h>
+#include <debug.h>
+#include <mutex.h>
+
+#include <syscall.h>
+
+typedef union region_descriptor {
+ struct {
+ union region_descriptor *next;
+ } unused_descriptor;
+ struct {
+ void* addr;
+ size_t size;
+ union region_descriptor *next_by_size, *first_bigger;
+ union region_descriptor *next_by_addr;
+ } free;
+ struct {
+ region_info_t i;
+ union region_descriptor *next_by_addr;
+ } used;
+} descriptor_t;
+
+#define N_RESERVE_DESCRIPTORS 2 // always keep at least 2 unused descriptors
+
+#define N_BASE_DESCRIPTORS 12 // pre-allocate memory for 12 descriptors
+static descriptor_t base_descriptors[N_BASE_DESCRIPTORS];
+
+static descriptor_t *first_unused_descriptor;
+uint32_t n_unused_descriptors;
+static descriptor_t *first_free_region_by_addr, *first_free_region_by_size;
+static descriptor_t *first_used_region;
+
+STATIC_MUTEX(ra_mutex); // region allocator mutex
+
+// ========================================================= //
+// HELPER FUNCTIONS FOR THE MANIPULATION OF THE REGION LISTS //
+// ========================================================= //
+
+static void add_unused_descriptor(descriptor_t *d) {
+ n_unused_descriptors++;
+ d->unused_descriptor.next = first_unused_descriptor;
+ first_unused_descriptor = d;
+}
+
+static descriptor_t *get_unused_descriptor() {
+ descriptor_t *r = first_unused_descriptor;
+ if (r != 0) {
+ first_unused_descriptor = r->unused_descriptor.next;
+ n_unused_descriptors--;
+ }
+ return r;
+}
+
+static void remove_free_region(descriptor_t *d) {
+ if (first_free_region_by_size == d) {
+ first_free_region_by_size = d->free.next_by_size;
+ } else {
+ for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.next_by_size) {
+ if (i->free.next_by_size == d) {
+ i->free.next_by_size = d->free.next_by_size;
+ break;
+ }
+ }
+ }
+ if (first_free_region_by_addr == d) {
+ first_free_region_by_addr = d->free.next_by_addr;
+ } else {
+ for (descriptor_t *i = first_free_region_by_addr; i != 0; i = i->free.next_by_addr) {
+ if (i->free.next_by_addr == d) {
+ i->free.next_by_addr = d->free.next_by_addr;
+ break;
+ }
+ }
+ }
+}
+
+static void add_free_region(descriptor_t *d) {
+ /*dbg_printf("Add free region 0x%p - 0x%p\n", d->free.addr, d->free.size + d->free.addr);*/
+ // Find position of region in address-ordered list
+ // Possibly concatenate free region
+ descriptor_t *i = first_free_region_by_addr;
+ if (i == 0) {
+ ASSERT(first_free_region_by_size == 0);
+ first_free_region_by_addr = first_free_region_by_size = d;
+ d->free.next_by_size = d->free.first_bigger = d->free.next_by_addr = 0;
+ return;
+ } else if (d->free.addr + d->free.size == i->free.addr) {
+ // concatenate d . i
+ remove_free_region(i);
+ d->free.size += i->free.size;
+ add_unused_descriptor(i);
+ add_free_region(d);
+ return;
+ } else if (i->free.addr > d->free.addr) {
+ // insert before i
+ d->free.next_by_addr = i;
+ first_free_region_by_addr = d;
+ } else {
+ while (i != 0) {
+ ASSERT(d->free.addr > i->free.addr);
+ if (i->free.addr + i->free.size == d->free.addr) {
+ // concatenate i . d
+ remove_free_region(i);
+ i->free.size += d->free.size;
+ add_unused_descriptor(d);
+ add_free_region(i);
+ return;
+ } else if (i->free.next_by_addr == 0 || i->free.next_by_addr->free.addr > d->free.addr) {
+ d->free.next_by_addr = i->free.next_by_addr;
+ i->free.next_by_addr = d;
+ break;
+ } else if (d->free.addr + d->free.size == i->free.next_by_addr->free.addr) {
+ // concatenate d . i->next_by_addr
+ descriptor_t *j = i->free.next_by_addr;
+ remove_free_region(j);
+ d->free.size += j->free.size;
+ add_unused_descriptor(j);
+ add_free_region(d);
+ return;
+ } else {
+ // continue
+ i = i->free.next_by_addr;
+ }
+ }
+ }
+ // Now add it in size-ordered list
+ i = first_free_region_by_size;
+ ASSERT(i != 0);
+ if (d->free.size <= i->free.size) {
+ d->free.next_by_size = i;
+ d->free.first_bigger = (i->free.size > d->free.size ? i : i->free.first_bigger);
+ first_free_region_by_size = d;
+ } else {
+ while (i != 0) {
+ ASSERT(d->free.size > i->free.size);
+ if (i->free.next_by_size == 0) {
+ d->free.next_by_size = 0;
+ d->free.first_bigger = 0;
+ i->free.next_by_size = d;
+ if (d->free.size > i->free.size) i->free.first_bigger = d;
+ break;
+ } else if (i->free.next_by_size->free.size >= d->free.size) {
+ d->free.next_by_size = i->free.next_by_size;
+ d->free.first_bigger =
+ (i->free.next_by_size->free.size > d->free.size
+ ? i->free.next_by_size
+ : i->free.next_by_size->free.first_bigger);
+ i->free.next_by_size = d;
+ if (d->free.size > i->free.size) i->free.first_bigger = d;
+ break;
+ } else {
+ // continue
+ i = i->free.next_by_size;
+ }
+ }
+ }
+}
+
+static descriptor_t *find_used_region(void* addr) {
+ for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) {
+ if (addr >= i->used.i.addr && addr < i->used.i.addr + i->used.i.size) return i;
+ if (i->used.i.addr > addr) break;
+ }
+ return 0;
+}
+
+static void add_used_region(descriptor_t *d) {
+ if (first_used_region == 0 || d->used.i.addr < first_used_region->used.i.addr) {
+ d->used.next_by_addr = first_used_region;
+ first_used_region = d;
+ } else {
+ descriptor_t *i = first_used_region;
+ ASSERT(i->used.i.addr < d->used.i.addr); // first region by address is never free
+
+ while (i != 0) {
+ ASSERT(i->used.i.addr < d->used.i.addr);
+ if (i->used.next_by_addr == 0 || i->used.next_by_addr->used.i.addr > d->used.i.addr) {
+ d->used.next_by_addr = i->used.next_by_addr;
+ i->used.next_by_addr = d;
+ return;
+ } else {
+ i = i->used.next_by_addr;
+ }
+ }
+ ASSERT(false);
+ }
+}
+
+static void remove_used_region(descriptor_t *d) {
+ if (first_used_region == d) {
+ first_used_region = d->used.next_by_addr;
+ } else {
+ for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) {
+ if (i->used.i.addr > d->used.i.addr) break;
+ if (i->used.next_by_addr == d) {
+ i->used.next_by_addr = d->used.next_by_addr;
+ break;
+ }
+ }
+ }
+}
+
+// =============== //
+// THE ACTUAL CODE //
+// =============== //
+
+void region_allocator_init(void* begin, void* end) {
+ n_unused_descriptors = 0;
+ first_unused_descriptor = 0;
+ for (int i = 0; i < N_BASE_DESCRIPTORS; i++) {
+ add_unused_descriptor(&base_descriptors[i]);
+ }
+
+ descriptor_t *f0 = get_unused_descriptor();
+ f0->free.addr = (void*)PAGE_ALIGN_UP(begin);
+ f0->free.size = ((void*)PAGE_ALIGN_DOWN(end) - f0->free.addr);
+ f0->free.next_by_size = 0;
+ f0->free.first_bigger = 0;
+ first_free_region_by_size = first_free_region_by_addr = f0;
+
+ first_used_region = 0;
+}
+
+static void region_free_inner(void* addr) {
+ descriptor_t *d = find_used_region(addr);
+ if (d == 0) return;
+
+ region_info_t i = d->used.i;
+
+ remove_used_region(d);
+ d->free.addr = i.addr;
+ d->free.size = i.size;
+ add_free_region(d);
+}
+void region_free(void* addr) {
+ mutex_lock(&ra_mutex);
+ region_free_inner(addr);
+ mutex_unlock(&ra_mutex);
+}
+
+static void* region_alloc_inner(size_t size, char* type, bool use_reserve) {
+ size = PAGE_ALIGN_UP(size);
+
+ for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.first_bigger) {
+ if (i->free.size >= size) {
+ // region i is the one we want to allocate in
+ descriptor_t *x = 0;
+ if (i->free.size > size) {
+ if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS && !use_reserve) {
+ return 0;
+ }
+
+ // this assert basically means that the allocation function
+ // is called less than N_RESERVE_DESCRIPTORS times with
+ // the use_reserve flag before more descriptors
+ // are allocated.
+ x = get_unused_descriptor();
+ ASSERT(x != 0);
+
+ x->free.size = i->free.size - size;
+ if (size >= 0x4000) {
+ x->free.addr = i->free.addr + size;
+ } else {
+ x->free.addr = i->free.addr;
+ i->free.addr += x->free.size;
+ }
+ }
+ // do the allocation
+ remove_free_region(i);
+ if (x != 0) add_free_region(x);
+
+ void* addr = i->free.addr;
+ i->used.i.addr = addr;
+ i->used.i.size = size;
+ i->used.i.type = type;
+ add_used_region(i);
+
+ return addr;
+ }
+ }
+ return 0; //No big enough block found
+}
+
+void* region_alloc(size_t size, char* type) {
+ void* result = 0;
+ mutex_lock(&ra_mutex);
+
+ if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) {
+ void* descriptor_region = region_alloc_inner(PAGE_SIZE, "Region descriptors", true);
+ ASSERT(descriptor_region != 0);
+
+ bool map_ok = mmap(descriptor_region, PAGE_SIZE, MM_READ | MM_WRITE);
+ if (!map_ok) {
+ // this can happen if we weren't able to mmap the region (for whatever reason)
+ region_free_inner(descriptor_region);
+ goto try_anyway;
+ }
+
+ for (descriptor_t *d = (descriptor_t*)descriptor_region;
+ (void*)(d+1) <= (descriptor_region + PAGE_SIZE);
+ d++) {
+ add_unused_descriptor(d);
+ }
+ }
+ try_anyway:
+ // even if we don't have enough unused descriptors, we might find
+ // a free region that has exactly the right size and therefore
+ // does not require splitting, so we try the allocation in all cases
+ result = region_alloc_inner(size, type, false);
+
+ mutex_unlock(&ra_mutex);
+ return result;
+}
+
+region_info_t *find_region(void* addr) {
+ region_info_t *r = 0;
+ mutex_lock(&ra_mutex);
+
+ descriptor_t *d = find_used_region(addr);
+ if (d != 0) r = &d->used.i;
+
+ mutex_unlock(&ra_mutex);
+ return r;
+}
+
+// =========================== //
+// DEBUG LOG PRINTING FUNCTION //
+// =========================== //
+
+void dbg_print_region_info() {
+ mutex_lock(&ra_mutex);
+
+ dbg_printf("/ Free process regions, by address:\n");
+ for (descriptor_t *d = first_free_region_by_addr; d != 0; d = d->free.next_by_addr) {
+ dbg_printf("| 0x%p - 0x%p\n", d->free.addr, d->free.addr + d->free.size);
+ ASSERT(d != d->free.next_by_addr);
+ }
+ dbg_printf("- Free process regions, by size:\n");
+ for (descriptor_t *d = first_free_region_by_size; d != 0; d = d->free.next_by_size) {
+ dbg_printf("| 0x%p - 0x%p\n", d->free.addr, d->free.addr + d->free.size);
+ ASSERT(d != d->free.next_by_size);
+ }
+ dbg_printf("- Used process regions:\n");
+ for (descriptor_t *d = first_used_region; d != 0; d = d->used.next_by_addr) {
+ dbg_printf("| 0x%p - 0x%p %s\n", d->used.i.addr, d->used.i.addr + d->used.i.size, d->used.i.type);
+ ASSERT(d != d->used.next_by_addr);
+ }
+ dbg_printf("\\\n");
+
+ mutex_unlock(&ra_mutex);
+}
+
+/* vim: set ts=4 sw=4 tw=0 noet :*/
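Taken together, the pattern this commit establishes for anonymous user-space memory is: reserve an address range with region_alloc(), back it with mmap(), and undo in the reverse order, exactly as heap_alloc_pages() does above. A minimal sketch under those assumptions (the "Scratch" type string and function names are illustrative; the MM_* flag names follow region_alloc(), while the malloc path above passes FM_* names to the same call):

#include <user_region.h>
#include <syscall.h>

void* alloc_scratch_page() {
	void* addr = region_alloc(PAGE_SIZE, "Scratch");   /* reserve an address range */
	if (addr == 0) return 0;
	if (!mmap(addr, PAGE_SIZE, MM_READ | MM_WRITE)) {  /* back it with memory */
		region_free(addr);
		return 0;
	}
	return addr;
}

void free_scratch_page(void* addr) {
	munmap(addr);        /* unmap first... */
	region_free(addr);   /* ...then release the address range */
}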