author     Alex Auvolat <alex@adnab.me>    2015-03-11 17:59:28 +0100
committer  Alex Auvolat <alex@adnab.me>    2015-03-11 17:59:28 +0100
commit     0b76aff59b586d87ee0449bc7deda878f4633515 (patch)
tree       8fd773681e302d84bc1f33c2a2bdf791f0b0df95
parent     64b9108a58d3483e9b63511c4cf74b12dceeb0f6 (diff)
Add better stack tracing technology (now uses kernel memory map!)
-rwxr-xr-x  make_cdrom.sh                        2
-rw-r--r--  src/common/libkogata/slab_alloc.c   92
-rw-r--r--  src/kernel/core/idt.c               16
-rw-r--r--  src/kernel/core/kmain.c              6
-rw-r--r--  src/kernel/core/kmalloc.c            4
-rw-r--r--  src/kernel/core/paging.c             2
-rw-r--r--  src/kernel/core/region.c            18
-rw-r--r--  src/kernel/core/sys.c               58
-rw-r--r--  src/kernel/core/thread.c            31
-rw-r--r--  src/kernel/dev/pckbd.c               3
-rw-r--r--  src/kernel/include/dev/v86.h         4
-rw-r--r--  src/kernel/include/sys.h             3
-rw-r--r--  src/kernel/user/ipc.c               20
-rw-r--r--  src/kernel/user/process.c            4
-rw-r--r--  src/lib/libkogata/draw.c             2
-rw-r--r--  src/rules.make                       4
16 files changed, 173 insertions(+), 96 deletions(-)
diff --git a/make_cdrom.sh b/make_cdrom.sh
index 0f1b09f..e15e9bc 100755
--- a/make_cdrom.sh
+++ b/make_cdrom.sh
@@ -9,6 +9,7 @@ fi
# Copy system files to CDROM
cp src/kernel/kernel.bin cdrom/boot; strip cdrom/boot/kernel.bin
+cp src/kernel/kernel.map cdrom/boot
cp src/sysbin/init/init.bin cdrom/boot; strip cdrom/boot/init.bin
mkdir -p cdrom/sys/bin
@@ -34,6 +35,7 @@ default 0
title kogata OS
kernel /boot/kernel.bin root=io:/disk/atapi0 root_opts=l init=root:/boot/init.bin config=default
+module /boot/kernel.map
EOF
# Generate CDROm image
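
The added `module /boot/kernel.map` line makes GRUB load the kernel symbol map into memory as a Multiboot module, right next to the kernel image; the kernel then picks it up from the Multiboot module list (see the kmain.c hunk further down). A minimal sketch of such a lookup, assuming the standard Multiboot 1 module descriptor; `mb_module_t` and `find_module` are illustrative names, the actual kernel walks the list inline in setup_iofs:

    #include <stdint.h>
    #include <string.h>

    /* Multiboot 1 module descriptor, as filled in by GRUB. */
    typedef struct {
        uint32_t mod_start;   /* physical start address of the module data   */
        uint32_t mod_end;     /* physical end address of the module data     */
        uint32_t string;      /* physical address of the module command line */
        uint32_t reserved;
    } mb_module_t;

    /* Hypothetical helper: find the module whose command line ends in `name`
     * (e.g. "kernel.map").  Assumes the strings are reachable at their
     * physical addresses, as they are this early during boot. */
    static mb_module_t *find_module(mb_module_t *mods, uint32_t count, const char *name) {
        for (uint32_t i = 0; i < count; i++) {
            const char *cmdline = (const char*)(uintptr_t)mods[i].string;
            const char *base = strrchr(cmdline, '/');
            if (strcmp(base ? base + 1 : cmdline, name) == 0) return &mods[i];
        }
        return 0;
    }
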
diff --git a/src/common/libkogata/slab_alloc.c b/src/common/libkogata/slab_alloc.c
index 714c49f..6d0b9d6 100644
--- a/src/common/libkogata/slab_alloc.c
+++ b/src/common/libkogata/slab_alloc.c
@@ -45,12 +45,12 @@ struct mem_allocator {
// Helper functions for the manipulation of lists //
// ============================================== //
-static void add_free_descriptor(mem_allocator_t *a, descriptor_t *c) {
+void add_free_descriptor(mem_allocator_t *a, descriptor_t *c) {
c->next_free = a->first_free_descriptor;
a->first_free_descriptor = c;
}
-static descriptor_t *take_descriptor(mem_allocator_t *a) {
+descriptor_t *take_descriptor(mem_allocator_t *a) {
if (a->first_free_descriptor == 0) {
void* p = a->alloc_fun(PAGE_SIZE);
if (p == 0) return 0;
@@ -119,7 +119,7 @@ mem_allocator_t* create_slab_allocator(const slab_type_t *types, page_alloc_fun_
return a;
}
-static void stack_and_destroy_regions(page_free_fun_t ff, region_t *r) {
+void stack_and_destroy_regions(page_free_fun_t ff, region_t *r) {
if (r == 0) return;
void* addr = r->region_addr;
ASSERT(r != r->next_region);
@@ -151,53 +151,57 @@ void destroy_slab_allocator(mem_allocator_t *a) {
void* slab_alloc(mem_allocator_t* a, size_t sz) {
for (int i = 0; a->types[i].obj_size != 0; i++) {
const size_t obj_size = a->types[i].obj_size;
- if (sz <= obj_size) {
- // find a cache with free space
- cache_t *fc = a->slabs[i].first_cache;
- while (fc != 0 && fc->n_free_objs == 0) {
- ASSERT(fc->first_free_obj == 0); // make sure n_free == 0 iff no object in the free stack
- fc = fc->next_cache;
- }
- // if none found, try to allocate a new one
- if (fc == 0) {
- descriptor_t *fcd = take_descriptor(a);
- if (fcd == 0) return 0;
-
- fc = &fcd->c;
- ASSERT((descriptor_t*)fc == fcd);
-
- const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE;
- fc->region_addr = a->alloc_fun(cache_size);
- if (fc->region_addr == 0) {
- add_free_descriptor(a, fcd);
- return 0;
- }
- fc->n_free_objs = 0;
- fc->first_free_obj = 0;
- for (void* p = fc->region_addr; p + obj_size <= fc->region_addr + cache_size; p += obj_size) {
- object_t *x = (object_t*)p;
- x->next = fc->first_free_obj;
- fc->first_free_obj = x;
- fc->n_free_objs++;
- }
- ASSERT(fc->n_free_objs == cache_size / obj_size);
+ if (sz > obj_size) continue;
- fc->next_cache = a->slabs[i].first_cache;
- a->slabs[i].first_cache = fc;
+ // find a cache with free space
+ cache_t *fc = a->slabs[i].first_cache;
+ while (fc != 0 && fc->n_free_objs == 0) {
+ // make sure n_free == 0 iff no object in the free stack
+ ASSERT((fc->first_free_obj == 0) == (fc->n_free_objs == 0));
+ fc = fc->next_cache;
+ }
+ // if none found, try to allocate a new one
+ if (fc == 0) {
+ descriptor_t *fcd = take_descriptor(a);
+ if (fcd == 0) return 0;
+
+ fc = &fcd->c;
+ ASSERT((descriptor_t*)fc == fcd);
+
+ const size_t cache_size = a->types[i].pages_per_cache * PAGE_SIZE;
+ fc->region_addr = a->alloc_fun(cache_size);
+ if (fc->region_addr == 0) {
+ add_free_descriptor(a, fcd);
+ return 0;
}
- // allocate on fc
- ASSERT(fc != 0 && fc->n_free_objs > 0);
-
- object_t *x = fc->first_free_obj;
- fc->first_free_obj = x->next;
- fc->n_free_objs--;
- ASSERT((fc->n_free_objs == 0) == (fc->first_free_obj == 0));
+ fc->n_free_objs = 0;
+ fc->first_free_obj = 0;
+ for (void* p = fc->region_addr; p + obj_size <= fc->region_addr + cache_size; p += obj_size) {
+ object_t *x = (object_t*)p;
+ x->next = fc->first_free_obj;
+ fc->first_free_obj = x;
+ fc->n_free_objs++;
+ }
+ ASSERT(fc->n_free_objs == cache_size / obj_size);
- // TODO : if fc is full, put it at the end
- return x;
+ fc->next_cache = a->slabs[i].first_cache;
+ a->slabs[i].first_cache = fc;
}
+ // allocate on fc
+ ASSERT(fc != 0);
+ ASSERT(fc->n_free_objs > 0);
+ ASSERT(fc->first_free_obj != 0);
+
+ object_t *x = fc->first_free_obj;
+ fc->first_free_obj = x->next;
+ fc->n_free_objs--;
+
+ ASSERT((fc->n_free_objs == 0) == (fc->first_free_obj == 0));
+
+ // TODO : if fc is full, put it at the end
+ return x;
}
// otherwise directly allocate using a->alloc_fun
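
The slab_alloc rewrite above mostly flattens control flow: size classes that are too small are skipped with an early `continue`, and the allocation then pops the head of the per-cache free-object list. A condensed sketch of just that fast path, using the field names visible in the hunk (cache creation and the oversized-request fallback are left out):

    /* Fast path of the slab allocator: pop one object from the first cache of
     * the matching size class.  Error handling and new-cache allocation are
     * omitted; see slab_alloc above for the full logic. */
    void* slab_alloc_fast(mem_allocator_t *a, size_t sz) {
        for (int i = 0; a->types[i].obj_size != 0; i++) {
            if (sz > a->types[i].obj_size) continue;      /* size class too small */

            cache_t *fc = a->slabs[i].first_cache;
            while (fc != 0 && fc->n_free_objs == 0) fc = fc->next_cache;
            if (fc == 0) return 0;                        /* real code allocates a new cache here */

            object_t *x = fc->first_free_obj;             /* pop the free-list head */
            fc->first_free_obj = x->next;
            fc->n_free_objs--;
            return x;
        }
        return 0;                                         /* too big for any slab: use a->alloc_fun */
    }
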
diff --git a/src/kernel/core/idt.c b/src/kernel/core/idt.c
index d963b4d..457e5ae 100644
--- a/src/kernel/core/idt.c
+++ b/src/kernel/core/idt.c
@@ -91,7 +91,8 @@ static isr_handler_t ex_handlers[32] = {0};
/* Called in interrupt.s when an exception fires (interrupt 0 to 31) */
void idt_ex_handler(registers_t *regs) {
- dbg_printf("ex%d.", regs->int_no);
+ /*dbg_printf("ex%d.", regs->int_no);*/
+
if (ex_handlers[regs->int_no] != 0) {
ex_handlers[regs->int_no](regs);
} else {
@@ -115,7 +116,7 @@ void idt_ex_handler(registers_t *regs) {
void idt_irq_handler(registers_t *regs) {
int st = enter_critical(CL_EXCL); // if someone tries to yield(), an assert will fail
- dbg_printf("irq%d.", regs->err_code);
+ if (regs->err_code != 0) dbg_printf("irq%d.", regs->err_code);
if (regs->err_code > 7) {
outb(0xA0, 0x20);
@@ -278,15 +279,8 @@ void dbg_dump_registers(registers_t *regs) {
dbg_printf("| EIP: 0x%p CS : 0x%p DS : 0x%p SS : 0x%p\n", regs->eip, regs->cs, regs->ds, regs->ss);
dbg_printf("| EFl: 0x%p I# : 0x%p Err: 0x%p\n", regs->eflags, regs->int_no, regs->err_code);
dbg_printf("- Stack trace:\n");
-
- uint32_t ebp = regs->ebp, eip = regs->eip;
- int i = 0;
- while (ebp >= K_HIGHHALF_ADDR && eip >= K_HIGHHALF_ADDR && i++ < 10) {
- dbg_printf("| 0x%p EIP: 0x%p\n", ebp, eip);
- uint32_t *d = (uint32_t*)ebp;
- ebp = d[0];
- eip = d[1];
- }
+
+ kernel_stacktrace(regs->ebp, regs->eip);
dbg_printf("\\\n");
}
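
The open-coded loop that used to live here has moved into kernel_stacktrace (added in sys.c below); both rely on the i386 cdecl frame layout, where the word at EBP is the caller's saved EBP and the word above it is the return address. A minimal sketch of that walk, assuming frame pointers are kept (true at -O0, see rules.make) and with K_HIGHHALF_ADDR taken from the kernel headers; print_frame stands in for dbg_printf:

    #include <stdint.h>

    /* i386 frame layout with frame pointers:
     *   [ebp+4]  return address (EIP inside the caller)
     *   [ebp+0]  caller's saved EBP
     * The walk stops when it leaves kernel space or after 10 frames. */
    static void walk_frames(uint32_t ebp, uint32_t eip,
                            void (*print_frame)(uint32_t ebp, uint32_t eip)) {
        for (int depth = 0; depth < 10; depth++) {
            if (ebp < K_HIGHHALF_ADDR || eip < K_HIGHHALF_ADDR) break;
            print_frame(ebp, eip);
            uint32_t *frame = (uint32_t*)ebp;
            ebp = frame[0];   /* saved EBP of the caller        */
            eip = frame[1];   /* return address into the caller */
        }
    }
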
diff --git a/src/kernel/core/kmain.c b/src/kernel/core/kmain.c
index 005f449..3ee2238 100644
--- a/src/kernel/core/kmain.c
+++ b/src/kernel/core/kmain.c
@@ -248,6 +248,12 @@ fs_t *setup_iofs(multiboot_info_t *mbd) {
ASSERT(nullfs_add_ram_file(iofs, name,
(char*)mods[i].mod_start,
len, FM_READ | FM_MMAP));
+
+ if (strncmp(modname, "kernel.map", 10) == 0) {
+ // remark: load_kernel_symbol_map modifies the data region,
+ // which is not a problem because nullfs_add_ram_file copied the thing already
+ load_kernel_symbol_map((char*)mods[i].mod_start, len);
+ }
}
return iofs;
diff --git a/src/kernel/core/kmalloc.c b/src/kernel/core/kmalloc.c
index cc20487..53ac83c 100644
--- a/src/kernel/core/kmalloc.c
+++ b/src/kernel/core/kmalloc.c
@@ -8,7 +8,7 @@
#include <region.h>
#include <freemem.h>
-static void* page_alloc_fun_for_kmalloc(size_t bytes) {
+void* page_alloc_fun_for_kmalloc(size_t bytes) {
void* addr = region_alloc(bytes, "Core kernel heap");
if (addr == 0) return 0;
@@ -59,7 +59,7 @@ void kmalloc_setup() {
region_free_unmap_free);
}
-static void* malloc0(size_t sz) {
+void* malloc0(size_t sz) {
void* res = 0;
mutex_lock(&malloc_mutex);
diff --git a/src/kernel/core/paging.c b/src/kernel/core/paging.c
index cefb8c3..fa22879 100644
--- a/src/kernel/core/paging.c
+++ b/src/kernel/core/paging.c
@@ -68,7 +68,6 @@ void page_fault_handler(registers_t *regs) {
if ((size_t)vaddr < PAGE_SIZE) {
dbg_printf("Null pointer dereference in kernel code (0x%p)\n", vaddr);
dbg_dump_registers(regs);
- dbg_print_region_info();
PANIC("Null pointer dereference in kernel code.");
} else if ((size_t)vaddr < K_HIGHHALF_ADDR) {
if (pd->user_pfh == 0) {
@@ -110,7 +109,6 @@ void page_fault_handler(registers_t *regs) {
dbg_printf("Kernel pagefault in region with no handler at 0x%p (%s region)\n", vaddr, region);
dbg_dump_registers(regs);
- dbg_print_region_info();
PANIC("Unhandled kernel space page fault");
}
}
diff --git a/src/kernel/core/region.c b/src/kernel/core/region.c
index 1f1958f..c4c34d1 100644
--- a/src/kernel/core/region.c
+++ b/src/kernel/core/region.c
@@ -35,13 +35,13 @@ STATIC_MUTEX(ra_mutex); // region allocator mutex
// HELPER FUNCTIONS FOR THE MANIPULATION OF THE REGION LISTS //
// ========================================================= //
-static void add_unused_descriptor(descriptor_t *d) {
+void add_unused_descriptor(descriptor_t *d) {
n_unused_descriptors++;
d->unused_descriptor.next = first_unused_descriptor;
first_unused_descriptor = d;
}
-static descriptor_t *get_unused_descriptor() {
+descriptor_t *get_unused_descriptor() {
descriptor_t *r = first_unused_descriptor;
if (r != 0) {
first_unused_descriptor = r->unused_descriptor.next;
@@ -50,7 +50,7 @@ static descriptor_t *get_unused_descriptor() {
return r;
}
-static void remove_free_region(descriptor_t *d) {
+void remove_free_region(descriptor_t *d) {
if (first_free_region_by_size == d) {
first_free_region_by_size = d->free.next_by_size;
} else {
@@ -73,7 +73,7 @@ static void remove_free_region(descriptor_t *d) {
}
}
-static void add_free_region(descriptor_t *d) {
+void add_free_region(descriptor_t *d) {
/*dbg_printf("Add free region 0x%p - 0x%p\n", d->free.addr, d->free.size + d->free.addr);*/
// Find position of region in address-ordered list
// Possibly concatenate free region
@@ -155,7 +155,7 @@ static void add_free_region(descriptor_t *d) {
}
}
-static descriptor_t *find_used_region(void* addr) {
+descriptor_t *find_used_region(void* addr) {
for (descriptor_t *i = first_used_region; i != 0; i = i->used.next_by_addr) {
if (addr >= i->used.i.addr && addr < i->used.i.addr + i->used.i.size) return i;
if (i->used.i.addr > addr) break;
@@ -163,7 +163,7 @@ static descriptor_t *find_used_region(void* addr) {
return 0;
}
-static void add_used_region(descriptor_t *d) {
+void add_used_region(descriptor_t *d) {
descriptor_t *i = first_used_region;
ASSERT(i->used.i.addr < d->used.i.addr); // first region by address is never free
@@ -180,7 +180,7 @@ static void add_used_region(descriptor_t *d) {
ASSERT(false);
}
-static void remove_used_region(descriptor_t *d) {
+void remove_used_region(descriptor_t *d) {
if (first_used_region == d) {
first_used_region = d->used.next_by_addr;
} else {
@@ -220,7 +220,7 @@ void region_allocator_init(void* kernel_data_end) {
first_used_region = u0;
}
-static void region_free_inner(void* addr) {
+void region_free_inner(void* addr) {
descriptor_t *d = find_used_region(addr);
if (d == 0) return;
@@ -237,7 +237,7 @@ void region_free(void* addr) {
mutex_unlock(&ra_mutex);
}
-static void* region_alloc_inner(size_t size, char* type, bool use_reserve) {
+void* region_alloc_inner(size_t size, char* type, bool use_reserve) {
size = PAGE_ALIGN_UP(size);
for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.first_bigger) {
diff --git a/src/kernel/core/sys.c b/src/kernel/core/sys.c
index 8be6c7f..554c95e 100644
--- a/src/kernel/core/sys.c
+++ b/src/kernel/core/sys.c
@@ -1,6 +1,9 @@
#include <sys.h>
#include <dbglog.h>
#include <thread.h>
+#include <string.h>
+
+#include <btree.h>
// Kernel panic and kernel assert failure
@@ -9,6 +12,15 @@ static void panic_do(const char* type, const char *msg, const char* file, int li
asm volatile("cli;");
dbg_printf("/\n| %s:\t%s\n", type, msg);
dbg_printf("| File: \t%s:%i\n", file, line);
+
+ dbg_printf("- trace\n");
+ dbg_printf("| current thread: 0x%p\n", current_thread);
+ uint32_t *ebp;
+ asm volatile("mov %%ebp, %0":"=r"(ebp));
+ kernel_stacktrace(ebp[0], ebp[1]);
+
+ dbg_print_region_info();
+
dbg_printf("| System halted -_-'\n");
dbg_printf("\\---------------------------------------------------------/");
BOCHS_BREAKPOINT;
@@ -26,4 +38,50 @@ void panic_assert(const char* assertion, const char* file, int line) {
panic_do("ASSERT FAILED", assertion, file, line);
}
+// ---- kernel symbol map
+
+btree_t *kernel_symbol_map = 0;
+
+void load_kernel_symbol_map(char* text, size_t len) {
+ kernel_symbol_map = create_btree(id_key_cmp_fun, 0);
+ ASSERT (kernel_symbol_map != 0);
+
+ dbg_printf("Loading kernel symbol map...\n");
+
+ char* it = text;
+ while (it < text + len) {
+ char* eol = it;
+ while (eol < text + len && *eol != 0 && *eol != '\n') eol++;
+ if (eol >= text + len) break;
+ *eol = 0;
+
+ if (it[16] == '0' && it[17] == 'x' && it[34] == ' ' && it[49] == ' ') {
+ uint32_t addr = 0;
+ for (unsigned i = 18; i < 34; i++) {
+ addr *= 16;
+ if (it[i] >= '0' && it[i] <= '9') addr += it[i] - '0';
+ if (it[i] >= 'a' && it[i] <= 'f') addr += it[i] - 'a' + 10;
+ }
+ btree_add(kernel_symbol_map, (void*)addr, it + 50);
+ }
+
+ it = eol + 1;
+ }
+}
+
+void kernel_stacktrace(uint32_t ebp, uint32_t eip) {
+ int i = 0;
+ while (ebp >= K_HIGHHALF_ADDR && eip >= K_HIGHHALF_ADDR && i++ < 10) {
+ char* sym = 0;
+ if (kernel_symbol_map != 0) sym = btree_lower(kernel_symbol_map, (void*)eip);
+
+ dbg_printf("| 0x%p EIP: 0x%p %s\n", ebp, eip, sym);
+
+ uint32_t *d = (uint32_t*)ebp;
+ ebp = d[0];
+ eip = d[1];
+ }
+}
+
+
/* vim: set ts=4 sw=4 tw=0 noet :*/
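
load_kernel_symbol_map above relies on a rigid column layout: `0x` at offset 16, a 16-digit hex address, blanks at offsets 34 and 49, and the symbol name from offset 50 on. That matches the layout GNU ld writes into a `-Map` file, which is presumably how kernel.map is generated (the build-system side is not part of this commit, so treat that as an assumption). A sketch of the two ingredients, hex parsing and nearest-symbol lookup; a plain sorted array stands in for the kernel's btree, whose btree_lower appears to return the entry with the greatest key not above the lookup key:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t addr; const char *name; } sym_t;

    /* Parse 16 lowercase hex digits (the field after "0x" in a map line).
     * On a 32-bit kernel the upper digits are zero, so accumulating into a
     * uint32_t keeps exactly the low 32 bits, as in load_kernel_symbol_map. */
    static uint32_t parse_hex16(const char *s) {
        uint32_t v = 0;
        for (int i = 0; i < 16; i++) {
            v <<= 4;
            if (s[i] >= '0' && s[i] <= '9') v |= (uint32_t)(s[i] - '0');
            else if (s[i] >= 'a' && s[i] <= 'f') v |= (uint32_t)(s[i] - 'a' + 10);
        }
        return v;
    }

    /* Nearest symbol at or below eip, given symbols sorted by address;
     * btree_lower(kernel_symbol_map, eip) plays the same role above. */
    static const char *symbol_for(const sym_t *syms, size_t n, uint32_t eip) {
        const char *best = 0;
        for (size_t i = 0; i < n && syms[i].addr <= eip; i++) best = syms[i].name;
        return best;
    }
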
diff --git a/src/kernel/core/thread.c b/src/kernel/core/thread.c
index 19ad6c8..1009ec7 100644
--- a/src/kernel/core/thread.c
+++ b/src/kernel/core/thread.c
@@ -120,7 +120,7 @@ void run_scheduler() {
// At this point, interrupts are disabled
// This function is expected NEVER TO RETURN
- thread_t *prev_thread = current_thread;
+ /*thread_t *prev_thread = current_thread;*/
if (current_thread != 0 && current_thread->state == T_STATE_RUNNING) {
current_thread->last_ran = get_kernel_time();
@@ -129,7 +129,7 @@ void run_scheduler() {
}
current_thread = dequeue_thread();
- if (current_thread != prev_thread) dbg_printf("[0x%p]\n", current_thread);
+ /*if (current_thread != prev_thread) dbg_printf("[0x%p]\n", current_thread);*/
if (current_thread != 0) {
thread_t *ptr = current_thread;
@@ -146,7 +146,7 @@ void run_scheduler() {
}
}
-static void run_thread(void (*entry)(void*), void* data) {
+void run_thread(void (*entry)(void*), void* data) {
ASSERT(current_thread->state == T_STATE_RUNNING);
dbg_printf("Begin thread 0x%p (in process %d)\n",
@@ -210,6 +210,10 @@ thread_t *new_thread(entry_t entry, void* data) {
t->next_in_proc = 0;
t->user_ex_handler = 0;
+ t->waiting_on = 0;
+ t->n_waiting_on = 0;
+ t->next_waiter = 0;
+
return t;
}
@@ -285,11 +289,11 @@ bool wait_on_many(void** x, size_t n) {
// ---- Set ourselves as the waiting thread for all the requested objets
- dbg_printf("Wait on many:");
- for (size_t i = 0; i < n; i++) {
- dbg_printf(" 0x%p", x[i]);
- }
- dbg_printf("\n");
+ /*dbg_printf("Wait on many:");*/
+ /*for (size_t i = 0; i < n; i++) {*/
+ /*dbg_printf(" 0x%p", x[i]);*/
+ /*}*/
+ /*dbg_printf("\n");*/
current_thread->waiting_on = x;
current_thread->n_waiting_on = n;
@@ -310,8 +314,12 @@ bool wait_on_many(void** x, size_t n) {
if (waiters == current_thread) {
waiters = current_thread->next_waiter;
} else {
+ ASSERT(waiters != 0);
for (thread_t *w = waiters; w->next_waiter != 0; w = w->next_waiter) {
- if (w->next_waiter == current_thread) w->next_waiter = current_thread->next_waiter;
+ if (w->next_waiter == current_thread) {
+ w->next_waiter = current_thread->next_waiter;
+ break;
+ }
}
}
@@ -368,12 +376,12 @@ bool resume_on(void* x) {
int st = enter_critical(CL_NOINT);
- dbg_printf("Resume on 0x%p:", x);
+ /*dbg_printf("Resume on 0x%p:", x);*/
for (thread_t *t = waiters; t != 0; t = t->next_waiter) {
for (int i = 0; i < t->n_waiting_on; i++) {
if (t->waiting_on[i] == x) {
- dbg_printf(" 0x%p", t);
+ /*dbg_printf(" 0x%p", t);*/
if (t->state == T_STATE_PAUSED) {
t->state = T_STATE_RUNNING;
@@ -386,6 +394,7 @@ bool resume_on(void* x) {
}
}
}
+ /*dbg_printf("\n");*/
exit_critical(st);
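
The wait_on_many cleanup above now asserts that the waiter list is non-empty and stops scanning at the first match when unlinking the current thread. The pattern is the usual removal from a singly linked list; a self-contained sketch, with `remove_waiter` as an illustrative name and thread_t reduced to the one field that matters:

    /* Minimal stand-in for the kernel's thread_t: only the waiter link is shown. */
    typedef struct thread { struct thread *next_waiter; } thread_t;

    /* Unlink `t` from a waiter list chained through next_waiter and return the
     * (possibly new) head.  Mirrors the cleanup path in wait_on_many above. */
    static thread_t *remove_waiter(thread_t *head, thread_t *t) {
        if (head == t) return t->next_waiter;          /* removing the head */
        for (thread_t *w = head; w->next_waiter != 0; w = w->next_waiter) {
            if (w->next_waiter == t) {
                w->next_waiter = t->next_waiter;       /* splice it out */
                break;                                 /* done: stop scanning */
            }
        }
        return head;
    }
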
diff --git a/src/kernel/dev/pckbd.c b/src/kernel/dev/pckbd.c
index 8a4c002..f7452b2 100644
--- a/src/kernel/dev/pckbd.c
+++ b/src/kernel/dev/pckbd.c
@@ -74,6 +74,9 @@ fs_node_ops_t pckbd_ops = {
};
void pckbd_setup(fs_t *iofs) {
+ uint8_t a = 0, b = 0;
+ while ((a = inb(0x60)) != b) b = a;
+
idt_set_irq_handler(IRQ1, &irq1_handler);
nullfs_add_node(iofs, "/input/pckbd", 0, &pckbd_ops, 0);
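
The two lines added to pckbd_setup read data port 0x60 until two consecutive reads return the same byte, flushing whatever scancode was left in the controller before the IRQ1 handler is installed. For reference, a more conventional i8042 drain polls bit 0 of status port 0x64 (output buffer full); this is an alternative sketch, not what the commit does:

    /* Drain pending bytes from the i8042 output buffer.  Bit 0 of status port
     * 0x64 set means a byte is waiting on data port 0x60.  Alternative to the
     * consecutive-read loop in pckbd_setup above. */
    static void i8042_drain(void) {
        while (inb(0x64) & 0x01) {
            (void)inb(0x60);   /* discard the stale byte */
        }
    }
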
diff --git a/src/kernel/include/dev/v86.h b/src/kernel/include/dev/v86.h
index b260fa7..4347b08 100644
--- a/src/kernel/include/dev/v86.h
+++ b/src/kernel/include/dev/v86.h
@@ -17,10 +17,10 @@ typedef uint32_t v86_farptr_t;
#define V86_SEG_OF_LIN(x) ((size_t)(x) >> 4)
#define V86_OFF_OF_LIN(x) ((size_t)(x) & 0x0F)
#define V86_LIN_OF_SEG_OFF(seg, off) ((((size_t)(seg)) << 4) + ((size_t)(off)))
-inline void* v86_lin_of_fp(v86_farptr_t x) {
+static inline void* v86_lin_of_fp(v86_farptr_t x) {
return (void*)V86_LIN_OF_SEG_OFF(x>>16, x & 0xFFFF);
}
-inline v86_farptr_t v86_fp_of_lin(void* p) {
+static inline v86_farptr_t v86_fp_of_lin(void* p) {
return (V86_SEG_OF_LIN(p) << 16) | V86_OFF_OF_LIN(p);
}
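
Making these helpers `static inline` matters because gcc applies C99 inline semantics under -std=gnu99 (the flag in rules.make): a header function defined with plain `inline` is only an inline definition, so any call the compiler chooses not to inline, which is the normal case once the build drops to -O0, needs an external definition somewhere or the link fails. A minimal illustration under those assumptions; the file and function names are made up:

    /* util.h (hypothetical), included from several .c files */
    static inline int twice(int x) { return 2 * x; }   /* each TU gets its own copy */

    /* With plain `inline int twice(int x) { ... }` instead, C99 treats this as
     * an inline definition only: calls that are not inlined (typical at -O0)
     * reference an external symbol `twice` that nothing defines, unless one .c
     * file adds `extern inline int twice(int x);` to emit the real definition. */
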
diff --git a/src/kernel/include/sys.h b/src/kernel/include/sys.h
index 61bfc59..038135b 100644
--- a/src/kernel/include/sys.h
+++ b/src/kernel/include/sys.h
@@ -92,4 +92,7 @@ static inline void invlpg(void* addr) {
#define ALIGN4_DOWN(x) (((size_t)x)&MASK4)
+void load_kernel_symbol_map(char* text, size_t len);
+void kernel_stacktrace(uint32_t ebp, uint32_t eip);
+
/* vim: set ts=4 sw=4 tw=0 noet :*/
diff --git a/src/kernel/user/ipc.c b/src/kernel/user/ipc.c
index 9149663..7f34fdf 100644
--- a/src/kernel/user/ipc.c
+++ b/src/kernel/user/ipc.c
@@ -9,15 +9,15 @@
#include <hashtbl.h>
-static size_t channel_read(fs_handle_t *c, size_t offset, size_t len, char* buf);
-static size_t channel_write(fs_handle_t *c, size_t offset, size_t len, const char* buf);
-static int channel_poll(fs_handle_t *c, void** out_wait_obj);
-static bool channel_open(fs_node_t *c, int mode);
-static bool channel_stat(fs_node_t *c, stat_t *st);
-static void channel_close(fs_handle_t *c);
-static void channel_dispose(fs_node_t *c);
-
-static fs_node_ops_t channel_ops = {
+size_t channel_read(fs_handle_t *c, size_t offset, size_t len, char* buf);
+size_t channel_write(fs_handle_t *c, size_t offset, size_t len, const char* buf);
+int channel_poll(fs_handle_t *c, void** out_wait_obj);
+bool channel_open(fs_node_t *c, int mode);
+bool channel_stat(fs_node_t *c, stat_t *st);
+void channel_close(fs_handle_t *c);
+void channel_dispose(fs_node_t *c);
+
+fs_node_ops_t channel_ops = {
.read = channel_read,
.write = channel_write,
.close = channel_close,
@@ -323,7 +323,7 @@ typedef struct {
static token_table_entry_t *expired_token = 0;
-static void token_expiration_check(void* x) {
+void token_expiration_check(void* x) {
mutex_lock(&token_table_mutex);
do {
diff --git a/src/kernel/user/process.c b/src/kernel/user/process.c
index 542c294..52f1031 100644
--- a/src/kernel/user/process.c
+++ b/src/kernel/user/process.c
@@ -81,7 +81,7 @@ error:
return 0;
}
-static void run_user_code(void* param) {
+void run_user_code(void* param) {
setup_data_t *d = (setup_data_t*)param;
void* esp = d->sp;
@@ -490,7 +490,7 @@ void proc_close_fd(process_t *p, int fd) {
// USER MEMORY REGION MANAGEMENT //
// ============================= //
-static user_region_t *find_user_region(process_t *proc, const void* addr) {
+user_region_t *find_user_region(process_t *proc, const void* addr) {
user_region_t *r = (user_region_t*)btree_lower(proc->regions_idx, addr);
if (r == 0) return 0;
diff --git a/src/lib/libkogata/draw.c b/src/lib/libkogata/draw.c
index 22ad8ba..911fe0d 100644
--- a/src/lib/libkogata/draw.c
+++ b/src/lib/libkogata/draw.c
@@ -63,7 +63,7 @@ color_t g_color_rgb(fb_t *f, uint8_t r, uint8_t g, uint8_t b) {
// ---- Plot
-inline void g_plot24(uint8_t* p, color_t c) {
+static inline void g_plot24(uint8_t* p, color_t c) {
p[0] = c & 0xFF;
p[1] = (c >> 8) & 0xFF;
p[2] = (c >> 16) & 0xFF;
diff --git a/src/rules.make b/src/rules.make
index b8a3287..7a5c21e 100644
--- a/src/rules.make
+++ b/src/rules.make
@@ -4,11 +4,11 @@ AS = nasm
ASFLAGS = -felf -g
CC = i586-elf-gcc
-CFLAGS += -ffreestanding -std=gnu99 -Wall -Wextra -Werror -Wno-unused-parameter -Wno-unused-function -I . -I ./include -g -O2 -Os
+CFLAGS += -ffreestanding -std=gnu99 -Wall -Wextra -Werror -Wno-unused-parameter -Wno-unused-function -I . -I ./include -g -O0
# CXX = i586-elf-g++
# CXFLAGS = -ffreestanding -O3 -Wall -Wextra -I . -I ./include -fno-exceptions -fno-rtti
LD = i586-elf-gcc
-LDFLAGS += -ffreestanding -nostdlib -lgcc -O2 -Os
+LDFLAGS += -ffreestanding -nostdlib -lgcc -O0
all: $(OUT)