Diffstat (limited to 'src/kernel')
-rw-r--r--  src/kernel/core/kmain.c    | 20 ++++++++++++++------
-rw-r--r--  src/kernel/core/loader.s   |  1 +
-rw-r--r--  src/kernel/core/sys.c      |  7 +++++--
-rw-r--r--  src/kernel/core/worker.c   |  2 +-
-rw-r--r--  src/kernel/user/process.c  |  2 +-
5 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/src/kernel/core/kmain.c b/src/kernel/core/kmain.c
index 99ad0d7..38336a9 100644
--- a/src/kernel/core/kmain.c
+++ b/src/kernel/core/kmain.c
@@ -125,6 +125,20 @@ void kmain(multiboot_info_t *mbd, int32_t mb_magic) {
setup_syscall_table();
dbg_printf("System calls setup ok.\n");
+ // If we have a kernel map, load it now
+ for (unsigned i = 0; i < mbd->mods_count; i++) {
+ char* modname = (char*)mods[i].string;
+ size_t len = mods[i].mod_end - mods[i].mod_start;
+
+ if (strlen(modname) > 4 && strcmp(modname + strlen(modname) - 4, ".map") == 0) {
+ // Copy data to somewhere safe, as it will be modified
+ void* dup_data = malloc(len + 1);
+ memcpy(dup_data, (void*)mods[i].mod_start, len);
+
+ load_kernel_symbol_map((char*)dup_data, len);
+ }
+ }
+
// enter multi-threading mode
// interrupts are enabled at this moment, so all
// code run from now on should be preemptible (i.e. thread-safe)
@@ -249,12 +263,6 @@ fs_t *setup_iofs(multiboot_info_t *mbd) {
ASSERT(nullfs_add_ram_file(iofs, name,
(char*)mods[i].mod_start,
len, FM_READ | FM_MMAP));
-
- if (strlen(modname) > 4 && strcmp(modname + strlen(modname) - 4, ".map") == 0) {
- // remark: load_kernel_symbol_map modifies the data region,
- // which is not a problem because nullfs_add_ram_file copied the thing already
- load_kernel_symbol_map((char*)mods[i].mod_start, len);
- }
}
return iofs;
diff --git a/src/kernel/core/loader.s b/src/kernel/core/loader.s
index 447d82d..6f14be6 100644
--- a/src/kernel/core/loader.s
+++ b/src/kernel/core/loader.s
@@ -2,6 +2,7 @@
[GLOBAL loader] ; making entry point visible to linker
[GLOBAL kernel_pd] ; make kernel page directory visible
[GLOBAL kernel_stack_protector] ; used to detect kernel stack overflow
+[GLOBAL kernel_stack_bottom] ; useful for debugging
[GLOBAL kernel_stack_top] ; stack re-used by scheduler
; higher-half kernel setup
diff --git a/src/kernel/core/sys.c b/src/kernel/core/sys.c
index 333cb58..02b66e5 100644
--- a/src/kernel/core/sys.c
+++ b/src/kernel/core/sys.c
@@ -73,9 +73,12 @@ void kernel_stacktrace(uint32_t ebp, uint32_t eip) {
int i = 0;
while (ebp >= K_HIGHHALF_ADDR) {
char* sym = 0;
- if (kernel_symbol_map != 0) sym = btree_lower(kernel_symbol_map, (void*)eip);
+ void* fn_ptr = 0;
+ if (kernel_symbol_map != 0) {
+ sym = btree_lower(kernel_symbol_map, (void*)eip, &fn_ptr);
+ }
- dbg_printf("| 0x%p EIP: 0x%p %s\n", ebp, eip, sym);
+ dbg_printf("| 0x%p EIP: 0x%p %s +0x%p\n", ebp, eip, sym, ((void*)eip - fn_ptr));
uint32_t *d = (uint32_t*)ebp;
ebp = d[0];
diff --git a/src/kernel/core/worker.c b/src/kernel/core/worker.c
index 6852329..9b50949 100644
--- a/src/kernel/core/worker.c
+++ b/src/kernel/core/worker.c
@@ -50,7 +50,7 @@ void worker_thread(void* x) {
while (true) {
mutex_lock(&tasks_mutex);
- worker_task_t *next_task = btree_upper(tasks, &zero64);
+ worker_task_t *next_task = btree_upper(tasks, &zero64, 0);
next_task_time = (next_task == 0 ? UINT64_MAX : next_task->time);
if (next_task != 0 && next_task->time <= time) {
diff --git a/src/kernel/user/process.c b/src/kernel/user/process.c
index 7b0c351..43294ce 100644
--- a/src/kernel/user/process.c
+++ b/src/kernel/user/process.c
@@ -541,7 +541,7 @@ void proc_close_fd(process_t *p, int fd) {
user_region_t *find_user_region(process_t *proc, const void* addr) {
mutex_lock(&proc->lock);
- user_region_t *r = (proc->regions_idx != 0 ? (user_region_t*)btree_lower(proc->regions_idx, addr) : 0);
+ user_region_t *r = (proc->regions_idx != 0 ? (user_region_t*)btree_lower(proc->regions_idx, addr, 0) : 0);
if (r != 0) {
ASSERT(addr >= r->addr);
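
The sys.c, worker.c and process.c hunks above adjust callers to a btree_lower()/btree_upper() signature that now takes a third argument. Judging only from this diff, it appears to be an out-parameter receiving the key that matched the lookup (in kernel_stacktrace(), the start address of the function containing EIP), which is what allows the new "symbol +offset" output. The stand-alone sketch below illustrates that assumed contract with a plain array instead of the kernel's btree; the symbol names and addresses are made up, and this is not code from this repository.

/* Sketch of the assumed "lower bound with reported key" lookup used by
 * the new kernel_stacktrace(): find the greatest symbol address <= EIP,
 * return its name, and report the matched address so the caller can
 * print the offset of EIP into that function. */
#include <stdio.h>
#include <stdint.h>

/* Hypothetical (address -> name) entries of a kernel symbol map. */
static const struct { uintptr_t addr; const char *name; } sym_tab[] = {
	{ 0xC0100000, "kmain" },
	{ 0xC0101200, "setup_syscall_table" },
};

/* Models the assumed btree_lower(map, key, &matched_key) semantics:
 * return the value stored at the greatest key <= the lookup key,
 * and write that key back through the out-parameter. */
static const char *symbol_lower(uintptr_t eip, uintptr_t *fn_start) {
	const char *name = 0;
	for (unsigned i = 0; i < sizeof(sym_tab) / sizeof(sym_tab[0]); i++) {
		if (sym_tab[i].addr <= eip) {
			name = sym_tab[i].name;
			*fn_start = sym_tab[i].addr;
		}
	}
	return name;
}

int main(void) {
	uintptr_t eip = 0xC0101234, fn_start = 0;
	const char *sym = symbol_lower(eip, &fn_start);
	/* Mirrors the shape of the new dbg_printf() line: name plus offset. */
	printf("EIP: 0x%08lx %s +0x%lx\n", (unsigned long)eip,
	       sym ? sym : "?", (unsigned long)(eip - fn_start));
	return 0;
}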