author     Alex Auvolat <alex@adnab.me>   2015-03-11 16:22:40 +0100
committer  Alex Auvolat <alex@adnab.me>   2015-03-11 16:22:40 +0100
commit     9b9ef5a2c0ec8e66c7da24c4229d89a90a10e914 (patch)
tree       471cb88f003eb58ce84342f2e7ab7effda04ce2d /src/kernel/core
parent     a7ff74cdf2835625282491242ede57b05ceaa782 (diff)
Bugfixing in progress. Strange bug: wait_on adds the thread to waiters, but later the thread is not in waiters.
Diffstat (limited to 'src/kernel/core')
-rw-r--r--  src/kernel/core/idt.c     17
-rw-r--r--  src/kernel/core/thread.c  41
-rw-r--r--  src/kernel/core/worker.c  27
3 files changed, 49 insertions(+), 36 deletions(-)
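
A note on the bug described in the commit message: wait_on()/wait_on_many() record the current thread in the waiters hash table under each resource pointer before going to sleep, and resume_on() later looks that pointer up to find and wake the thread; the sentinel (void*)1 means "resource is waitable, but nobody is waiting on it". Below is a minimal userspace sketch of that protocol, not kernel code: table_set()/table_get() and WAITABLE are hypothetical stand-ins for hashtbl_change()/hashtbl_find() and the (void*)1 sentinel used in thread.c.

/* Minimal sketch (NOT kernel code) of the waiters protocol used by
 * wait_on_many()/resume_on(). table_set()/table_get() are hypothetical
 * stand-ins for hashtbl_change()/hashtbl_find(); WAITABLE plays the role
 * of the (void*)1 sentinel ("resource is waitable, nobody waiting"). */
#include <stdio.h>

#define WAITABLE ((void*)1)
#define SLOTS 8

static void *keys[SLOTS];
static void *vals[SLOTS];

static void table_set(void *k, void *v) {
	for (int i = 0; i < SLOTS; i++) {
		if (keys[i] == k || keys[i] == 0) { keys[i] = k; vals[i] = v; return; }
	}
}

static void *table_get(void *k) {
	for (int i = 0; i < SLOTS; i++)
		if (keys[i] == k) return vals[i];
	return 0;
}

int main(void) {
	int resource, thread;               /* stand-ins for a waited-on object and a thread_t */

	table_set(&resource, WAITABLE);     /* resource registered, nobody waiting */
	table_set(&resource, &thread);      /* wait_on: record ourselves, then go to sleep */

	/* resume_on must find the waiter here; the reported symptom is that the
	 * entry is no longer the thread at this point. */
	printf("waiter = %p (expected %p)\n", table_get(&resource), (void*)&thread);

	table_set(&resource, WAITABLE);     /* on wakeup: reset the entry to the sentinel */
	return 0;
}
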
diff --git a/src/kernel/core/idt.c b/src/kernel/core/idt.c
index f99460f..f3dd63b 100644
--- a/src/kernel/core/idt.c
+++ b/src/kernel/core/idt.c
@@ -91,6 +91,7 @@ static isr_handler_t ex_handlers[32] = {0};
/* Called in interrupt.s when an exception fires (interrupt 0 to 31) */
void idt_ex_handler(registers_t *regs) {
+ dbg_printf("Ex handler: %d\n", regs->int_no);
if (ex_handlers[regs->int_no] != 0) {
ex_handlers[regs->int_no](regs);
} else {
@@ -114,21 +115,21 @@ void idt_ex_handler(registers_t *regs) {
void idt_irq_handler(registers_t *regs) {
	int st = enter_critical(CL_EXCL);	// if someone tries to yield(), an assert will fail
+	dbg_printf("irq%d.", regs->err_code);
+
	if (regs->err_code > 7) {
		outb(0xA0, 0x20);
	}
	outb(0x20, 0x20);
-	if (regs->err_code == 0) {
-		irq0_handler(regs, st);
-	} else {
-		/*dbg_printf("irq%d.", regs->err_code);*/
+	if (irq_handlers[regs->err_code] != 0) {
+		irq_handlers[regs->err_code](regs);
+	}
-		if (irq_handlers[regs->err_code] != 0) {
-			irq_handlers[regs->err_code](regs);
-		}
+	exit_critical(st);
-		exit_critical(st);
+	if (regs->err_code == 0) {
+		threading_irq0_handler();
	}
	// maybe exit
diff --git a/src/kernel/core/thread.c b/src/kernel/core/thread.c
index 4e15c0e..ed2c185 100644
--- a/src/kernel/core/thread.c
+++ b/src/kernel/core/thread.c
@@ -19,7 +19,6 @@ void resume_context(saved_context_t *ctx);
thread_t *current_thread = 0;
static hashtbl_t *waiters = 0; // threads waiting on a ressource
-STATIC_MUTEX(waiters_mutex);
// ====================== //
// THE PROGRAMMABLE TIMER //
@@ -44,6 +43,8 @@ void set_pit_frequency(uint32_t freq) {
int enter_critical(int level) {
	asm volatile("cli");
+	/*dbg_printf(" >%d< ", level);*/
+
	if (current_thread == 0) return CL_EXCL;
	int prev_level = current_thread->critical_level;
@@ -57,6 +58,8 @@ int enter_critical(int level) {
void exit_critical(int prev_level) {
	asm volatile("cli");
+	/*dbg_printf(" <%d> ", prev_level);*/
+
	if (current_thread == 0) return;
	if (prev_level < current_thread->critical_level) current_thread->critical_level = prev_level;
@@ -117,6 +120,8 @@ void run_scheduler() {
	// At this point, interrupts are disabled
	// This function is expected NEVER TO RETURN
+	thread_t *prev_thread = current_thread;
+
	if (current_thread != 0 && current_thread->state == T_STATE_RUNNING) {
		current_thread->last_ran = get_kernel_time();
		if (current_thread->proc) current_thread->proc->last_ran = current_thread->last_ran;
@@ -124,7 +129,7 @@ void run_scheduler() {
	}
	current_thread = dequeue_thread();
-	/*dbg_printf("[0x%p]\n", current_thread);*/
+	if (current_thread != prev_thread) dbg_printf("[0x%p]\n", current_thread);
	if (current_thread != 0) {
		thread_t *ptr = current_thread;
@@ -221,11 +226,10 @@ void delete_thread(thread_t *t) {
// SETUP CODE //
// ========== //
-void irq0_handler(registers_t *regs, int crit_level) {
+void irq0_handler(registers_t *regs) {
	notify_time_pass(1000000 / TASK_SWITCH_FREQUENCY);
-
-	exit_critical(crit_level);
-
+}
+void threading_irq0_handler() {
	if (current_thread != 0 && current_thread->critical_level == CL_USER) {
		save_context_and_enter_scheduler(&current_thread->ctx);
	}
@@ -235,7 +239,7 @@ void threading_setup(entry_t cont, void* arg) {
	ASSERT(waiters != 0);
	set_pit_frequency(TASK_SWITCH_FREQUENCY);
-	// no need to set irq0 handler
+	idt_set_irq_handler(IRQ0, &irq0_handler);
	thread_t *t = new_thread(cont, arg);
	ASSERT(t != 0);
@@ -265,7 +269,8 @@ void start_thread(thread_t *t) {
}
void yield() {
-	ASSERT(current_thread != 0 && current_thread->critical_level != CL_EXCL);
+	ASSERT(current_thread != 0);
+	ASSERT(current_thread->critical_level != CL_EXCL);
	save_context_and_enter_scheduler(&current_thread->ctx);
}
@@ -275,10 +280,11 @@ bool wait_on(void* x) {
}
bool wait_on_many(void** x, size_t n) {
-	ASSERT(current_thread != 0 && current_thread->critical_level != CL_EXCL);
+	ASSERT(current_thread != 0);
+	ASSERT(current_thread->critical_level != CL_EXCL);
	ASSERT(n > 0);
-	mutex_lock(&waiters_mutex);
+	int st = enter_critical(CL_NOINT);
	// ---- Check we can wait on all the requested objects
	bool ok = true;
@@ -291,34 +297,34 @@ bool wait_on_many(void** x, size_t n) {
			}
		} else if (prev_th != (void*)1) {
			ok = false;
+			break;
		}
	}
	if (!ok) {
-		mutex_unlock(&waiters_mutex);
+		exit_critical(st);
		return false;
	}
	// ---- Set ourselves as the waiting thread for all the requested objets
-	int st = enter_critical(CL_NOSWITCH);
+	dbg_printf("Wait on many: ");
	for (size_t i = 0; i < n; i++) {
		ASSERT(hashtbl_change(waiters, x[i], current_thread));
+		dbg_printf("0x%p (0x%p) ", x[i], hashtbl_find(waiters, x[i]));
	}
+	dbg_printf("\n");
	// ---- Go to sleep
-	mutex_unlock(&waiters_mutex);
	current_thread->state = T_STATE_PAUSED;
	save_context_and_enter_scheduler(&current_thread->ctx);
	// ---- Remove ourselves from the list
-	mutex_lock(&waiters_mutex);
	for (size_t i = 0; i < n; i++) {
		ASSERT(hashtbl_change(waiters, x[i], (void*)1));
	}
-	mutex_unlock(&waiters_mutex);
	exit_critical(st);
	// ---- Check that we weren't waked up because of a kill request
@@ -368,15 +374,17 @@ void exit() {
}
bool resume_on(void* x) {
+
	thread_t *thread;
	bool ret = false;
-	mutex_lock(&waiters_mutex);
	int st = enter_critical(CL_NOINT);
	thread = hashtbl_find(waiters, x);
+	dbg_printf("Resume on 0x%p : 0x%p\n", x, thread);
+
	if (thread != 0 && thread != (void*)1) {
		if (thread->state == T_STATE_PAUSED) {
			thread->state = T_STATE_RUNNING;
@@ -387,7 +395,6 @@ bool resume_on(void* x) {
		}
	}
-	mutex_unlock(&waiters_mutex);
	exit_critical(st);
	return ret;
diff --git a/src/kernel/core/worker.c b/src/kernel/core/worker.c
index cb7bf34..6852329 100644
--- a/src/kernel/core/worker.c
+++ b/src/kernel/core/worker.c
@@ -49,21 +49,24 @@ void worker_thread(void* x) {
	while (true) {
		mutex_lock(&tasks_mutex);
-		worker_task_t *t = btree_upper(tasks, &zero64);
-		next_task_time = (t == 0 ? UINT64_MAX : t->time);
-		if (t != 0 && t->time <= time) {
-			btree_remove_v(tasks, &t->time, t);
+
+		worker_task_t *next_task = btree_upper(tasks, &zero64);
+		next_task_time = (next_task == 0 ? UINT64_MAX : next_task->time);
+
+		if (next_task != 0 && next_task->time <= time) {
+			btree_remove_v(tasks, &next_task->time, next_task);
		} else {
-			t = 0;
+			next_task = 0;
		}
+
		mutex_unlock(&tasks_mutex);
-		if (t != 0) {
-			prng_add_entropy((uint8_t*)&t, sizeof(t));
+		if (next_task != 0) {
+			prng_add_entropy((uint8_t*)&next_task, sizeof(next_task));
			// do task :-)
-			t->fun(t->data);
-			free(t);
+			next_task->fun(next_task->data);
+			free(next_task);
		} else {
			ASSERT(wait_on(current_thread));
		}
@@ -79,11 +82,12 @@ bool worker_push_in(int usecs, entry_t fun, void* data) {
	t->data = data;
	mutex_lock(&tasks_mutex);
+
	btree_add(tasks, &t->time, t);
-	mutex_unlock(&tasks_mutex);
-
	if (t->time < next_task_time) next_task_time = t->time;
+	mutex_unlock(&tasks_mutex);
+
	return true;
}
@@ -95,6 +99,7 @@ void notify_time_pass(int usecs) {
	time += usecs;
	if (next_task_time <= time) {
		for (int i = 0; i < nworkers; i++) {
+			if (workers[i] == 0) continue;
			if (resume_on(workers[i])) break;
		}
	}