author     Alex Auvolat <alex.auvolat@ens.fr>    2015-03-02 17:55:31 +0100
committer  Alex Auvolat <alex.auvolat@ens.fr>    2015-03-02 17:55:31 +0100
commit     b68881abc4c50bbc8ee9e81b4e18b0ea011b83b7 (patch)
tree       56af7d51db555183d62c3c50c614c8775efc6aa7 /src
parent     f610cb7baa26b2803fce8b6e4604e8639c71d1d3 (diff)
download   kogata-b68881abc4c50bbc8ee9e81b4e18b0ea011b83b7.tar.gz
           kogata-b68881abc4c50bbc8ee9e81b4e18b0ea011b83b7.zip
Set up critical section management for parts that must not be interrupted.
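
For context, a minimal usage sketch of the critical-section API this commit introduces. This is not part of the commit itself; it only assumes the enter_critical()/exit_critical() functions and CL_* levels declared in thread.h further down:

    int st = enter_critical(CL_NOINT);  /* mask IRQs and remember the caller's previous level */
    /* ... touch kernel state that must not be preempted or interrupted ... */
    exit_critical(st);                  /* drop back to whatever level the caller was at */
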
Diffstat (limited to 'src')
-rw-r--r--  src/kernel/core/idt.c         4
-rw-r--r--  src/kernel/core/thread.c     61
-rw-r--r--  src/kernel/core/worker.c      2
-rw-r--r--  src/kernel/dev/pciide.c      12
-rw-r--r--  src/kernel/include/idt.h     15
-rw-r--r--  src/kernel/include/thread.h  14
-rw-r--r--  src/kernel/user/process.c     2
7 files changed, 65 insertions, 45 deletions
diff --git a/src/kernel/core/idt.c b/src/kernel/core/idt.c
index d34d03f..5c77502 100644
--- a/src/kernel/core/idt.c
+++ b/src/kernel/core/idt.c
@@ -107,6 +107,8 @@ void idt_ex_handler(registers_t *regs) {
/* Called in interrupt.s when an IRQ fires (interrupt 32 to 47) */
void idt_irq_handler(registers_t *regs) {
+ int st = enter_critical(CL_EXCL); // if someone tries to yield(), an assert will fail
+
if (regs->err_code > 7) {
outb(0xA0, 0x20);
}
@@ -116,6 +118,8 @@ void idt_irq_handler(registers_t *regs) {
if (irq_handlers[regs->err_code] != 0) {
irq_handlers[regs->err_code](regs);
}
+
+ exit_critical(st);
}
/* Called in interrupt.s when a syscall is called */
diff --git a/src/kernel/core/thread.c b/src/kernel/core/thread.c
index 60dccde..5d2abae 100644
--- a/src/kernel/core/thread.c
+++ b/src/kernel/core/thread.c
@@ -30,19 +30,30 @@ void set_pit_frequency(uint32_t freq) {
outb(0x40, h);
}
-// ============================= //
-// HELPER : IF FLAG MANIPULATION //
-// ============================= //
+// =========================== //
+// CRITICAL SECTION MANAGEMENT //
+// =========================== //
-static inline bool disable_interrupts() {
- uint32_t eflags;
- asm volatile("pushf; pop %0" : "=r"(eflags));
+int enter_critical(int level) {
asm volatile("cli");
- return (eflags & EFLAGS_IF) != 0;
+
+ if (current_thread == 0) return CL_EXCL;
+
+ int prev_level = current_thread->critical_level;
+ if (level > prev_level) current_thread->critical_level = level;
+
+ if (current_thread->critical_level < CL_NOINT) asm volatile("sti");
+
+ return prev_level;
}
-static inline void resume_interrupts(bool st) {
- if (st) asm volatile("sti");
+void exit_critical(int prev_level) {
+ asm volatile("cli");
+
+ if (current_thread == 0) return;
+
+ if (prev_level < current_thread->critical_level) current_thread->critical_level = prev_level;
+ if (current_thread->critical_level < CL_NOINT) asm volatile("sti");
}
// ================== //
@@ -89,8 +100,8 @@ void run_scheduler() {
if (current_thread->proc) current_thread->proc->last_ran = current_thread->last_ran;
enqueue_thread(current_thread, true);
}
-
current_thread = dequeue_thread();
+
if (current_thread != 0) {
set_kernel_stack(current_thread->stack_region->addr + current_thread->stack_region->size);
resume_context(&current_thread->ctx);
@@ -100,7 +111,6 @@ void run_scheduler() {
// At this point an IRQ has happened
// and has been processed. Loop around.
run_scheduler();
- ASSERT(false);
}
}
@@ -151,6 +161,7 @@ thread_t *new_thread(entry_t entry, void* data) {
// used by user processes
t->proc = 0;
t->user_ex_handler = 0;
+ t->critical_level = CL_USER;
return t;
}
@@ -161,7 +172,7 @@ thread_t *new_thread(entry_t entry, void* data) {
static void irq0_handler(registers_t *regs) {
worker_notify_time(1000000 / TASK_SWITCH_FREQUENCY);
- if (current_thread != 0) {
+ if (current_thread != 0 && current_thread->critical_level == CL_USER) {
save_context_and_enter_scheduler(&current_thread->ctx);
}
}
@@ -172,7 +183,8 @@ void threading_setup(entry_t cont, void* arg) {
thread_t *t = new_thread(cont, arg);
ASSERT(t != 0);
- resume_thread(t, false);
+ resume_thread(t);
+ exit_critical(CL_USER);
run_scheduler(); // never returns
ASSERT(false);
@@ -183,28 +195,22 @@ void threading_setup(entry_t cont, void* arg) {
// ======================= //
void yield() {
- if (current_thread == 0) {
- // might happen before threading is initialized
- // (but should not...)
- dbg_printf("Warning: probable deadlock.\n");
- } else {
- save_context_and_enter_scheduler(&current_thread->ctx);
- }
+ ASSERT(current_thread != 0 && current_thread->critical_level != CL_EXCL);
+
+ save_context_and_enter_scheduler(&current_thread->ctx);
}
void pause() {
- bool st = disable_interrupts();
+ ASSERT(current_thread != 0 && current_thread->critical_level != CL_EXCL);
current_thread->state = T_STATE_PAUSED;
save_context_and_enter_scheduler(&current_thread->ctx);
-
- resume_interrupts(st);
}
void usleep(int usecs) {
void sleeper_resume(void* t) {
thread_t *thread = (thread_t*)t;
- resume_thread(thread, true);
+ resume_thread(thread);
}
if (current_thread == 0) return;
bool ok = worker_push_in(usecs, sleeper_resume, current_thread);
@@ -218,19 +224,18 @@ void exit() {
ASSERT(false);
}
-bool resume_thread(thread_t *thread, bool run_at_once) {
+bool resume_thread(thread_t *thread) {
bool ret = false;
- bool st = disable_interrupts();
+ int st = enter_critical(CL_NOINT);
if (thread->state == T_STATE_PAUSED) {
ret = true;
thread->state = T_STATE_RUNNING;
enqueue_thread(thread, false);
}
- if (run_at_once) yield();
- resume_interrupts(st);
+ exit_critical(st);
return ret;
}
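
Because enter_critical() returns the caller's previous level and exit_critical() only ever lowers the level back to that value, critical sections nest naturally. A sketch of the intended nesting, with hypothetical helper functions that are not part of this commit (thread.h assumed to be in scope):

    static void fill_buffer(void) {
        int st = enter_critical(CL_NOINT);    /* IRQs masked while the structure is inconsistent */
        /* ... update state that an IRQ handler may also touch ... */
        exit_critical(st);                    /* back to CL_NOSWITCH here, not CL_USER */
    }

    static void update_queue(void) {
        int st = enter_critical(CL_NOSWITCH); /* timer preemption off, IRQs still serviced */
        fill_buffer();                        /* may raise the level further while it runs */
        exit_critical(st);                    /* restores whatever level update_queue() started at */
    }
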
diff --git a/src/kernel/core/worker.c b/src/kernel/core/worker.c
index 4be2f93..4f37b22 100644
--- a/src/kernel/core/worker.c
+++ b/src/kernel/core/worker.c
@@ -88,7 +88,7 @@ void worker_notify_time(int usecs) {
time += usecs;
if (next_task_time <= time) {
for (int i = 0; i < nworkers; i++) {
- if (resume_thread(workers[i], false)) break;
+ if (resume_thread(workers[i])) break;
}
}
}
diff --git a/src/kernel/dev/pciide.c b/src/kernel/dev/pciide.c
index 429432f..927e9da 100644
--- a/src/kernel/dev/pciide.c
+++ b/src/kernel/dev/pciide.c
@@ -167,7 +167,7 @@ void irq14_handler(registers_t *regs) {
if (wait_irq14) {
thread_t *t = wait_irq14;
wait_irq14 = 0;
- resume_thread(t, true); // may not return depending on conditions
+ resume_thread(t);
}
}
@@ -175,7 +175,7 @@ void irq15_handler(registers_t *regs) {
if (wait_irq15) {
thread_t *t = wait_irq15;
wait_irq15 = 0;
- resume_thread(t, true);
+ resume_thread(t);
}
}
@@ -183,7 +183,7 @@ void pciirq_handler(int pci_id) {
if (wait_pciirq) {
thread_t *t = wait_pciirq;
wait_pciirq = 0;
- resume_thread(t, true);
+ resume_thread(t);
}
}
@@ -202,7 +202,8 @@ static void ide_prewait_irq(ide_controller_t *c, int channel) {
}
static void ide_wait_irq(ide_controller_t *c, int channel) {
- asm volatile("cli");
+ int st = enter_critical(CL_NOINT);
+
int irq = c->channels[channel].irq;
if (irq == 14) {
if (wait_irq14) pause();
@@ -214,7 +215,8 @@ static void ide_wait_irq(ide_controller_t *c, int channel) {
if (wait_pciirq) pause();
mutex_unlock(&on_pciirq);
}
- asm volatile("sti");
+
+ exit_critical(st);
}
// ===================================== //
diff --git a/src/kernel/include/idt.h b/src/kernel/include/idt.h
index 2cccfa5..45cc3e3 100644
--- a/src/kernel/include/idt.h
+++ b/src/kernel/include/idt.h
@@ -75,14 +75,13 @@ void idt_set_ex_handler(int number, isr_handler_t func); //Set exception handler
void idt_set_irq_handler(int number, isr_handler_t func); //Set IRQ handler
// Warning about IRQ handlers :
-// IRQ handlers must not call yield(), because that may not return ! Therefore they cannot
-// use mutexes, memory allocation and most usefull things. Basically the only thing they
-// can do is wake up another thread, so it is a good idea to have a thread that waits for
-// the IRQ and does something when it happens, and the IRQ handler only wakes up that thread
-// when the IRQ happens.
-// Remark on resume_thread : if the second argument is set to true, yield() is called in the
-// function, so it may never return in some circumstances
-// IRQ handlers are never preemptible
+// IRQ handlers may run on the kernel0 stack, which is reset each time the scheduler
+// is entered. Therefore IRQ handler routines may not call yield(), which makes them
+// unable to use mutexes, memory allocation and most useful things. Basically the only
+// thing they can do is wake up another thread, so it is a good idea to have a thread
+// that waits for the IRQ and does something when it happens, and the IRQ handler only
+// wakes up that thread when the IRQ happens. They may also communicate via atomic
+// or lock-free data structures.
void dbg_dump_registers(registers_t*);
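
The pattern this new comment recommends is the one the pciide.c changes above already follow: the IRQ handler only clears a wait pointer and calls resume_thread(), while a regular thread does the actual work. A sketch with hypothetical names (wait_mydev, mydev_*), not taken from this commit:

    static thread_t *wait_mydev = 0;        /* thread currently waiting for the device IRQ */

    static void mydev_prewait_irq(void) {
        wait_mydev = current_thread;        /* register before starting the device operation */
    }

    static void mydev_irq_handler(registers_t *regs) {
        /* runs at CL_EXCL: never yield(), only wake the waiting thread */
        if (wait_mydev) {
            thread_t *t = wait_mydev;
            wait_mydev = 0;
            resume_thread(t);
        }
    }

    static void mydev_wait_irq(void) {
        int st = enter_critical(CL_NOINT);  /* the IRQ cannot slip in between the test and pause() */
        if (wait_mydev) pause();            /* still set: the IRQ has not fired yet, sleep until it does */
        exit_critical(st);
    }
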
diff --git a/src/kernel/include/thread.h b/src/kernel/include/thread.h
index fa7dc6f..127b0d7 100644
--- a/src/kernel/include/thread.h
+++ b/src/kernel/include/thread.h
@@ -9,7 +9,7 @@
#define T_STATE_PAUSED 2
#define T_STATE_FINISHED 3
-#define KPROC_STACK_SIZE 0x8000 // 8Kb
+#define KPROC_STACK_SIZE 0x2000 // 8 KB
#define TASK_SWITCH_FREQUENCY 100 // in hertz
@@ -25,6 +25,7 @@ typedef struct thread {
uint32_t state;
uint64_t last_ran;
+ int critical_level;
region_info_t *stack_region;
@@ -46,7 +47,16 @@ void pause();
void exit();
void usleep(int usecs);
-bool resume_thread(thread_t *thread, bool run_at_once); // true if thrad was paused, false if was running
+bool resume_thread(thread_t *thread); // true if the thread was paused, false if it was already running
void kill_thread(thread_t *thread);
+// Kernel critical sections
+#define CL_EXCL 3 // No interrupts accepted, context switching not allowed
+#define CL_NOINT 2 // No interrupts accepted, timer context switching disabled (manual switch allowed)
+#define CL_NOSWITCH 1 // Interrupts accepted, timer context switching disabled (manual switch allowed)
+#define CL_USER 0 // Anything can happen
+
+int enter_critical(int level);
+void exit_critical(int prev_level);
+
/* vim: set ts=4 sw=4 tw=0 noet :*/
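
A short note on choosing a level, as a hedged sketch with names that are not from this commit: CL_NOSWITCH is enough for state shared only between threads, since the timer will not preempt the section (IRQs keep being serviced), while CL_NOINT is needed as soon as an IRQ handler also touches the state.

    static unsigned int packets_seen = 0;      /* hypothetical counter shared between threads only */

    void count_packet(void) {
        int st = enter_critical(CL_NOSWITCH);  /* no timer preemption; IRQs stay enabled */
        packets_seen++;
        exit_critical(st);
    }
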
diff --git a/src/kernel/user/process.c b/src/kernel/user/process.c
index c2eb413..989a47a 100644
--- a/src/kernel/user/process.c
+++ b/src/kernel/user/process.c
@@ -94,7 +94,7 @@ bool start_process(process_t *p, void* entry) {
th->proc = p;
th->user_ex_handler = proc_user_exception;
- resume_thread(th, false);
+ resume_thread(th);
return true;
}