path: root/kernel/l0
author    Alex Auvolat <alex.auvolat@ens.fr>  2014-12-07 19:59:34 +0100
committer Alex Auvolat <alex.auvolat@ens.fr>  2014-12-07 19:59:34 +0100
commit    f0475a3d517b4df8eb2b73f22eaec91a72bcc51f (patch)
tree      084e6cd1db812cafdd67ba650796eda3091c2ea7 /kernel/l0
parent    d7aaba8c85cc71f3184cddcf20b740c5157c864d (diff)
download  macroscope-f0475a3d517b4df8eb2b73f22eaec91a72bcc51f.tar.gz
          macroscope-f0475a3d517b4df8eb2b73f22eaec91a72bcc51f.zip
Implement multitasking.
Diffstat (limited to 'kernel/l0')
-rw-r--r--  kernel/l0/context_switch.s   46
-rw-r--r--  kernel/l0/kmain.c            36
-rw-r--r--  kernel/l0/loader.s            7
-rw-r--r--  kernel/l0/region.c           11
-rw-r--r--  kernel/l0/task.c            189
5 files changed, 268 insertions, 21 deletions
diff --git a/kernel/l0/context_switch.s b/kernel/l0/context_switch.s
new file mode 100644
index 0000000..7351997
--- /dev/null
+++ b/kernel/l0/context_switch.s
@@ -0,0 +1,46 @@
+[EXTERN kernel_stack_top]
+[EXTERN run_scheduler]
+
+[GLOBAL save_context_and_enter_scheduler]
+; void save_context_and_enter_scheduler(struct saved_context *ctx);
+save_context_and_enter_scheduler:
+ pushf
+ cli
+ pusha ; pushes eax,ecx,edx,ebx,esp,ebp,esi,edi (edi ends up at the lowest address)
+
+ mov eax, cr3
+ push eax
+
+ mov ax, ds ; Lower 16-bits of eax = ds.
+ push eax ; save the data segment descriptor
+
+ mov eax, [esp+48] ; get address of the saved_context argument (48 = 4 ds + 4 cr3 + 32 pusha + 4 eflags + 4 return address)
+ mov [eax], esp ; save esp
+ mov dword [eax+4], resume_saved_context ; save eip
+
+ mov esp, kernel_stack_top
+ jmp run_scheduler
+
+resume_saved_context:
+ pop eax
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+
+ pop eax
+ mov cr3, eax
+
+ popa
+ popf
+ ret
+
+
+[GLOBAL resume_context]
+resume_context:
+ mov eax, [esp+4] ; get address of saved context
+ mov esp, [eax] ; resume esp
+ mov ecx, [eax+4] ; jump to specified eip
+ jmp ecx
+
+; vim: set ts=4 sw=4 tw=0 noet :
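
The assembly above hard-codes the layout of struct saved_context: the saved
esp lives at offset 0 and the resume eip at offset 4. The definition itself
belongs to task.h, which this diff does not show; a minimal sketch that is
consistent with those offsets and with the uses in task.c below:

    typedef struct saved_context {
        uint32_t *esp;   // offset 0: saved stack pointer (its top holds ds, cr3, the pusha registers, eflags)
        void (*eip)();   // offset 4: where resume_context jumps (resume_saved_context, or run_task for a fresh task)
    } saved_context_t;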
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index e6831b2..0a6bed9 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -10,6 +10,8 @@
#include <region.h>
#include <kmalloc.h>
+#include <task.h>
+
#include <slab_alloc.h>
extern char k_end_addr; // defined in linker script : 0xC0000000 plus kernel stuff
@@ -19,11 +21,6 @@ void breakpoint_handler(registers_t *regs) {
BOCHS_BREAKPOINT;
}
-void yield() {
- // multitasking not implemented yet
- dbg_printf("Warning : probable deadlock?\n");
-}
-
void region_test1() {
void* p = region_alloc(0x1000, REGION_T_HW, 0);
dbg_printf("Allocated one-page region: 0x%p\n", p);
@@ -102,6 +99,28 @@ void kmalloc_test(void* kernel_data_end) {
dbg_printf("Kmalloc test OK.\n");
dbg_print_region_stats();
}
+
+void test_task(void* a) {
+ int i = 0;
+ while(1) {
+ dbg_printf("b");
+ for (int x = 0; x < 100000; x++) asm volatile("xor %%ebx, %%ebx":::"%ebx");
+ if (++i == 8) {
+ yield();
+ i = 0;
+ }
+ }
+}
+void kernel_init_stage2(void* data) {
+ task_t *tb = new_task(test_task);
+ resume_with_result(tb, 0, false);
+
+ while(1) {
+ dbg_printf("a");
+ for (int x = 0; x < 100000; x++) asm volatile("xor %%ebx, %%ebx":::"%ebx");
+ }
+ PANIC("Reached kmain end! Falling off the edge.");
+}
void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
dbglog_setup();
@@ -141,8 +160,11 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
kmalloc_setup();
kmalloc_test(kernel_data_end);
-
- PANIC("Reached kmain end! Falling off the edge.");
+ // enter multi-tasking mode
+ // interrupts are enabled at this moment, so all
+ // code run from now on should be preemptible (i.e. thread-safe)
+ tasking_setup(kernel_init_stage2, 0);
+ PANIC("Should never get here.");
}
/* vim: set ts=4 sw=4 tw=0 noet :*/
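
kmain.c only consumes the tasking API; the declarations live in task.h, which
this diff does not include. A sketch reconstructed from the call sites in this
commit (names and exact signatures are inferred, not authoritative):

    typedef void (*entry_t)(void*);

    task_t *new_task(entry_t entry);               // allocate a task and its kernel stack (T_STATE_WAITING)
    void resume_with_result(task_t *task,          // post a result and enqueue the task if it was waiting;
                            void* data,            // with run_at_once, also yield() immediately
                            bool run_at_once);
    void yield();                                  // save the current context and enter the scheduler
    void tasking_setup(entry_t cont, void* arg);   // program the PIT, spawn cont(arg); never returns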
diff --git a/kernel/l0/loader.s b/kernel/l0/loader.s
index 5d0a2b8..447d82d 100644
--- a/kernel/l0/loader.s
+++ b/kernel/l0/loader.s
@@ -2,6 +2,7 @@
[GLOBAL loader] ; making entry point visible to linker
[GLOBAL kernel_pd] ; make kernel page directory visible
[GLOBAL kernel_stack_protector] ; used to detect kernel stack overflow
+[GLOBAL kernel_stack_top] ; stack re-used by scheduler
; higher-half kernel setup
K_HIGHHALF_ADDR equ 0xC0000000
@@ -60,7 +61,7 @@ higherhalf: ; now we're running in higher half
mov dword [kernel_pd], 0
invlpg [0]
- mov esp, stack_top ; set up the stack
+ mov esp, kernel_stack_top ; set up the stack
push eax ; pass Multiboot magic number
add ebx, K_HIGHHALF_ADDR ; update the MB info structure so that it is in higher half
@@ -78,8 +79,8 @@ hang:
align 0x1000
kernel_stack_protector:
resb 0x1000 ; as soon as we have efficient paging, we WON'T map this page
-stack_bottom:
+kernel_stack_bottom:
resb LOADER_STACK_SIZE
-stack_top:
+kernel_stack_top:
; vim: set ts=4 sw=4 tw=0 noet :
diff --git a/kernel/l0/region.c b/kernel/l0/region.c
index 3691747..a384a4d 100644
--- a/kernel/l0/region.c
+++ b/kernel/l0/region.c
@@ -333,17 +333,6 @@ region_info_t *find_region(void* addr) {
// HELPER FUNCTIONS : SIMPLE PF HANDLERS ; FREEING FUNCTIONS //
// ========================================================= //
-void stack_pf_handler(pagedir_t *pd, struct region_info *r, void* addr) {
- if (addr < r->addr + PAGE_SIZE) {
- dbg_printf("Stack overflow at 0x%p.", addr);
- if (r->type & REGION_T_KPROC_STACK) dbg_printf(" (in kernel process stack)\n");
- if (r->type & REGION_T_PROC_KSTACK) dbg_printf(" (in process kernel stack)\n");
- dbg_print_region_stats();
- PANIC("Stack overflow.");
- }
- default_allocator_pf_handler(pd, r, addr);
-}
-
void default_allocator_pf_handler(pagedir_t *pd, struct region_info *r, void* addr) {
ASSERT(pd_get_frame(addr) == 0); // if the error is of another type (RO, protected), we don't do anything
diff --git a/kernel/l0/task.c b/kernel/l0/task.c
new file mode 100644
index 0000000..e02dcf6
--- /dev/null
+++ b/kernel/l0/task.c
@@ -0,0 +1,189 @@
+#include <task.h>
+#include <kmalloc.h>
+#include <dbglog.h>
+#include <idt.h>
+
+#include <frame.h>
+#include <paging.h>
+
+void save_context_and_enter_scheduler(saved_context_t *ctx);
+void resume_context(saved_context_t *ctx);
+
+task_t *current_task = 0;
+
+// ====================== //
+// THE PROGRAMMABLE TIMER //
+// ====================== //
+
+void set_pit_frequency(uint32_t freq) {
+ uint32_t divisor = 1193180 / freq;
+ ASSERT(divisor < 65536); // must fit in 16 bits
+
+ uint8_t l = (divisor & 0xFF);
+ uint8_t h = ((divisor >> 8) & 0xFF);
+
+ outb(0x43, 0x36);
+ outb(0x40, l);
+ outb(0x40, h);
+}
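+
+ // Worked example: tasking_setup below calls set_pit_frequency(100), so
+ // divisor = 1193180 / 100 = 11931 = 0x2E9B, hence l = 0x9B and h = 0x2E;
+ // IRQ0 then fires at 1193180 / 11931, i.e. about 100.007 Hz.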
+
+// ================== //
+// THE TASK SCHEDULER //
+// ================== //
+
+static task_t *queue_first_task = 0, *queue_last_task = 0;
+
+void enqueue_task(task_t *t, bool just_ran) {
+ ASSERT(t->state == T_STATE_RUNNING);
+ if (queue_first_task == 0) {
+ queue_first_task = queue_last_task = t;
+ t->next_in_queue = 0;
+ } else if (just_ran) {
+ t->next_in_queue = 0;
+ queue_last_task->next_in_queue = t;
+ queue_last_task = t;
+ } else {
+ t->next_in_queue = queue_first_task;
+ queue_first_task = t;
+ }
+}
+
+task_t* dequeue_task() {
+ task_t *t = queue_first_task;
+ if (t == 0) return 0;
+
+ queue_first_task = t->next_in_queue;
+ if (queue_first_task == 0) queue_last_task = 0;
+
+ return t;
+}
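+
+ // Queueing policy: a task that just ran is appended (round-robin), while a
+ // freshly woken task is prepended so that it is scheduled next.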
+
+// ================ //
+// THE TASKING CODE //
+// ================ //
+
+void run_scheduler() {
+ // This function is expected NEVER TO RETURN
+ if (current_task != 0 && current_task->state == T_STATE_RUNNING) {
+ enqueue_task(current_task, true);
+ }
+ current_task = dequeue_task();
+ if (current_task != 0) {
+ resume_context(&current_task->ctx);
+ } else {
+ // Wait for an IRQ
+ asm volatile("sti; hlt");
+ // At this point an IRQ has happened
+ // and has been processed. Loop around.
+ run_scheduler();
+ ASSERT(false);
+ }
+}
+
+static void run_task(void (*entry)(void*)) {
+ ASSERT(current_task->state == T_STATE_RUNNING);
+ ASSERT(current_task->has_result);
+
+ current_task->has_result = false;
+
+ asm volatile("sti");
+ entry(current_task->result);
+
+ current_task->state = T_STATE_FINISHED;
+ // TODO : add job for deleting the task, or whatever
+ yield(); // expected never to return!
+ ASSERT(false);
+}
+task_t *new_task(entry_t entry) {
+ task_t *t = (task_t*)kmalloc(sizeof(task_t));
+ if (t == 0) return 0;
+
+ void* stack = region_alloc(KPROC_STACK_SIZE, REGION_T_KPROC_STACK, 0);
+ if (stack == 0) {
+ kfree(t);
+ return 0;
+ }
+
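+ // Frames are mapped for every stack page except the first: leaving it
+ // unmapped turns it into a guard page, so a stack overflow faults instead
+ // of silently corrupting memory (compare the stack_pf_handler removed from
+ // region.c above).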
+ for (void* i = stack + PAGE_SIZE; i < stack + KPROC_STACK_SIZE; i += PAGE_SIZE) {
+ uint32_t f = frame_alloc(1);
+ if (f == 0) {
+ region_free_unmap_free(stack);
+ kfree(t);
+ return 0;
+ }
+ pd_map_page(i, f, true);
+ }
+
+ t->stack_region = find_region(stack);
+
+ t->ctx.esp = (uint32_t*)(t->stack_region->addr + t->stack_region->size);
+ *(--t->ctx.esp) = (uint32_t)entry; // push first argument : entry point
+ *(--t->ctx.esp) = 0; // push invalid return address (the run_task function never returns)
+
+ t->ctx.eip = (void(*)())run_task;
+ t->state = T_STATE_WAITING;
+ t->result = 0;
+ t->has_result = false;
+
+ t->more_data = 0;
+
+ return t;
+}
+
+void tasking_setup(entry_t cont, void* arg) {
+ set_pit_frequency(100);
+ idt_set_irq_handler(IRQ0, yield);
+
+ task_t *t = new_task(cont);
+ ASSERT(t != 0);
+
+ resume_with_result(t, arg, false);
+
+ run_scheduler(); // never returns
+ ASSERT(false);
+}
+
+void yield() {
+ if (current_task == 0) {
+ // might happen before tasking is initialized
+ dbg_printf("Warning: probable deadlock.");
+ } else {
+ save_context_and_enter_scheduler(&current_task->ctx);
+ }
+}
+
+void* wait_for_result() {
+ uint32_t eflags;
+ asm volatile("pushf; pop %0" : "=r"(eflags));
+ asm volatile("cli");
+
+ if (!current_task->has_result) {
+ current_task->state = T_STATE_WAITING;
+ save_context_and_enter_scheduler(&current_task->ctx);
+ }
+ ASSERT(current_task->has_result);
+ current_task->has_result = false;
+
+ if (eflags & EFLAGS_IF) asm volatile("sti");
+
+ return current_task->result;
+}
+
+void resume_with_result(task_t *task, void* data, bool run_at_once) {
+ uint32_t eflags;
+ asm volatile("pushf; pop %0" : "=r"(eflags));
+ asm volatile("cli");
+
+ task->has_result = true;
+ task->result = data;
+
+ if (task->state == T_STATE_WAITING) {
+ task->state = T_STATE_RUNNING;
+ enqueue_task(task, false);
+ }
+ if (run_at_once) yield();
+
+ if (eflags & EFLAGS_IF) asm volatile("sti");
+}
+
+/* vim: set ts=4 sw=4 tw=0 noet :*/
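
Taken together, wait_for_result() and resume_with_result() give each task a
one-slot mailbox: run_task() consumes the first posted result as the entry
point's argument, and later posts wake the task out of wait_for_result(). A
usage sketch (consumer_task and first_message are illustrative names, not part
of this commit):

    void consumer_task(void* arg) {
        dbg_printf("started with: 0x%p\n", arg);
        while (1) {
            void* msg = wait_for_result();   // blocks (T_STATE_WAITING) until a result is posted
            dbg_printf("got: 0x%p\n", msg);
        }
    }

    // From another task:
    //   task_t *c = new_task(consumer_task);
    //   resume_with_result(c, first_message, false);   // the first post doubles as the entry argument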