Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile            |   2
-rw-r--r--   kernel/include/idt.h       |   5
-rw-r--r--   kernel/include/region.h    |   2
-rw-r--r--   kernel/include/task.h      |  42
-rw-r--r--   kernel/l0/context_switch.s |  46
-rw-r--r--   kernel/l0/kmain.c          |  36
-rw-r--r--   kernel/l0/loader.s         |   7
-rw-r--r--   kernel/l0/region.c         |  11
-rw-r--r--   kernel/l0/task.c           | 189
9 files changed, 313 insertions, 27 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 8b0095e..0088085 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,7 +11,7 @@ LDFLAGS = -T linker.ld -ffreestanding -O2 -nostdlib -lgcc -Xlinker -Map=kernel.m
 OBJ = lib/string.o lib/printf.o lib/slab_alloc.o lib/mutex.o \
 	l0/loader.o l0/kmain.o l0/dbglog.o l0/sys.o \
-	l0/gdt.o l0/idt.o l0/interrupt.o \
+	l0/gdt.o l0/idt.o l0/interrupt.o l0/context_switch.o l0/task.o \
 	l0/frame.o l0/paging.o l0/region.o l0/kmalloc.o
 OUT = kernel.bin
diff --git a/kernel/include/idt.h b/kernel/include/idt.h
index 9415c8e..8e84cea 100644
--- a/kernel/include/idt.h
+++ b/kernel/include/idt.h
@@ -60,13 +60,12 @@
 #define EFLAGS_IF	(0x1 << 9)
 
-struct registers {
+typedef struct registers {
 	uint32_t ds;				// Data segment selector
 	uint32_t edi, esi, ebp, useless_esp, ebx, edx, ecx, eax;	// Pushed by pusha.
 	uint32_t int_no, err_code;	// Interrupt number and error code (if applicable)
 	uint32_t eip, cs, eflags, esp, ss;	// Pushed by the processor automatically.
-};
-typedef struct registers registers_t;
+} registers_t;
 
 typedef void (*isr_handler_t)(registers_t*);
diff --git a/kernel/include/region.h b/kernel/include/region.h
index ac68047..b45a85d 100644
--- a/kernel/include/region.h
+++ b/kernel/include/region.h
@@ -34,8 +34,6 @@ region_info_t *find_region(void* addr);
 void region_free(void* addr);
 
 // some useful PF handlers
-// stack_pf_handler : allocates new frames and panics on access to first page of region (stack overflow)
-void stack_pf_handler(pagedir_t *pd, struct region_info *r, void* addr);
 // default_allocator_pf_handler : just allocates new frames on page faults
 void default_allocator_pf_handler(pagedir_t *pd, struct region_info *r, void* addr);
diff --git a/kernel/include/task.h b/kernel/include/task.h
new file mode 100644
index 0000000..8ba50b0
--- /dev/null
+++ b/kernel/include/task.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <sys.h>
+#include <region.h>
+
+#define T_STATE_RUNNING		1
+#define T_STATE_FINISHED	2
+#define T_STATE_WAITING		3
+
+#define KPROC_STACK_SIZE 0x8000		// 32 KB (8 pages)
+
+typedef struct saved_context {
+	uint32_t *esp;
+	void (*eip)();
+} saved_context_t;
+
+typedef struct task {
+	saved_context_t ctx;
+
+	uint32_t state;
+	void* result;
+	bool has_result;
+
+	region_info_t *stack_region;
+
+	void* more_data;
+
+	struct task *next_in_queue;
+} task_t;
+
+typedef void (*entry_t)(void*);
+
+void tasking_setup(entry_t cont, void* data);	// never returns
+task_t *new_task(entry_t entry);	// task is PAUSED, and must be resume_with_result'ed
+extern task_t *current_task;
+
+
+void yield();
+void* wait_for_result();
+void resume_with_result(task_t *task, void* data, bool run_at_once);
+
+/* vim: set ts=4 sw=4 tw=0 noet :*/
diff --git a/kernel/l0/context_switch.s b/kernel/l0/context_switch.s
new file mode 100644
index 0000000..7351997
--- /dev/null
+++ b/kernel/l0/context_switch.s
@@ -0,0 +1,46 @@
+[EXTERN kernel_stack_top]
+[EXTERN run_scheduler]
+
+[GLOBAL save_context_and_enter_scheduler]
+; void save_context_and_enter_scheduler(struct saved_context *ctx);
+save_context_and_enter_scheduler:
+	pushf
+	cli
+	pusha				; Pushes edi,esi,ebp,esp,ebx,edx,ecx,eax
+
+	mov eax, cr3
+	push eax
+
+	mov ax, ds			; Lower 16 bits of eax = ds
+	push eax			; save the data segment descriptor
+
+	mov eax, [esp+48]	; get address of saved_context structure
+	mov [eax], esp		; save esp
+	mov dword [eax+4], resume_saved_context	; save eip
+
+	mov esp, kernel_stack_top
+	jmp run_scheduler
+
+resume_saved_context:
+	pop eax
+	mov ds, ax
+	mov es, ax
+	mov fs, ax
+	mov gs, ax
+
+	pop eax
+	mov cr3, eax
+
+	popa
+	popf
+	ret
+
+
+[GLOBAL resume_context]
+resume_context:
+	mov eax, [esp+4]	; get address of saved context
+	mov esp, [eax]		; resume esp
+	mov ecx, [eax+4]	; jump to specified eip
+	jmp ecx
+
+; vim: set ts=4 sw=4 tw=0 noet :
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index e6831b2..0a6bed9 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -10,6 +10,8 @@
 #include <region.h>
 #include <kmalloc.h>
 
+#include <task.h>
+
 #include <slab_alloc.h>
 
 extern char k_end_addr;	// defined in linker script : 0xC0000000 plus kernel stuff
@@ -19,11 +21,6 @@ void breakpoint_handler(registers_t *regs) {
 	BOCHS_BREAKPOINT;
 }
 
-void yield() {
-	// multitasking not implemented yet
-	dbg_printf("Warning : probable deadlock?\n");
-}
-
 void region_test1() {
 	void* p = region_alloc(0x1000, REGION_T_HW, 0);
 	dbg_printf("Allocated one-page region: 0x%p\n", p);
@@ -102,6 +99,28 @@ void kmalloc_test(void* kernel_data_end) {
 	dbg_printf("Kmalloc test OK.\n");
 	dbg_print_region_stats();
 }
+
+void test_task(void* a) {
+	int i = 0;
+	while(1) {
+		dbg_printf("b");
+		for (int x = 0; x < 100000; x++) asm volatile("xor %%ebx, %%ebx":::"%ebx");
+		if (++i == 8) {
+			yield();
+			i = 0;
+		}
+	}
+}
+void kernel_init_stage2(void* data) {
+	task_t *tb = new_task(test_task);
+	resume_with_result(tb, 0, false);
+
+	while(1) {
+		dbg_printf("a");
+		for (int x = 0; x < 100000; x++) asm volatile("xor %%ebx, %%ebx":::"%ebx");
+	}
+	PANIC("Reached kmain end! Falling off the edge.");
+}
 
 void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
 	dbglog_setup();
@@ -141,8 +160,11 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
 	kmalloc_setup();
 	kmalloc_test(kernel_data_end);
-
-	PANIC("Reached kmain end! Falling off the edge.");
Falling off the edge."); + // enter multi-tasking mode + // interrupts are enabled at this moment, so all + // code run from now on should be preemtible (ie thread-safe) + tasking_setup(kernel_init_stage2, 0); + PANIC("Should never come here."); } /* vim: set ts=4 sw=4 tw=0 noet :*/ diff --git a/kernel/l0/loader.s b/kernel/l0/loader.s index 5d0a2b8..447d82d 100644 --- a/kernel/l0/loader.s +++ b/kernel/l0/loader.s @@ -2,6 +2,7 @@ [GLOBAL loader] ; making entry point visible to linker [GLOBAL kernel_pd] ; make kernel page directory visible [GLOBAL kernel_stack_protector] ; used to detect kernel stack overflow +[GLOBAL kernel_stack_top] ; stack re-used by scheduler ; higher-half kernel setup K_HIGHHALF_ADDR equ 0xC0000000 @@ -60,7 +61,7 @@ higherhalf: ; now we're running in higher half mov dword [kernel_pd], 0 invlpg [0] - mov esp, stack_top ; set up the stack + mov esp, kernel_stack_top ; set up the stack push eax ; pass Multiboot magic number add ebx, K_HIGHHALF_ADDR ; update the MB info structure so that it is in higher half @@ -78,8 +79,8 @@ hang: align 0x1000 kernel_stack_protector: resb 0x1000 ; as soon as we have efficient paging, we WON'T map this page -stack_bottom: +kernel_stack_bottom: resb LOADER_STACK_SIZE -stack_top: +kernel_stack_top: ; vim: set ts=4 sw=4 tw=0 noet : diff --git a/kernel/l0/region.c b/kernel/l0/region.c index 3691747..a384a4d 100644 --- a/kernel/l0/region.c +++ b/kernel/l0/region.c @@ -333,17 +333,6 @@ region_info_t *find_region(void* addr) { // HELPER FUNCTIONS : SIMPLE PF HANDLERS ; FREEING FUNCTIONS // // ========================================================= // -void stack_pf_handler(pagedir_t *pd, struct region_info *r, void* addr) { - if (addr < r->addr + PAGE_SIZE) { - dbg_printf("Stack overflow at 0x%p.", addr); - if (r->type & REGION_T_KPROC_STACK) dbg_printf(" (in kernel process stack)\n"); - if (r->type & REGION_T_PROC_KSTACK) dbg_printf(" (in process kernel stack)\n"); - dbg_print_region_stats(); - PANIC("Stack overflow."); - } - default_allocator_pf_handler(pd, r, addr); -} - void default_allocator_pf_handler(pagedir_t *pd, struct region_info *r, void* addr) { ASSERT(pd_get_frame(addr) == 0); // if error is of another type (RO, protected), we don't do anyting diff --git a/kernel/l0/task.c b/kernel/l0/task.c new file mode 100644 index 0000000..e02dcf6 --- /dev/null +++ b/kernel/l0/task.c @@ -0,0 +1,189 @@ +#include <task.h> +#include <kmalloc.h> +#include <dbglog.h> +#include <idt.h> + +#include <frame.h> +#include <paging.h> + +void save_context_and_enter_scheduler(saved_context_t *ctx); +void resume_context(saved_context_t *ctx); + +task_t *current_task = 0; + +// ====================== // +// THE PROGRAMMABLE TIMER // +// ====================== // + +void set_pit_frequency(uint32_t freq) { + uint32_t divisor = 1193180 / freq; + ASSERT(divisor < 65536); // must fit on 16 bits + + uint8_t l = (divisor & 0xFF); + uint8_t h = ((divisor >> 8) & 0xFF); + + outb(0x43, 0x36); + outb(0x40, l); + outb(0x40, h); +} + +// ================== // +// THE TASK SCHEDULER // +// ================== // + +static task_t *queue_first_task = 0, *queue_last_task = 0; + +void enqueue_task(task_t *t, bool just_ran) { + ASSERT(t->state == T_STATE_RUNNING); + if (queue_first_task == 0) { + queue_first_task = queue_last_task = t; + t->next_in_queue = 0; + } else if (just_ran) { + t->next_in_queue = 0; + queue_last_task->next_in_queue = t; + queue_last_task = t; + } else { + t->next_in_queue = queue_first_task; + queue_first_task = t; + } +} + +task_t* dequeue_task() { + 
+	task_t *t = queue_first_task;
+	if (t == 0) return 0;
+
+	queue_first_task = t->next_in_queue;
+	if (queue_first_task == 0) queue_last_task = 0;
+
+	return t;
+}
+
+// ================ //
+// THE TASKING CODE //
+// ================ //
+
+void run_scheduler() {
+	// This function is expected NEVER TO RETURN
+	if (current_task != 0 && current_task->state == T_STATE_RUNNING) {
+		enqueue_task(current_task, true);
+	}
+	current_task = dequeue_task();
+	if (current_task != 0) {
+		resume_context(&current_task->ctx);
+	} else {
+		// Wait for an IRQ
+		asm volatile("sti; hlt");
+		// At this point an IRQ has happened
+		// and has been processed. Loop around.
+		run_scheduler();
+		ASSERT(false);
+	}
+}
+
+static void run_task(void (*entry)(void*)) {
+	ASSERT(current_task->state == T_STATE_RUNNING);
+	ASSERT(current_task->has_result);
+
+	current_task->has_result = false;
+
+	asm volatile("sti");
+	entry(current_task->result);
+
+	current_task->state = T_STATE_FINISHED;
+	// TODO : add job for deleting the task, or whatever
+	yield();	// expected never to return!
+	ASSERT(false);
+}
+task_t *new_task(entry_t entry) {
+	task_t *t = (task_t*)kmalloc(sizeof(task_t));
+	if (t == 0) return 0;
+
+	void* stack = region_alloc(KPROC_STACK_SIZE, REGION_T_KPROC_STACK, 0);
+	if (stack == 0) {
+		kfree(t);
+		return 0;
+	}
+
+	for (void* i = stack + PAGE_SIZE; i < stack + KPROC_STACK_SIZE; i += PAGE_SIZE) {
+		uint32_t f = frame_alloc(1);
+		if (f == 0) {
+			region_free_unmap_free(stack);
+			kfree(t);
+			return 0;
+		}
+		pd_map_page(i, f, true);
+	}
+
+	t->stack_region = find_region(stack);
+
+	t->ctx.esp = (uint32_t*)(t->stack_region->addr + t->stack_region->size);
+	*(--t->ctx.esp) = (uint32_t)entry;	// push first argument : entry point
+	*(--t->ctx.esp) = 0;				// push invalid return address (the run_task function never returns)
+
+	t->ctx.eip = (void(*)())run_task;
+	t->state = T_STATE_WAITING;
+	t->result = 0;
+	t->has_result = false;
+
+	t->more_data = 0;
+
+	return t;
+}
+
+void tasking_setup(entry_t cont, void* arg) {
+	set_pit_frequency(100);
+	idt_set_irq_handler(IRQ0, yield);
+
+	task_t *t = new_task(cont);
+	ASSERT(t != 0);
+
+	resume_with_result(t, arg, false);
+
+	run_scheduler();	// never returns
+	ASSERT(false);
+}
+
+void yield() {
+	if (current_task == 0) {
+		// might happen before tasking is initialized
+		dbg_printf("Warning: probable deadlock.");
+	} else {
+		save_context_and_enter_scheduler(&current_task->ctx);
+	}
+}
+
+void* wait_for_result() {
+	uint32_t eflags;
+	asm volatile("pushf; pop %0" : "=r"(eflags));
+	asm volatile("cli");
+
+	if (!current_task->has_result) {
+		current_task->state = T_STATE_WAITING;
+		save_context_and_enter_scheduler(&current_task->ctx);
+	}
+	ASSERT(current_task->has_result);
+	current_task->has_result = false;
+
+	if (eflags & EFLAGS_IF) asm volatile("sti");
+
+	return current_task->result;
+}
+
+void resume_with_result(task_t *task, void* data, bool run_at_once) {
+	uint32_t eflags;
+	asm volatile("pushf; pop %0" : "=r"(eflags));
+	asm volatile("cli");
+
+	task->has_result = true;
+	task->result = data;
+
+	if (task->state == T_STATE_WAITING) {
+		task->state = T_STATE_RUNNING;
+		enqueue_task(task, false);
+	}
+	if (run_at_once) yield();
+
+	if (eflags & EFLAGS_IF) asm volatile("sti");
+}
+
+/* vim: set ts=4 sw=4 tw=0 noet :*/