author    Alex Auvolat <alex.auvolat@ens.fr>  2014-12-07 11:24:06 +0100
committer Alex Auvolat <alex.auvolat@ens.fr>  2014-12-07 11:24:06 +0100
commit    26b68c108664cb54089613bdbc54624ed66f7fda (patch)
tree      cafbab0b0c9c1db1869e4346cebc96b2d6f53570 /kernel/l0
parent    acc786cb5805d057932ada3e7c571bb8e901cd67 (diff)
Make L0 thread-safe (mostly).
Diffstat (limited to 'kernel/l0')
-rw-r--r--  kernel/l0/idt.c     10
-rw-r--r--  kernel/l0/kmain.c    2
-rw-r--r--  kernel/l0/paging.c  25
-rw-r--r--  kernel/l0/region.c  58
4 files changed, 72 insertions, 23 deletions
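
The changes below put a mutex around every shared structure. For context, this is the <mutex.h> interface the diff relies on (STATIC_MUTEX, MUTEX_UNLOCKED, mutex_lock, mutex_unlock); what follows is only a minimal sketch assuming a test-and-set spinlock, and the real implementation in this tree may differ:

    #include <stdint.h>

    typedef uint32_t mutex_t;
    #define MUTEX_LOCKED   1
    #define MUTEX_UNLOCKED 0
    #define STATIC_MUTEX(name) static mutex_t name = MUTEX_UNLOCKED

    static inline void mutex_lock(mutex_t* mutex) {
        // spin until the atomic swap observes the unlocked state
        while (__sync_lock_test_and_set(mutex, MUTEX_LOCKED) == MUTEX_LOCKED) {
            asm volatile("pause");  // hint to the CPU that we are busy-waiting
        }
    }

    static inline void mutex_unlock(mutex_t* mutex) {
        __sync_lock_release(mutex);  // atomic release store of MUTEX_UNLOCKED
    }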
diff --git a/kernel/l0/idt.c b/kernel/l0/idt.c
index 76aa225..8562636 100644
--- a/kernel/l0/idt.c
+++ b/kernel/l0/idt.c
@@ -91,6 +91,11 @@ static isr_handler_t ex_handlers[32] = {0};
void idt_exHandler(registers_t *regs) {
if (ex_handlers[regs->int_no] != 0) {
ex_handlers[regs->int_no](regs);
+ } else {
+		//TODO: make sure all exceptions happening in userspace do not cause a kernel panic...
+ dbg_printf("Unhandled exception: %i\n", regs->int_no);
+ dbg_dump_registers(regs);
+ PANIC("Unhandled exception");
}
}
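
For reference, the ex_handlers table consulted above is filled by a setter along these lines (idt_set_ex_handler is a hypothetical name; the actual registration function is not part of this diff):

    void idt_set_ex_handler(uint8_t ex_num, isr_handler_t handler) {
        ASSERT(ex_num < 32);            // ex_handlers has one slot per CPU exception
        ex_handlers[ex_num] = handler;  // a registered handler avoids the PANIC above
    }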
@@ -132,7 +137,8 @@ static const struct {
void (*fun)();
uint8_t type;
} gates[] = {
- // Processor exceptions are traps : handling them should be preemptible
+ // Most processor exceptions are traps and handling them
+ // should be preemptible
{ 0, isr0, GATE_TYPE_TRAP },
{ 1, isr1, GATE_TYPE_TRAP },
{ 2, isr2, GATE_TYPE_TRAP },
@@ -147,7 +153,7 @@ static const struct {
{ 11, isr11, GATE_TYPE_TRAP },
{ 12, isr12, GATE_TYPE_TRAP },
{ 13, isr13, GATE_TYPE_TRAP },
- { 14, isr14, GATE_TYPE_TRAP },
+	{ 14, isr14, GATE_TYPE_INTERRUPT },	// the handler re-enables interrupts later on
{ 15, isr15, GATE_TYPE_TRAP },
{ 16, isr16, GATE_TYPE_TRAP },
{ 17, isr17, GATE_TYPE_TRAP },
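
Vector 14 (page fault) becomes an interrupt gate because the handler must read CR2, the faulting address, before interrupts are re-enabled: a nested fault taken inside an interrupt handler would overwrite CR2. A sketch of the required ordering (general x86 reasoning, not code from this tree):

    void page_fault_entry_sketch(registers_t *regs) {
        void* vaddr;
        asm volatile("movl %%cr2, %0" : "=r"(vaddr));  // must run with interrupts still off
        asm volatile("sti");                           // only now is it safe to be preempted
        // ... dispatch on vaddr and regs, as page_fault_handler does in paging.c below ...
    }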
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index 5d85fe5..b70c6f4 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -47,7 +47,7 @@ slab_type_t slab_sizes[] = {
void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
dbglog_setup();
- dbg_printf("Hello, kernel World!\n");
+ dbg_printf("Hello, kernel world!\n");
dbg_printf("This is %s, version %s.\n", OS_NAME, OS_VERSION);
ASSERT(mb_magic == MULTIBOOT_BOOTLOADER_MAGIC);
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index 811b677..744424f 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -3,6 +3,7 @@
#include <idt.h>
#include <dbglog.h>
#include <region.h>
+#include <mutex.h>
#define PAGE_OF_ADDR(x) (((size_t)x >> PAGE_SHIFT) % N_PAGES_IN_PT)
#define PT_OF_ADDR(x) ((size_t)x >> (PAGE_SHIFT + PT_SHIFT))
@@ -28,7 +29,7 @@ struct page_directory {
// then we can use mirroring to edit it
// (the last 4M of the address space are mapped to the PD itself)
- // more info to be stored here, potentially
+ mutex_t mutex;
};
@@ -59,6 +60,7 @@ void page_fault_handler(registers_t *regs) {
invlpg(&current_pt[pt]);
return;
}
+ asm volatile("sti"); // from now on we are preemptible
if (vaddr >= (void*)&kernel_stack_protector && vaddr < (void*)&kernel_stack_protector + PAGE_SIZE) {
dbg_printf("Kernel stack overflow at 0x%p\n", vaddr);
@@ -85,6 +87,8 @@ void page_fault_handler(registers_t *regs) {
}
i->pf(current_pd_d, i, vaddr);
} else {
+ asm volatile("sti"); // userspace PF handlers should always be preemptible
+
dbg_printf("Userspace page fault at 0x%p\n", vaddr);
PANIC("Unhandled userspace page fault");
// not handled yet
@@ -100,13 +104,15 @@ void paging_setup(void* kernel_data_end) {
// setup kernel_pd_d structure
kernel_pd_d.phys_addr = (size_t)&kernel_pd - K_HIGHHALF_ADDR;
+ kernel_pd_d.mutex = MUTEX_UNLOCKED;
// setup kernel_pt0
ASSERT(PAGE_OF_ADDR(K_HIGHHALF_ADDR) == 0); // kernel is 4M-aligned
ASSERT(FIRST_KERNEL_PT == 768);
for (size_t i = 0; i < n_kernel_pages; i++) {
if ((i * PAGE_SIZE) + K_HIGHHALF_ADDR == (size_t)&kernel_stack_protector) {
- frame_free(i, 1); // don't map kernel stack protector page
+ kernel_pt0.page[i] = 0; // don't map kernel stack protector page
+ frame_free(i, 1);
} else {
kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
}
@@ -146,7 +152,6 @@ pagedir_t *get_kernel_pagedir() {
void switch_pagedir(pagedir_t *pd) {
asm volatile("movl %0, %%cr3":: "r"(pd->phys_addr));
- invlpg(current_pd);
current_pd_d = pd;
}
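
Dropping the invlpg after the CR3 reload is sound: on x86, writing CR3 itself flushes all non-global TLB entries, so the explicit invalidation was redundant (kernel mappings survive the switch because they are tagged PTE_GLOBAL). The reload in isolation, as a sketch:

    static inline void load_page_directory(size_t pd_phys_addr) {
        // the mov to CR3 implicitly flushes every non-global TLB entry
        asm volatile("movl %0, %%cr3" :: "r"(pd_phys_addr) : "memory");
    }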
@@ -171,11 +176,16 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
ASSERT((size_t)vaddr < PD_MIRROR_ADDR);
+ pagedir_t *pdd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd_d : current_pd_d);
pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+ mutex_lock(&pdd->mutex);
	if (!(pd->page[pt] & PTE_PRESENT)) {
uint32_t new_pt_frame = frame_alloc(1);
- if (new_pt_frame == 0) return 1; // OOM
+ if (new_pt_frame == 0) {
+ mutex_unlock(&pdd->mutex);
+ return 1; // OOM
+ }
current_pd->page[pt] = pd->page[pt] =
(new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;
@@ -188,6 +198,7 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
| (rw ? PTE_RW : 0);
invlpg(vaddr);
+ mutex_unlock(&pdd->mutex);
return 0;
}
@@ -196,6 +207,7 @@ void pd_unmap_page(void* vaddr) {
uint32_t page = PAGE_OF_ADDR(vaddr);
pagetable_t *pd = ((size_t)vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+	// no need to lock the PD's mutex: clearing a single PTE is an atomic write
	if (!(pd->page[pt] & PTE_PRESENT)) return;
	if (!(current_pt[pt].page[page] & PTE_PRESENT)) return;
@@ -203,7 +215,10 @@ void pd_unmap_page(void* vaddr) {
current_pt[pt].page[page] = 0;
invlpg(vaddr);
- // TODO (?) : if pagetable is completely empty, free it
+	// If the page table is completely empty we might want to free
+	// it, but checking that it really is empty would mean scanning
+	// all of its entries (we don't store a count of used pages per
+	// PT), so it's probably not worth the effort
}
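
If freeing empty page tables ever becomes worthwhile, the usual answer to the cost described in the comment is a per-PT counter of live entries, maintained on every map and unmap. A hypothetical sketch, not implemented by this commit (pt_used_count is a made-up name):

    static uint16_t pt_used_count[1024];  // one counter per PT slot, also updated in pd_map_page

    // in pd_unmap_page, after clearing the PTE:
    if (--pt_used_count[pt] == 0) {
        frame_free(pd->page[pt] >> PTE_FRAME_SHIFT, 1);  // release the now-empty PT's frame
        pd->page[pt] = 0;                                // drop it from the page directory
        invlpg(&current_pt[pt]);                         // flush the mirrored mapping
    }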
// Creation and deletion of page directories
diff --git a/kernel/l0/region.c b/kernel/l0/region.c
index 8d59b49..aa73a22 100644
--- a/kernel/l0/region.c
+++ b/kernel/l0/region.c
@@ -1,6 +1,7 @@
#include <region.h>
#include <dbglog.h>
#include <frame.h>
+#include <mutex.h>
typedef union region_descriptor {
struct {
@@ -28,6 +29,8 @@ uint32_t n_unused_descriptors;
static descriptor_t *first_free_region_by_addr, *first_free_region_by_size;
static descriptor_t *first_used_region;
+STATIC_MUTEX(ra_mutex); // region allocator mutex
+
// ========================================================= //
// HELPER FUNCTIONS FOR THE MANIPULATION OF THE REGION LISTS //
// ========================================================= //
@@ -218,6 +221,23 @@ void region_allocator_init(void* kernel_data_end) {
first_used_region = u0;
}
+static void region_free_inner(void* addr) {
+ descriptor_t *d = find_used_region(addr);
+ if (d == 0) return;
+
+ region_info_t i = d->used.i;
+
+ remove_used_region(d);
+ d->free.addr = i.addr;
+ d->free.size = i.size;
+ add_free_region(d);
+}
+void region_free(void* addr) {
+ mutex_lock(&ra_mutex);
+ region_free_inner(addr);
+ mutex_unlock(&ra_mutex);
+}
+
static void* region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t pf, bool use_reserve) {
size = PAGE_ALIGN_UP(size);
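
Splitting region_free into a locking wrapper and region_free_inner is the standard idiom for non-reentrant mutexes: code that already holds ra_mutex (region_alloc below calls region_free_inner on its error path) uses the inner version, while external callers go through the wrapper. The pattern in general form:

    static void do_work_inner(void) {
        // ... caller must already hold the_mutex ...
    }
    void do_work(void) {
        mutex_lock(&the_mutex);
        do_work_inner();
        mutex_unlock(&the_mutex);
    }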
@@ -263,9 +283,12 @@ static void* region_alloc_inner(size_t size, uint32_t type, page_fault_handler_t
}
void* region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
+ void* result = 0;
+ mutex_lock(&ra_mutex);
+
if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) {
uint32_t frame = frame_alloc(1);
- if (frame == 0) return 0;
+ if (frame == 0) goto try_anyway;
void* descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true);
ASSERT(descriptor_region != 0);
@@ -275,7 +298,8 @@ void* region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
// this can happen if we weren't able to allocate a frame for
// a new pagetable
frame_free(frame, 1);
- return 0;
+ region_free_inner(descriptor_region);
+ goto try_anyway;
}
for (descriptor_t *d = (descriptor_t*)descriptor_region;
@@ -284,25 +308,25 @@ void* region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
add_unused_descriptor(d);
}
}
- return region_alloc_inner(size, type, pf, false);
+ try_anyway:
+ // even if we don't have enough unused descriptors, we might find
+ // a free region that has exactly the right size and therefore
+ // does not require splitting, so we try the allocation in all cases
+ result = region_alloc_inner(size, type, pf, false);
+
+ mutex_unlock(&ra_mutex);
+ return result;
}
region_info_t *find_region(void* addr) {
- descriptor_t *d = find_used_region(addr);
- if (d == 0) return 0;
- return &d->used.i;
-}
+ region_info_t *r = 0;
+ mutex_lock(&ra_mutex);
-void region_free(void* addr) {
descriptor_t *d = find_used_region(addr);
- if (d == 0) return;
-
- region_info_t i = d->used.i;
+ if (d != 0) r = &d->used.i;
- remove_used_region(d);
- d->free.addr = i.addr;
- d->free.size = i.size;
- add_free_region(d);
+ mutex_unlock(&ra_mutex);
+ return r;
}
// ========================================================= //
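
With both entry points serialized on ra_mutex, threads may now call the allocator concurrently without external locking. A hypothetical caller (REGION_T_CORE is a made-up region type; passing 0 means no page fault handler, as region_alloc itself does for its descriptor regions):

    void* r = region_alloc(2 * PAGE_SIZE, REGION_T_CORE, 0);
    if (r != 0) {
        // ... map frames into the region and use it ...
        region_free(r);  // may race freely with other threads' allocations
    }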
@@ -357,6 +381,8 @@ void region_free_unmap(void* ptr) {
// =========================== //
void dbg_print_region_stats() {
+ mutex_lock(&ra_mutex);
+
dbg_printf("/ Free kernel regions, by address:\n");
for (descriptor_t *d = first_free_region_by_addr; d != 0; d = d->free.next_by_addr) {
dbg_printf("| 0x%p - 0x%p\n", d->free.addr, d->free.addr + d->free.size);
@@ -382,6 +408,8 @@ void dbg_print_region_stats() {
ASSERT(d != d->used.next_by_addr);
}
dbg_printf("\\\n");
+
+ mutex_unlock(&ra_mutex);
}
/* vim: set ts=4 sw=4 tw=0 noet :*/