Diffstat (limited to 'src/kernel/core')
-rw-r--r--  src/kernel/core/kmalloc.c   2
-rw-r--r--  src/kernel/core/paging.c   33
-rw-r--r--  src/kernel/core/region.c   10
-rw-r--r--  src/kernel/core/thread.c    2
4 files changed, 22 insertions, 25 deletions
diff --git a/src/kernel/core/kmalloc.c b/src/kernel/core/kmalloc.c
index 85e6173..c977e54 100644
--- a/src/kernel/core/kmalloc.c
+++ b/src/kernel/core/kmalloc.c
@@ -9,7 +9,7 @@
#include <freemem.h>
static void* page_alloc_fun_for_kmalloc(size_t bytes) {
- void* addr = region_alloc(bytes, "Core kernel heap", 0);
+ void* addr = region_alloc(bytes, "Core kernel heap");
if (addr == 0) return 0;
// Map physical memory
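Note on the kmalloc.c change: region_alloc() no longer takes a page-fault handler, so it only reserves a range of virtual addresses; backing that range with physical frames is entirely up to the caller. A minimal sketch of that caller-side pattern follows, assuming the frame_alloc() and pd_map_page() signatures visible later in this diff; the helper name and loop are illustrative, not the committed body of page_alloc_fun_for_kmalloc().

/* Illustrative sketch only, not the committed code: back a freshly
 * reserved region with physical frames, one page at a time. */
static void* map_region_frames(void* addr, size_t bytes) {
	for (size_t off = 0; off < bytes; off += PAGE_SIZE) {
		uint32_t frame = frame_alloc(1);
		if (frame == 0) return 0;	// out of physical memory
		if (!pd_map_page((char*)addr + off, frame, true)) return 0;
	}
	return addr;	// rollback of partial mappings elided for brevity
}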
diff --git a/src/kernel/core/paging.c b/src/kernel/core/paging.c
index f2289bd..eec21f6 100644
--- a/src/kernel/core/paging.c
+++ b/src/kernel/core/paging.c
@@ -8,6 +8,8 @@
#include <malloc.h>
#include <freemem.h>
+#include <string.h>
+
#define PAGE_OF_ADDR(x) (((size_t)(x) >> PAGE_SHIFT) % N_PAGES_IN_PT)
#define PT_OF_ADDR(x) ((size_t)(x) >> (PAGE_SHIFT + PT_SHIFT))
@@ -103,18 +105,13 @@ void page_fault_handler(registers_t *regs) {
}
region_info_t *i = find_region(vaddr);
- if (i == 0) {
- dbg_printf("Kernel pagefault in non-existing region at 0x%p\n", vaddr);
- dbg_dump_registers(regs);
- PANIC("Unhandled kernel space page fault");
- }
- if (i->pf == 0) {
- dbg_printf("Kernel pagefault in region with no handler at 0x%p (%s region)\n", vaddr, i->type);
- dbg_dump_registers(regs);
- dbg_print_region_info();
- PANIC("Unhandled kernel space page fault");
- }
- i->pf(get_current_pagedir(), i, vaddr);
+
+ char* region = (i == 0 ? "non-existing" : i->type);
+
+ dbg_printf("Kernel pagefault in region with no handler at 0x%p (%s region)\n", vaddr, region);
+ dbg_dump_registers(regs);
+ dbg_print_region_info();
+ PANIC("Unhandled kernel space page fault");
}
}
}
@@ -203,6 +200,7 @@ bool pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
const uint32_t page = PAGE_OF_ADDR(vaddr);
ASSERT((size_t)vaddr < PD_MIRROR_ADDR);
+ ASSERT(frame_id != 0);
bool on_kernel_pd = (size_t)vaddr >= K_HIGHHALF_ADDR || current_thread == 0;
@@ -212,7 +210,6 @@ bool pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
mutex_lock(&pdd->mutex);
if (!(pd->page[pt] & PTE_PRESENT)) {
-
uint32_t new_pt_frame;
int tries = 0;
while ((new_pt_frame = frame_alloc(1)) == 0 && (tries++) < 3) {
@@ -226,13 +223,15 @@ bool pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
}
current_pd->page[pt] = pd->page[pt] =
- (new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW
+ (new_pt_frame << PTE_FRAME_SHIFT)
+ | PTE_PRESENT | PTE_RW
| ((size_t)vaddr < K_HIGHHALF_ADDR ? PTE_USER : 0);
+
invlpg(&current_pt[pt]);
+ memset(&current_pt[pt], 0, PAGE_SIZE);
}
current_pt[pt].page[page] =
- (frame_id << PTE_FRAME_SHIFT)
- | PTE_PRESENT
+ (frame_id << PTE_FRAME_SHIFT) | PTE_PRESENT
| ((size_t)vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL)
| (rw ? PTE_RW : 0);
invlpg(vaddr);
@@ -273,7 +272,7 @@ pagedir_t *create_pagedir(user_pf_handler_t pf, void* pfd) {
pd = (pagedir_t*)malloc(sizeof(pagedir_t));
if (pd == 0) goto error;
- temp = region_alloc(PAGE_SIZE, "Temporary pagedir mapping", 0);
+ temp = region_alloc(PAGE_SIZE, "Temporary pagedir mapping");
if (temp == 0) goto error;
bool map_ok = pd_map_page(temp, pd_phys, true);
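Two remarks on paging.c. First, the page-fault handler no longer dispatches to a per-region callback: any kernel-space fault now ends in a panic, with the region type (or "non-existing") included in the debug output. Second, in pd_map_page() a newly allocated page table is now zeroed through its mirror mapping (the memset after invlpg), and frame 0 is rejected by the new ASSERT. For readability, the entry layout written by pd_map_page() can be restated as a small helper; this helper does not exist in the tree and is only a sketch of the expression used above.

/* Hypothetical helper restating the PTE layout used in pd_map_page():
 * pages below K_HIGHHALF_ADDR are user-accessible, kernel pages are global. */
static inline uint32_t make_pte(uint32_t frame_id, void* vaddr, bool rw) {
	return (frame_id << PTE_FRAME_SHIFT) | PTE_PRESENT
		| ((size_t)vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL)
		| (rw ? PTE_RW : 0);
}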
diff --git a/src/kernel/core/region.c b/src/kernel/core/region.c
index d22a98f..1f1958f 100644
--- a/src/kernel/core/region.c
+++ b/src/kernel/core/region.c
@@ -216,7 +216,6 @@ void region_allocator_init(void* kernel_data_end) {
u0->used.i.addr = (void*)K_HIGHHALF_ADDR;
u0->used.i.size = PAGE_ALIGN_UP(kernel_data_end) - K_HIGHHALF_ADDR;
u0->used.i.type = "Kernel code & data";
- u0->used.i.pf = 0;
u0->used.next_by_addr = 0;
first_used_region = u0;
}
@@ -238,7 +237,7 @@ void region_free(void* addr) {
mutex_unlock(&ra_mutex);
}
-static void* region_alloc_inner(size_t size, char* type, kernel_pf_handler_t pf, bool use_reserve) {
+static void* region_alloc_inner(size_t size, char* type, bool use_reserve) {
size = PAGE_ALIGN_UP(size);
for (descriptor_t *i = first_free_region_by_size; i != 0; i = i->free.first_bigger) {
@@ -273,7 +272,6 @@ static void* region_alloc_inner(size_t size, char* type, kernel_pf_handler_t pf,
i->used.i.addr = addr;
i->used.i.size = size;
i->used.i.type = type;
- i->used.i.pf = pf;
add_used_region(i);
return addr;
@@ -282,7 +280,7 @@ static void* region_alloc_inner(size_t size, char* type, kernel_pf_handler_t pf,
return 0; //No big enough block found
}
-void* region_alloc(size_t size, char* type, kernel_pf_handler_t pf) {
+void* region_alloc(size_t size, char* type) {
void* result = 0;
mutex_lock(&ra_mutex);
@@ -290,7 +288,7 @@ void* region_alloc(size_t size, char* type, kernel_pf_handler_t pf) {
uint32_t frame = frame_alloc(1);
if (frame == 0) goto try_anyway;
- void* descriptor_region = region_alloc_inner(PAGE_SIZE, "Region descriptors", 0, true);
+ void* descriptor_region = region_alloc_inner(PAGE_SIZE, "Region descriptors", true);
ASSERT(descriptor_region != 0);
bool map_ok = pd_map_page(descriptor_region, frame, 1);
@@ -312,7 +310,7 @@ void* region_alloc(size_t size, char* type, kernel_pf_handler_t pf) {
// even if we don't have enough unused descriptors, we might find
// a free region that has exactly the right size and therefore
// does not require splitting, so we try the allocation in all cases
- result = region_alloc_inner(size, type, pf, false);
+ result = region_alloc_inner(size, type, false);
mutex_unlock(&ra_mutex);
return result;
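With the pf callback gone from region.c, a used-region descriptor only records where the region is, how large it is, and a human-readable tag. Roughly, based on the assignments in the hunks above (the authoritative definition lives in the headers and may contain more fields):

/* Approximate shape of region_info_t after this commit; inferred from the
 * assignments above, not copied from the header. */
typedef struct {
	void*  addr;	// start of the region, page aligned
	size_t size;	// length in bytes, page aligned
	char*  type;	// description string, e.g. "Kernel code & data"
} region_info_t;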
diff --git a/src/kernel/core/thread.c b/src/kernel/core/thread.c
index 74e5a30..1689433 100644
--- a/src/kernel/core/thread.c
+++ b/src/kernel/core/thread.c
@@ -157,7 +157,7 @@ thread_t *new_thread(entry_t entry, void* data) {
thread_t *t = (thread_t*)malloc(sizeof(thread_t));
if (t == 0) return 0;
- void* stack = region_alloc(KPROC_STACK_SIZE + PAGE_SIZE, "Stack", 0);
+ void* stack = region_alloc(KPROC_STACK_SIZE + PAGE_SIZE, "Stack");
if (stack == 0) {
free(t);
return 0;
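The thread.c hunk is the same mechanical update as in kmalloc.c: callers simply drop the trailing handler argument. For any out-of-tree caller, the migration looks like the following sketch, assuming the new prototype void* region_alloc(size_t size, char* type):

/* before this commit: void* stack = region_alloc(KPROC_STACK_SIZE + PAGE_SIZE, "Stack", 0); */
void* stack = region_alloc(KPROC_STACK_SIZE + PAGE_SIZE, "Stack");
if (stack == 0) {
	/* no free virtual range of that size; fail gracefully */
}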