| author | Alex Auvolat <alex.auvolat@ens.fr> | 2015-02-09 18:04:59 +0100 |
| --- | --- | --- |
| committer | Alex Auvolat <alex.auvolat@ens.fr> | 2015-02-09 18:04:59 +0100 |
| commit | caf842864bdc0794e387f9580af96ab1036996f4 (patch) | |
| tree | 92b1e05e5ed79628132e320dcaa84a25eebd0dd7 /src | |
| parent | 3b03f1e41558131ca2f83a460f4d2e289cbedac0 (diff) | |
| download | kogata-caf842864bdc0794e387f9580af96ab1036996f4.tar.gz, kogata-caf842864bdc0794e387f9580af96ab1036996f4.zip | |
Change semantics of pd_map_page to return true on success, false on error.
Diffstat (limited to 'src')

| mode | file | lines changed |
| --- | --- | --- |
| -rw-r--r-- | src/kernel/core/paging.c | 10 |
| -rw-r--r-- | src/kernel/core/region.c | 8 |
| -rw-r--r-- | src/kernel/core/thread.c | 5 |
| -rw-r--r-- | src/kernel/include/paging.h | 2 |

4 files changed, 14 insertions, 11 deletions
```diff
diff --git a/src/kernel/core/paging.c b/src/kernel/core/paging.c
index e60ca53..25113ca 100644
--- a/src/kernel/core/paging.c
+++ b/src/kernel/core/paging.c
@@ -171,7 +171,7 @@ uint32_t pd_get_frame(void* vaddr) {
 	return current_pt[pt].page[page] >> PTE_FRAME_SHIFT;
 }
 
-int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
+bool pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
 	uint32_t pt = PT_OF_ADDR(vaddr);
 	uint32_t page = PAGE_OF_ADDR(vaddr);
 
@@ -186,7 +186,7 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
 		uint32_t new_pt_frame = frame_alloc(1);
 		if (new_pt_frame == 0) {
 			mutex_unlock(&pdd->mutex);
-			return 1;	// OOM
+			return false;	// OOM
 		}
 
 		current_pd->page[pt] = pd->page[pt] =
@@ -201,7 +201,7 @@ int pd_map_page(void* vaddr, uint32_t frame_id, bool rw) {
 	invlpg(vaddr);
 
 	mutex_unlock(&pdd->mutex);
-	return 0;
+	return true;
 }
 
 void pd_unmap_page(void* vaddr) {
@@ -239,8 +239,8 @@ pagedir_t *create_pagedir() {
 	temp = region_alloc(PAGE_SIZE, 0, 0);
 	if (temp == 0) goto error;
 
-	int error = pd_map_page(temp, pd_phys, true);
-	if (error) goto error;
+	bool map_ok = pd_map_page(temp, pd_phys, true);
+	if (!map_ok) goto error;
 
 	pd->phys_addr = pd_phys * PAGE_SIZE;
 	pd->mutex = MUTEX_UNLOCKED;
diff --git a/src/kernel/core/region.c b/src/kernel/core/region.c
index 3127048..c14f129 100644
--- a/src/kernel/core/region.c
+++ b/src/kernel/core/region.c
@@ -293,8 +293,8 @@ void* region_alloc(size_t size, char* type, page_fault_handler_t pf) {
 			void* descriptor_region = region_alloc_inner(PAGE_SIZE, "Region descriptors", 0, true);
 			ASSERT(descriptor_region != 0);
 
-			int error = pd_map_page(descriptor_region, frame, 1);
-			if (error) {
+			bool map_ok = pd_map_page(descriptor_region, frame, 1);
+			if (!map_ok) {
 				// this can happen if we weren't able to allocate a frame for
 				// a new pagetable
 				frame_free(frame, 1);
@@ -339,8 +339,8 @@ void default_allocator_pf_handler(pagedir_t *pd, struct region_info *r, void* ad
 	uint32_t f = frame_alloc(1);
 	if (f == 0) PANIC("Out Of Memory");
 
-	int error = pd_map_page(addr, f, 1);
-	if (error) PANIC("Could not map frame (OOM)");
+	bool map_ok = pd_map_page(addr, f, 1);
+	if (!map_ok) PANIC("Could not map frame (OOM)");
 }
 
 void region_free_unmap_free(void* ptr) {
diff --git a/src/kernel/core/thread.c b/src/kernel/core/thread.c
index 7f0bb5b..3f25add 100644
--- a/src/kernel/core/thread.c
+++ b/src/kernel/core/thread.c
@@ -129,7 +129,10 @@ thread_t *new_thread(entry_t entry, void* data) {
 			free(t);
 			return 0;
 		}
-		pd_map_page(i, f, true);
+		bool map_ok = pd_map_page(i, f, true);
+		if (!map_ok) {
+			PANIC("TODO");
+		}
 	}
 
 	t->stack_region = find_region(stack);
diff --git a/src/kernel/include/paging.h b/src/kernel/include/paging.h
index 44014a2..d42ec52 100644
--- a/src/kernel/include/paging.h
+++ b/src/kernel/include/paging.h
@@ -16,7 +16,7 @@ void switch_pagedir(pagedir_t *pd);
 
 // these functions are always relative to the currently mapped page directory
 uint32_t pd_get_frame(void* vaddr);	// get physical frame for virtual address
-int pd_map_page(void* vaddr, uint32_t frame_id, bool rw);	// returns nonzero on error
+bool pd_map_page(void* vaddr, uint32_t frame_id, bool rw);	// returns true on success, false on failure
 void pd_unmap_page(void* vaddr);	// does nothing if page not mapped
 
 // Note on concurrency : we expect that multiple threads will not try to map/unmap
```
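For reference, here is a minimal caller-side sketch of the new convention. It is not part of this commit: the helper name `map_scratch_page`, the forward declarations, and the `PAGE_SIZE` value are assumptions made for illustration, while `pd_map_page`, `region_alloc`, `frame_alloc` and `frame_free` are the functions visible in the diff above.

```c
/* Caller-side sketch of the new pd_map_page convention (illustration only,
 * not part of this commit). Declarations are mirrored from the diff context;
 * the real kogata headers may differ. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef void (*page_fault_handler_t)();   /* simplified: only 0 is passed below */
void*    region_alloc(size_t size, char* type, page_fault_handler_t pf);
uint32_t frame_alloc(size_t nframes);                 /* signature assumed from call sites */
void     frame_free(uint32_t frame, size_t nframes);  /* signature assumed from call sites */
bool     pd_map_page(void* vaddr, uint32_t frame_id, bool rw);
#define PAGE_SIZE 0x1000                              /* assumed, typical x86 page size */

/* Hypothetical helper: map one freshly allocated frame at a freshly
 * allocated virtual page. Returns the virtual address, or 0 on failure. */
void* map_scratch_page() {
	void* vaddr = region_alloc(PAGE_SIZE, 0, 0);
	if (vaddr == 0) return 0;

	uint32_t frame = frame_alloc(1);
	if (frame == 0) return 0;

	/* New semantics: true on success, false on error (e.g. OOM while
	 * allocating a page table), so the result is checked directly. */
	bool map_ok = pd_map_page(vaddr, frame, true);
	if (!map_ok) {
		frame_free(frame, 1);
		return 0;
	}
	return vaddr;
}
```

Callers that cannot recover, like `new_thread` above, still PANIC on failure; the boolean simply makes success or failure explicit at every call site instead of a nonzero error code.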