aboutsummaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorAlex Auvolat <alex.auvolat@ens.fr>2014-12-04 10:43:58 +0100
committerAlex Auvolat <alex.auvolat@ens.fr>2014-12-04 10:43:58 +0100
commit292e4141078d18993b1395820631503ac852eb3d (patch)
tree3b3e5cb75d9bf3abb2639deedc247eba2cdeabe6 /kernel
parentd78a3d8be9c194554580cb3c73c1c5ebd0d82a9b (diff)
downloadmacroscope-292e4141078d18993b1395820631503ac852eb3d.tar.gz
macroscope-292e4141078d18993b1395820631503ac852eb3d.zip
Make paging work ! \o/
Diffstat (limited to 'kernel')
-rw-r--r--kernel/config.h4
-rw-r--r--kernel/include/paging.h10
-rw-r--r--kernel/include/region.h5
-rw-r--r--kernel/include/sys.h11
-rw-r--r--kernel/l0/dbglog.c2
-rw-r--r--kernel/l0/frame.c10
-rw-r--r--kernel/l0/idt.c10
-rw-r--r--kernel/l0/kmain.c40
-rw-r--r--kernel/l0/loader.s8
-rw-r--r--kernel/l0/paging.c183
-rw-r--r--kernel/l0/region.c25
11 files changed, 191 insertions, 117 deletions
diff --git a/kernel/config.h b/kernel/config.h
index ad3dfb2..d198295 100644
--- a/kernel/config.h
+++ b/kernel/config.h
@@ -14,9 +14,7 @@
#endif
-extern char k_highhalf_addr, k_end_addr; // defined in linker script : 0xC0000000
-#define K_HIGHHALF_ADDR ((size_t)&k_highhalf_addr)
-#define K_END_ADDR ((size_t)&k_end_addr)
+#define K_HIGHHALF_ADDR ((size_t)0xC0000000)
#define OS_NAME "macrO.Scope"
#define OS_VERSION "0.0.1"
diff --git a/kernel/include/paging.h b/kernel/include/paging.h
index 7082e2e..2e1f844 100644
--- a/kernel/include/paging.h
+++ b/kernel/include/paging.h
@@ -13,13 +13,11 @@ pagedir_t *get_kernel_pagedir();
void switch_pagedir(pagedir_t *pd);
-// The three functions below DO NOT DEPEND on argument pd for
-// addresses above K_HIGHHALF_ADDR, so just pass 0 to map/unmap in kernel space.
-uint32_t pd_get_frame(pagedir_t *pd, size_t vaddr); // get physical frame for virtual address
-int pd_map_page(pagedir_t *pd,
- size_t vaddr, uint32_t frame_id,
+// these functions are always relative to the currently mapped page directory
+uint32_t pd_get_frame(size_t vaddr); // get physical frame for virtual address
+int pd_map_page(size_t vaddr, uint32_t frame_id,
uint32_t rw); // returns nonzero on error
-void pd_unmap_page(pagedir_t *pd, size_t vaddr); // does nothing if page wasn't mapped
+void pd_unmap_page(size_t vaddr); // does nothing if page not mapped
pagedir_t *create_pagedir(); // returns zero on error
void delete_pagedir(pagedir_t *pd);
diff --git a/kernel/include/region.h b/kernel/include/region.h
index 3c64081..1628370 100644
--- a/kernel/include/region.h
+++ b/kernel/include/region.h
@@ -29,9 +29,4 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf); // ret
region_info_t *find_region(size_t addr);
void region_free(size_t addr);
-#define N_PAGES_IN_PT_REGION 4
-// special call for use by paging code
-// allocates a region of N_PAGES_IN_PT_REGION pages
-size_t region_alloc_for_pt();
-
void dbg_print_region_stats();
diff --git a/kernel/include/sys.h b/kernel/include/sys.h
index a9d2d4c..ff98c60 100644
--- a/kernel/include/sys.h
+++ b/kernel/include/sys.h
@@ -22,6 +22,10 @@ static inline uint16_t inw(uint16_t port) {
return ret;
}
+static inline void invlpg(size_t addr) {
+ asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
+}
+
void panic(const char* message, const char* file, int line);
void panic_assert(const char* assertion, const char* file, int line);
#define PANIC(s) panic(s, __FILE__, __LINE__);
@@ -37,6 +41,13 @@ void panic_assert(const char* assertion, const char* file, int line);
#define PAGE_ALIGN_DOWN(x) (((size_t)x) & PAGE_MASK)
#define PAGE_ALIGN_UP(x) ((((size_t)x)&(~PAGE_MASK)) == 0 ? ((size_t)x) : (((size_t)x) & PAGE_MASK) + PAGE_SIZE)
#define PAGE_ID(x) (((size_t)x) / PAGE_SIZE)
+#define PAGE_SHIFT 12
+#define PT_SHIFT 10
+// PAGE_SHIFT + PT_SHIFT + PT_SHIFT = 32
+#define N_PAGES_IN_PT 1024
+#define PD_MIRROR_ADDR 0xFFC00000 // last 4MB used for PD mirroring
+#define LAST_KERNEL_ADDR PD_MIRROR_ADDR
+#define FIRST_KERNEL_PT (K_HIGHHALF_ADDR >> (PAGE_SHIFT+PT_SHIFT)) // must be 768
#define MASK4 0xFFFFFFFC
#define ALIGN4_UP(x) ((((size_t)x)&(~MASK4)) == 0 ? ((size_t)x) : (((size_t)x) & MASK4) + 4)
diff --git a/kernel/l0/dbglog.c b/kernel/l0/dbglog.c
index 377e6c8..5a7251b 100644
--- a/kernel/l0/dbglog.c
+++ b/kernel/l0/dbglog.c
@@ -38,7 +38,7 @@ static void vga_update_cursor() {
static void vga_init() {
vga_row = 0;
vga_column = 0;
- vga_buffer = (uint16_t*) (&k_highhalf_addr + 0xB8000);
+ vga_buffer = (uint16_t*) (K_HIGHHALF_ADDR + 0xB8000);
for (size_t y = 0; y < VGA_HEIGHT; y++) {
for (size_t x = 0; x < VGA_WIDTH; x++) {
diff --git a/kernel/l0/frame.c b/kernel/l0/frame.c
index 1f16eaf..c646a48 100644
--- a/kernel/l0/frame.c
+++ b/kernel/l0/frame.c
@@ -4,8 +4,8 @@
// TODO: buddy allocator
// this is a simple bitmap allocator
-#define INDEX_FROM_BIT(a) (a/(8*4))
-#define OFFSET_FROM_BIT(a) (a%(8*4))
+#define INDEX_FROM_BIT(a) ((a)/(8*4))
+#define OFFSET_FROM_BIT(a) ((a)%(8*4))
static uint32_t *frame_bitset;
static uint32_t nframes, nused_frames;
@@ -54,9 +54,9 @@ uint32_t frame_alloc(size_t n) {
}
void frame_free(uint32_t base, size_t n) {
- for (size_t x = 0; x < n; x++) {
- uint32_t idx = INDEX_FROM_BIT(base + n);
- uint32_t ofs = OFFSET_FROM_BIT(base + n);
+ for (size_t i = 0; i < n; i++) {
+ uint32_t idx = INDEX_FROM_BIT(base + i);
+ uint32_t ofs = OFFSET_FROM_BIT(base + i);
if (frame_bitset[idx] & (0x1 << ofs)) {
frame_bitset[idx] &= ~(0x1 << ofs);
nused_frames--;
diff --git a/kernel/l0/idt.c b/kernel/l0/idt.c
index 8a981af..3bfcfc5 100644
--- a/kernel/l0/idt.c
+++ b/kernel/l0/idt.c
@@ -66,11 +66,11 @@ static isr_handler_t ex_handlers[32] = {0};
/* Called in interrupt.s when an exception fires (interrupt 0 to 31) */
void idt_exHandler(registers_t *regs) {
- dbg_printf("/ Exception %i\n", regs->int_no);
- dbg_printf("| EAX: 0x%p EBX: 0x%p ECX: 0x%p EDX: 0x%p\n", regs->eax, regs->ebx, regs->ecx, regs->edx);
- dbg_printf("| EDI: 0x%p ESI: 0x%p ESP: 0x%p EBP: 0x%p\n", regs->edi, regs->esi, regs->esp, regs->ebp);
- dbg_printf("| EIP: 0x%p CS : 0x%p DS : 0x%p SS : 0x%p\n", regs->eip, regs->cs, regs->ds, regs->ss);
- dbg_printf("\\ EFl: 0x%p I# : 0x%p Err: 0x%p\n", regs->eflags, regs->int_no, regs->err_code);
+ /*dbg_printf("/ Exception %i\n", regs->int_no);*/
+ /*dbg_printf("| EAX: 0x%p EBX: 0x%p ECX: 0x%p EDX: 0x%p\n", regs->eax, regs->ebx, regs->ecx, regs->edx);*/
+ /*dbg_printf("| EDI: 0x%p ESI: 0x%p ESP: 0x%p EBP: 0x%p\n", regs->edi, regs->esi, regs->esp, regs->ebp);*/
+ /*dbg_printf("| EIP: 0x%p CS : 0x%p DS : 0x%p SS : 0x%p\n", regs->eip, regs->cs, regs->ds, regs->ss);*/
+ /*dbg_printf("\\ EFl: 0x%p I# : 0x%p Err: 0x%p\n", regs->eflags, regs->int_no, regs->err_code);*/
if (ex_handlers[regs->int_no] != 0) {
ex_handlers[regs->int_no](regs);
diff --git a/kernel/l0/kmain.c b/kernel/l0/kmain.c
index f07e243..a221b0e 100644
--- a/kernel/l0/kmain.c
+++ b/kernel/l0/kmain.c
@@ -13,6 +13,19 @@ void breakpoint_handler(registers_t *regs) {
dbg_printf("Breakpoint! (int3)\n");
BOCHS_BREAKPOINT;
}
+
+void test_pf_handler(pagedir_t *pd, region_info_t *i, size_t addr) {
+ dbg_printf("0x%p", addr);
+
+ uint32_t f = frame_alloc(1);
+ if (f == 0) PANIC("Out Of Memory");
+ dbg_printf(" -> %i", f);
+
+ int error = pd_map_page(addr, f, 1);
+ if (error) PANIC("Could not map frame (OOM)");
+}
+
+extern char k_end_addr; // defined in linker script : 0xC0000000
void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
dbglog_setup();
@@ -35,7 +48,7 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
// used for allocation of data structures before malloc is set up
// a pointer to this pointer is passed to the functions that might have
// to allocate memory ; they just increment it of the allocated quantity
- void* kernel_data_end = (void*)K_END_ADDR;
+ void* kernel_data_end = (void*)&k_end_addr;
frame_init_allocator(total_ram, &kernel_data_end);
dbg_printf("kernel_data_end: 0x%p\n", kernel_data_end);
@@ -72,6 +85,31 @@ void kmain(struct multiboot_info_t *mbd, int32_t mb_magic) {
dbg_printf("Freed region 0x%p\n", s);
dbg_print_region_stats();
+ // allocate a big region and try to write into it
+ const size_t n = 1000;
+ size_t p0 = region_alloc(n * PAGE_SIZE, REGION_T_HW, test_pf_handler);
+ for (size_t i = 0; i < n; i++) {
+ uint32_t *x = (uint32_t*)(p0 + i * PAGE_SIZE);
+ dbg_printf("[%i : ", i);
+ x[0] = 12;
+ dbg_printf(" : .");
+ x[1] = (i * 20422) % 122;
+		dbg_printf("]\n");
+ }
+ // unmap memory
+ for (size_t i = 0; i < n; i++) {
+ uint32_t *x = (uint32_t*)(p0 + i * PAGE_SIZE);
+ ASSERT(x[1] == (i * 20422) % 122);
+
+ size_t f = pd_get_frame((size_t)x);
+ ASSERT(f != 0);
+ pd_unmap_page((size_t)x);
+
+ frame_free(f, 1);
+ }
+	region_free(p0);
+
+
// TODO:
// - setup allocator for physical pages (eg: buddy allocator, see OSDev wiki)
// - setup allocator for virtual memory space
diff --git a/kernel/l0/loader.s b/kernel/l0/loader.s
index e4c4611..6ad4ff0 100644
--- a/kernel/l0/loader.s
+++ b/kernel/l0/loader.s
@@ -1,6 +1,6 @@
[EXTERN kmain] ; kmain is defined in kmain.c
[GLOBAL loader] ; making entry point visible to linker
-[GLOBAL kernel_pagedir] ; make kernel page directory visible
+[GLOBAL kernel_pd] ; make kernel page directory visible
; higher-half kernel setup
K_HIGHHALF_ADDR equ 0xC0000000
@@ -25,7 +25,7 @@ multiboot_header:
loader:
; setup the boot page directory used for higher-half
- mov ecx, kernel_pagedir
+ mov ecx, kernel_pd
sub ecx, K_HIGHHALF_ADDR ; access its lower-half address
mov cr3, ecx
@@ -45,7 +45,7 @@ loader:
[section .data]
align 0x1000
-kernel_pagedir:
+kernel_pd:
; uses 4MB pages
; identity-maps the first 4Mb of RAM, and also maps them with offset += k_highhalf_addr
dd 0x00000083
@@ -56,7 +56,7 @@ kernel_pagedir:
[section .text]
higherhalf: ; now we're running in higher half
; unmap first 4Mb
- mov dword [kernel_pagedir], 0
+ mov dword [kernel_pd], 0
invlpg [0]
mov esp, stack_top ; set up the stack
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index f076aa1..74f3d09 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -2,83 +2,112 @@
#include <frame.h>
#include <idt.h>
#include <dbglog.h>
-
-typedef union page {
- struct {
- uint32_t present : 1;
- uint32_t rw : 1;
- uint32_t user : 1;
- uint32_t write_through : 1;
- uint32_t disable_cache : 1;
- uint32_t accessed : 1;
- uint32_t dirty : 1; // only PTE
- uint32_t size_4m : 1; // only PDE
- uint32_t global : 1; // only PTE
- uint32_t rsvd : 3;
- uint32_t frame : 20;
- };
- uint32_t as_int32;
-} page_t;
+#include <region.h>
+
+#define PAGE_OF_ADDR(x) (((size_t)x >> PAGE_SHIFT) % N_PAGES_IN_PT)
+#define PT_OF_ADDR(x) ((size_t)x >> (PAGE_SHIFT + PT_SHIFT))
+
+#define PTE_PRESENT (1<<0)
+#define PTE_RW (1<<1)
+#define PTE_USER (1<<2)
+#define PTE_WRITE_THROUGH (1<<3)
+#define PTE_DISABLE_CACHE (1<<4)
+#define PTE_ACCESSED (1<<5)
+#define PTE_DIRTY (1<<6) // only PTE
+#define PTE_SIZE_4M (1<<7) // only PDE
+#define PTE_GLOBAL (1<<8) // only PTE
+#define PTE_FRAME_SHIFT 12
typedef struct page_table {
- page_t page[1024];
+ uint32_t page[1024];
} pagetable_t;
struct page_directory {
- pagetable_t *pt[1024]; // virtual addresses of each page table
- pagetable_t *dir; // virtual address of page directory
size_t phys_addr; // physical address of page directory
+ // to modify a page directory, we first map it
+ // then we can use mirroring to edit it
+ // (the last 4M of the address space are mapped to the PD itself)
+
+ // more info to be stored here, potentially
};
// access kernel page directory page defined in loader.s
// (this is a correct higher-half address)
-extern pagetable_t kernel_pagedir;
+extern pagetable_t kernel_pd;
+
+// pre-allocate a page table so that we can map the first 4M of kernel memory
+static pagetable_t __attribute__((aligned(PAGE_SIZE))) kernel_pt0;
-static pagetable_t __attribute__((aligned(PAGE_SIZE))) kernel_pt768;
-static pagedir_t kernel_pd;
+static pagedir_t kernel_pd_d;
+static pagedir_t *current_pd_d;
-static pagedir_t *current_pd;
+#define current_pt ((pagetable_t*)PD_MIRROR_ADDR)
+#define current_pd ((pagetable_t*)(PD_MIRROR_ADDR + (N_PAGES_IN_PT-1)*PAGE_SIZE))
void page_fault_handler(registers_t *regs) {
- size_t addr;
- asm volatile("movl %%cr2, %0":"=r"(addr));
- dbg_printf("Page fault at 0x%p\n", addr);
- PANIC("PAGE FAULT");
- // not handled yet
+ size_t vaddr;
+ asm volatile("movl %%cr2, %0":"=r"(vaddr));
+
+ if (vaddr >= K_HIGHHALF_ADDR) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+
+ if (current_pd != &kernel_pd && vaddr >= K_HIGHHALF_ADDR
+ && current_pd->page[pt] != kernel_pd.page[pt])
+ {
+ current_pd->page[pt] = kernel_pd.page[pt];
+ invlpg((size_t)(&current_pt[pt]));
+ return;
+ }
+
+ region_info_t *i = find_region(vaddr);
+ if (i == 0) {
+ dbg_printf("Kernel pagefault in non-existing region at 0x%p\n", vaddr);
+ PANIC("Unhandled kernel space page fault");
+ }
+ if (i->pf == 0) {
+ dbg_printf("Kernel pagefault in region with no handler at 0x%p\n", vaddr);
+ PANIC("Unhandled kernel space page fault");
+ }
+ i->pf(current_pd_d, i, vaddr);
+ } else {
+ dbg_printf("Userspace page fault at 0x%p\n", vaddr);
+ PANIC("Unhandled userspace page fault");
+ // not handled yet
+ // TODO
+ }
}
void paging_setup(void* kernel_data_end) {
size_t n_kernel_pages =
PAGE_ALIGN_UP((size_t)kernel_data_end - K_HIGHHALF_ADDR)/PAGE_SIZE;
- ASSERT(n_kernel_pages <= 1024);
+ ASSERT(n_kernel_pages <= 1024); // we use less than 4M for kernel
- // setup kernel_pd structure
- kernel_pd.dir = &kernel_pagedir;
- kernel_pd.phys_addr = (size_t)kernel_pd.dir - K_HIGHHALF_ADDR;
- for (size_t i = 0; i < 1024; i++) kernel_pd.pt[i] = 0;
+ // setup kernel_pd_d structure
+ kernel_pd_d.phys_addr = (size_t)&kernel_pd - K_HIGHHALF_ADDR;
- // setup kernel_pt768
+ // setup kernel_pt0
+ ASSERT(PAGE_OF_ADDR(K_HIGHHALF_ADDR) == 0); // kernel is 4M-aligned
+ ASSERT(FIRST_KERNEL_PT == 768);
for (size_t i = 0; i < n_kernel_pages; i++) {
- kernel_pt768.page[i].as_int32 = 0; // clear any junk
- kernel_pt768.page[i].present = 1;
- kernel_pt768.page[i].user = 0;
- kernel_pt768.page[i].rw = 1;
- kernel_pt768.page[i].frame = i;
+ kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
}
for (size_t i = n_kernel_pages; i < 1024; i++){
- kernel_pt768.page[i].as_int32 = 0;
+ kernel_pt0.page[i] = 0;
}
- // replace 4M mapping by kernel_pt768
- kernel_pd.pt[768] = &kernel_pt768;
- kernel_pd.dir->page[768].as_int32 =
- (((size_t)&kernel_pt768 - K_HIGHHALF_ADDR) & PAGE_MASK) | 0x07;
+ // replace 4M mapping by kernel_pt0
+ kernel_pd.page[FIRST_KERNEL_PT] =
+ (((size_t)&kernel_pt0 - K_HIGHHALF_ADDR) & PAGE_MASK) | PTE_PRESENT | PTE_RW;
+ // set up mirroring
+ kernel_pd.page[N_PAGES_IN_PT-1] =
+ (((size_t)&kernel_pd - K_HIGHHALF_ADDR) & PAGE_MASK) | PTE_PRESENT | PTE_RW;
- current_pd = &kernel_pd;
+ invlpg(K_HIGHHALF_ADDR);
// paging already enabled in loader, nothing to do.
+ switch_pagedir(&kernel_pd_d);
// disable 4M pages (remove PSE bit in CR4)
uint32_t cr4;
@@ -90,45 +119,67 @@ void paging_setup(void* kernel_data_end) {
}
pagedir_t *get_current_pagedir() {
- return current_pd;
+ return current_pd_d;
}
pagedir_t *get_kernel_pagedir() {
- return &kernel_pd;
+ return &kernel_pd_d;
}
void switch_pagedir(pagedir_t *pd) {
asm volatile("movl %0, %%cr3":: "r"(pd->phys_addr));
+ invlpg((size_t)current_pd);
+ current_pd_d = pd;
}
// ============================== //
// Mapping and unmapping of pages //
// ============================== //
-uint32_t pd_get_frame(pagedir_t *pd, size_t vaddr) {
- uint32_t page = vaddr / PAGE_SIZE;
- uint32_t pt = page / PAGE_SIZE;
- uint32_t pt_page = page % PAGE_SIZE;
+uint32_t pd_get_frame(size_t vaddr) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+ uint32_t page = PAGE_OF_ADDR(vaddr);
+
+ pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
- if (pd == 0) return 0;
- if (pd->pt[pt] == 0) return 0;
- if (!pd->pt[pt]->page[pt_page].present) return 0;
- return pd->pt[pt]->page[pt_page].frame;
+	if (!(pd->page[pt] & PTE_PRESENT)) return 0;
+	if (!(current_pt[pt].page[page] & PTE_PRESENT)) return 0;
+ return current_pt[pt].page[page] >> PTE_FRAME_SHIFT;
}
-int pd_map_page(pagedir_t *pd, size_t vaddr, uint32_t frame_id, uint32_t rw) {
- return 1; // TODO
+int pd_map_page(size_t vaddr, uint32_t frame_id, uint32_t rw) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+ uint32_t page = PAGE_OF_ADDR(vaddr);
+
+ pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+
+	if (!(pd->page[pt] & PTE_PRESENT)) {
+ size_t new_pt_frame = frame_alloc(1);
+ if (new_pt_frame == 0) return 1; // OOM
+
+ current_pd->page[pt] = pd->page[pt] =
+ (new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;
+ invlpg((size_t)&current_pt[pt]);
+ }
+
+ current_pt[pt].page[page] =
+ frame_id << PTE_FRAME_SHIFT
+ | PTE_PRESENT
+ | (vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL)
+ | (rw ? PTE_RW : 0);
+
+ return 0;
}
-void pd_unmap_page(pagedir_t *pd, size_t vaddr) {
- uint32_t page = vaddr / PAGE_SIZE;
- uint32_t pt = page / PAGE_SIZE;
- uint32_t pt_page = page % PAGE_SIZE;
+void pd_unmap_page(size_t vaddr) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+ uint32_t page = PAGE_OF_ADDR(vaddr);
+
+ pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
- if (pd == 0) return;
- if (pd->pt[pt] == 0) return;
- if (!pd->pt[pt]->page[pt_page].present) return;
- pd->pt[pt]->page[pt_page].as_int32 = 0;
+	if (!(pd->page[pt] & PTE_PRESENT)) return;
+	if (!(current_pt[pt].page[page] & PTE_PRESENT)) return;
+ current_pt[pt].page[page] &= ~PTE_PRESENT;
// TODO (?) : if pagetable is completely empty, free it
}
diff --git a/kernel/l0/region.c b/kernel/l0/region.c
index 63e464d..6001346 100644
--- a/kernel/l0/region.c
+++ b/kernel/l0/region.c
@@ -17,7 +17,7 @@ typedef union region_descriptor {
} used;
} descriptor_t;
-#define N_RESERVE_DESCRIPTORS 3 // always keep at least 3 unused descriptors
+#define N_RESERVE_DESCRIPTORS 2 // always keep at least 2 unused descriptors
#define N_BASE_DESCRIPTORS 12 // pre-allocate memory for 12 descriptors
static descriptor_t base_descriptors[N_BASE_DESCRIPTORS];
@@ -210,7 +210,7 @@ void region_allocator_init(void* kernel_data_end) {
descriptor_t *f0 = &base_descriptors[1];
f0->free.addr = PAGE_ALIGN_UP(kernel_data_end);
- f0->free.size = (0xFFFFF000-f0->free.addr); // last page cannot be used...
+ f0->free.size = (LAST_KERNEL_ADDR-f0->free.addr);
f0->free.next_by_size = 0;
f0->free.first_bigger = 0;
first_free_region_by_size = first_free_region_by_addr = f0;
@@ -234,8 +234,7 @@ static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_
return 0;
}
- // this assert is a bit tricky to prove,
- // but basically it means that the allocation function
+ // this assert basically means that the allocation function
// is called less than N_RESERVE_DESCRIPTORS times with
// the use_reserve flag before more descriptors
// are allocated.
@@ -267,20 +266,6 @@ static size_t region_alloc_inner(size_t size, uint32_t type, page_fault_handler_
return 0; //No big enough block found
}
-bool region_alloc_for_pt_use_reserve = false;
-size_t region_alloc_for_pt() {
- if (region_alloc_for_pt_use_reserve) {
- return region_alloc_inner(
- N_PAGES_IN_PT_REGION * PAGE_SIZE,
- REGION_T_PAGETABLE,
- 0, true);
- } else {
- return region_alloc(
- N_PAGES_IN_PT_REGION * PAGE_SIZE,
- REGION_T_PAGETABLE, 0);
- }
-}
-
size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
if (n_unused_descriptors <= N_RESERVE_DESCRIPTORS) {
uint32_t frame = frame_alloc(1);
@@ -289,9 +274,7 @@ size_t region_alloc(size_t size, uint32_t type, page_fault_handler_t pf) {
size_t descriptor_region = region_alloc_inner(PAGE_SIZE, REGION_T_DESCRIPTORS, 0, true);
ASSERT(descriptor_region != 0);
- region_alloc_for_pt_use_reserve = true;
- int error = pd_map_page(0, descriptor_region, frame, 1);
- region_alloc_for_pt_use_reserve = false;
+ int error = pd_map_page(descriptor_region, frame, 1);
if (error) {
// this can happen if we weren't able to allocate a frame for
// a new pagetable