author    Alex Auvolat <alex.auvolat@ens.fr>  2014-12-04 10:43:58 +0100
committer Alex Auvolat <alex.auvolat@ens.fr>  2014-12-04 10:43:58 +0100
commit    292e4141078d18993b1395820631503ac852eb3d (patch)
tree      3b3e5cb75d9bf3abb2639deedc247eba2cdeabe6 /kernel/l0/paging.c
parent    d78a3d8be9c194554580cb3c73c1c5ebd0d82a9b (diff)
Make paging work ! \o/
Diffstat (limited to 'kernel/l0/paging.c')
-rw-r--r--  kernel/l0/paging.c  183
1 file changed, 117 insertions(+), 66 deletions(-)
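
The commit drops the page_t bitfield union in favour of plain uint32_t page table entries manipulated through flag macros. Below is a minimal standalone sketch of building and decoding such an entry with those macros (flag values copied from the diff, PAGE_SHIFT assumed to be 12 to match PTE_FRAME_SHIFT; this sketch is not part of the patch itself):

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT     (1<<0)
#define PTE_RW          (1<<1)
#define PTE_USER        (1<<2)
#define PTE_FRAME_SHIFT 12

int main(void) {
	uint32_t frame = 0x1234;   /* physical frame number */

	/* build a present, writable, kernel-only entry */
	uint32_t pte = (frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;

	/* decode it again: frame number in the top 20 bits, flags in the low 12 */
	if (pte & PTE_PRESENT)
		printf("frame = 0x%x, rw = %d, user = %d\n",
		       pte >> PTE_FRAME_SHIFT,
		       !!(pte & PTE_RW),
		       !!(pte & PTE_USER));
	return 0;
}
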
diff --git a/kernel/l0/paging.c b/kernel/l0/paging.c
index f076aa1..74f3d09 100644
--- a/kernel/l0/paging.c
+++ b/kernel/l0/paging.c
@@ -2,83 +2,112 @@
#include <frame.h>
#include <idt.h>
#include <dbglog.h>
-
-typedef union page {
- struct {
- uint32_t present : 1;
- uint32_t rw : 1;
- uint32_t user : 1;
- uint32_t write_through : 1;
- uint32_t disable_cache : 1;
- uint32_t accessed : 1;
- uint32_t dirty : 1; // only PTE
- uint32_t size_4m : 1; // only PDE
- uint32_t global : 1; // only PTE
- uint32_t rsvd : 3;
- uint32_t frame : 20;
- };
- uint32_t as_int32;
-} page_t;
+#include <region.h>
+
+#define PAGE_OF_ADDR(x) (((size_t)x >> PAGE_SHIFT) % N_PAGES_IN_PT)
+#define PT_OF_ADDR(x) ((size_t)x >> (PAGE_SHIFT + PT_SHIFT))
+
+#define PTE_PRESENT (1<<0)
+#define PTE_RW (1<<1)
+#define PTE_USER (1<<2)
+#define PTE_WRITE_THROUGH (1<<3)
+#define PTE_DISABLE_CACHE (1<<4)
+#define PTE_ACCESSED (1<<5)
+#define PTE_DIRTY (1<<6) // only PTE
+#define PTE_SIZE_4M (1<<7) // only PDE
+#define PTE_GLOBAL (1<<8) // only PTE
+#define PTE_FRAME_SHIFT 12
typedef struct page_table {
- page_t page[1024];
+ uint32_t page[1024];
} pagetable_t;
struct page_directory {
- pagetable_t *pt[1024]; // virtual addresses of each page table
- pagetable_t *dir; // virtual address of page directory
size_t phys_addr; // physical address of page directory
+ // to modify a page directory, we first map it
+ // then we can use mirroring to edit it
+ // (the last 4M of the address space are mapped to the PD itself)
+
+ // more info to be stored here, potentially
};
// access kernel page directory page defined in loader.s
// (this is a correct higher-half address)
-extern pagetable_t kernel_pagedir;
+extern pagetable_t kernel_pd;
+
+// pre-allocate a page table so that we can map the first 4M of kernel memory
+static pagetable_t __attribute__((aligned(PAGE_SIZE))) kernel_pt0;
-static pagetable_t __attribute__((aligned(PAGE_SIZE))) kernel_pt768;
-static pagedir_t kernel_pd;
+static pagedir_t kernel_pd_d;
+static pagedir_t *current_pd_d;
-static pagedir_t *current_pd;
+#define current_pt ((pagetable_t*)PD_MIRROR_ADDR)
+#define current_pd ((pagetable_t*)(PD_MIRROR_ADDR + (N_PAGES_IN_PT-1)*PAGE_SIZE))
void page_fault_handler(registers_t *regs) {
- size_t addr;
- asm volatile("movl %%cr2, %0":"=r"(addr));
- dbg_printf("Page fault at 0x%p\n", addr);
- PANIC("PAGE FAULT");
- // not handled yet
+ size_t vaddr;
+ asm volatile("movl %%cr2, %0":"=r"(vaddr));
+
+ if (vaddr >= K_HIGHHALF_ADDR) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+
+ if (current_pd != &kernel_pd && vaddr >= K_HIGHHALF_ADDR
+ && current_pd->page[pt] != kernel_pd.page[pt])
+ {
+ current_pd->page[pt] = kernel_pd.page[pt];
+ invlpg((size_t)(&current_pt[pt]));
+ return;
+ }
+
+ region_info_t *i = find_region(vaddr);
+ if (i == 0) {
+ dbg_printf("Kernel pagefault in non-existing region at 0x%p\n", vaddr);
+ PANIC("Unhandled kernel space page fault");
+ }
+ if (i->pf == 0) {
+ dbg_printf("Kernel pagefault in region with no handler at 0x%p\n", vaddr);
+ PANIC("Unhandled kernel space page fault");
+ }
+ i->pf(current_pd_d, i, vaddr);
+ } else {
+ dbg_printf("Userspace page fault at 0x%p\n", vaddr);
+ PANIC("Unhandled userspace page fault");
+ // not handled yet
+ // TODO
+ }
}
void paging_setup(void* kernel_data_end) {
size_t n_kernel_pages =
PAGE_ALIGN_UP((size_t)kernel_data_end - K_HIGHHALF_ADDR)/PAGE_SIZE;
- ASSERT(n_kernel_pages <= 1024);
+ ASSERT(n_kernel_pages <= 1024); // we use less than 4M for kernel
- // setup kernel_pd structure
- kernel_pd.dir = &kernel_pagedir;
- kernel_pd.phys_addr = (size_t)kernel_pd.dir - K_HIGHHALF_ADDR;
- for (size_t i = 0; i < 1024; i++) kernel_pd.pt[i] = 0;
+ // setup kernel_pd_d structure
+ kernel_pd_d.phys_addr = (size_t)&kernel_pd - K_HIGHHALF_ADDR;
- // setup kernel_pt768
+ // setup kernel_pt0
+ ASSERT(PAGE_OF_ADDR(K_HIGHHALF_ADDR) == 0); // kernel is 4M-aligned
+ ASSERT(FIRST_KERNEL_PT == 768);
for (size_t i = 0; i < n_kernel_pages; i++) {
- kernel_pt768.page[i].as_int32 = 0; // clear any junk
- kernel_pt768.page[i].present = 1;
- kernel_pt768.page[i].user = 0;
- kernel_pt768.page[i].rw = 1;
- kernel_pt768.page[i].frame = i;
+ kernel_pt0.page[i] = (i << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW | PTE_GLOBAL;
}
for (size_t i = n_kernel_pages; i < 1024; i++){
- kernel_pt768.page[i].as_int32 = 0;
+ kernel_pt0.page[i] = 0;
}
- // replace 4M mapping by kernel_pt768
- kernel_pd.pt[768] = &kernel_pt768;
- kernel_pd.dir->page[768].as_int32 =
- (((size_t)&kernel_pt768 - K_HIGHHALF_ADDR) & PAGE_MASK) | 0x07;
+ // replace 4M mapping by kernel_pt0
+ kernel_pd.page[FIRST_KERNEL_PT] =
+ (((size_t)&kernel_pt0 - K_HIGHHALF_ADDR) & PAGE_MASK) | PTE_PRESENT | PTE_RW;
+ // set up mirroring
+ kernel_pd.page[N_PAGES_IN_PT-1] =
+ (((size_t)&kernel_pd - K_HIGHHALF_ADDR) & PAGE_MASK) | PTE_PRESENT | PTE_RW;
- current_pd = &kernel_pd;
+ invlpg(K_HIGHHALF_ADDR);
// paging already enabled in loader, nothing to do.
+ switch_pagedir(&kernel_pd_d);
// disable 4M pages (remove PSE bit in CR4)
uint32_t cr4;
@@ -90,45 +119,67 @@ void paging_setup(void* kernel_data_end) {
}
pagedir_t *get_current_pagedir() {
- return current_pd;
+ return current_pd_d;
}
pagedir_t *get_kernel_pagedir() {
- return &kernel_pd;
+ return &kernel_pd_d;
}
void switch_pagedir(pagedir_t *pd) {
asm volatile("movl %0, %%cr3":: "r"(pd->phys_addr));
+ invlpg((size_t)current_pd);
+ current_pd_d = pd;
}
// ============================== //
// Mapping and unmapping of pages //
// ============================== //
-uint32_t pd_get_frame(pagedir_t *pd, size_t vaddr) {
- uint32_t page = vaddr / PAGE_SIZE;
- uint32_t pt = page / PAGE_SIZE;
- uint32_t pt_page = page % PAGE_SIZE;
+uint32_t pd_get_frame(size_t vaddr) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+ uint32_t page = PAGE_OF_ADDR(vaddr);
+
+ pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
- if (pd == 0) return 0;
- if (pd->pt[pt] == 0) return 0;
- if (!pd->pt[pt]->page[pt_page].present) return 0;
- return pd->pt[pt]->page[pt_page].frame;
+ if (!(pd->page[pt] & PTE_PRESENT)) return 0;
+ if (!(current_pt[pt].page[page] & PTE_PRESENT)) return 0;
+ return current_pt[pt].page[page] >> PTE_FRAME_SHIFT;
}
-int pd_map_page(pagedir_t *pd, size_t vaddr, uint32_t frame_id, uint32_t rw) {
- return 1; // TODO
+int pd_map_page(size_t vaddr, uint32_t frame_id, uint32_t rw) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+ uint32_t page = PAGE_OF_ADDR(vaddr);
+
+ pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
+
+ if (!(pd->page[pt] & PTE_PRESENT)) {
+ size_t new_pt_frame = frame_alloc(1);
+ if (new_pt_frame == 0) return 1; // OOM
+
+ current_pd->page[pt] = pd->page[pt] =
+ (new_pt_frame << PTE_FRAME_SHIFT) | PTE_PRESENT | PTE_RW;
+ invlpg((size_t)&current_pt[pt]);
+ }
+
+ current_pt[pt].page[page] =
+ frame_id << PTE_FRAME_SHIFT
+ | PTE_PRESENT
+ | (vaddr < K_HIGHHALF_ADDR ? PTE_USER : PTE_GLOBAL)
+ | (rw ? PTE_RW : 0);
+
+ return 0;
}
-void pd_unmap_page(pagedir_t *pd, size_t vaddr) {
- uint32_t page = vaddr / PAGE_SIZE;
- uint32_t pt = page / PAGE_SIZE;
- uint32_t pt_page = page % PAGE_SIZE;
+void pd_unmap_page(size_t vaddr) {
+ uint32_t pt = PT_OF_ADDR(vaddr);
+ uint32_t page = PAGE_OF_ADDR(vaddr);
+
+ pagetable_t *pd = (vaddr >= K_HIGHHALF_ADDR ? &kernel_pd : current_pd);
- if (pd == 0) return;
- if (pd->pt[pt] == 0) return;
- if (!pd->pt[pt]->page[pt_page].present) return;
- pd->pt[pt]->page[pt_page].as_int32 = 0;
+ if (!(pd->page[pt] & PTE_PRESENT)) return;
+ if (!(current_pt[pt].page[page] & PTE_PRESENT)) return;
+ current_pt[pt].page[page] &= ~PTE_PRESENT;
// TODO (?) : if pagetable is completely empty, free it
}