#include "paging.h" #include #include "mem.h" #include "seg.h" #include #include #include #include static bitset frames; page_directory *kernel_pagedir, *current_pagedir; /************************** PHYSICAL MEMORY ALLOCATION ************************/ /* Allocates a page of physical memory. */ uint32_t frame_alloc() { uint32_t free = frames.firstFree(); if (free == (uint32_t) -1) { PANIC("No more frames to allocate, system is out of memory!"); } frames.set(free); return free; } void frame_free(uint32_t id) { frames.clear(id); } /************************* PAGING INITIALIZATION *****************************/ /* This function creates the kernel page directory. It must be called before the GDT is loaded. It maps 0xC0000000+ (k_highhalf_addr) to the corresponding physical kernel code, but it also maps 0x00000000+ to that code because with the false GDT we set up in loader_.asm, the code will be looked for at the beginning of the memory. Only when the real GDT is loaded we can de-allocate pages at 0x00000000 ; this is done by paging_cleanup. */ void paging_init(size_t totalRam) { uint32_t i; frames.size = totalRam / 0x1000; frames.bits = (uint32_t*)ksbrk(frames.mem_size()); memset(frames.bits, 0, frames.mem_size()); kernel_pagedir = (page_directory*)ksbrk(sizeof(page_directory)); kernel_pagedir->mappedSegs = 0; kernel_pagedir->tablesPhysical = (uint32_t*)kmalloc_page(&kernel_pagedir->physicalAddr); for (i = 0; i < 1024; i++) { kernel_pagedir->tables[i] = 0; kernel_pagedir->tablesPhysical[i] = 0; } for (i = K_HIGHHALF_ADDR; i < mem_placementAddr; i += 0x1000) { page_map(pagedir_getPage(kernel_pagedir, i, 1), frame_alloc(), 0, 0); } for (i = 0; i < (mem_placementAddr - K_HIGHHALF_ADDR) / 0x100000; i++) { kernel_pagedir->tablesPhysical[i] = kernel_pagedir->tablesPhysical[i + FIRST_KERNEL_PAGETABLE]; kernel_pagedir->tables[i] = kernel_pagedir->tables[i + FIRST_KERNEL_PAGETABLE]; } pagedir_switch(kernel_pagedir); } /* De-allocates pages at 0x00000000 where kernel code was read from with the GDT from loader_.asm. */ void paging_cleanup() { size_t i; for (i = 0; i < (mem_placementAddr - K_HIGHHALF_ADDR) / 0x100000; i++) { kernel_pagedir->tablesPhysical[i] = 0; kernel_pagedir->tables[i] = 0; } } /************************* PAGING EVERYDAY USE *****************************/ /* Switch to a page directory. Can be done if we are sure not to be interrupted by a task switch. Example use for cross-memory space writing in linker/elf.c */ void pagedir_switch(page_directory *pd) { current_pagedir = pd; asm volatile("mov %0, %%cr3" : : "r"(pd->physicalAddr)); uint32_t cr0; asm volatile("mov %%cr0, %0" : "=r"(cr0)); cr0 |= 0x80000000; asm volatile("mov %0, %%cr0" : : "r"(cr0)); } /* Creates a new page directory for a process, and maps the kernel page tables on it. */ page_directory *pagedir_new() { uint32_t i; page_directory *pd = new page_directory(); pd->tablesPhysical = (uint32_t*)kmalloc_page(&pd->physicalAddr); pd->mappedSegs = 0; for (i = 0; i < 1024; i++) { pd->tables[i] = 0; pd->tablesPhysical[i] = 0; } for (i = FIRST_KERNEL_PAGETABLE; i < 1024; i++) { pd->tables[i] = kernel_pagedir->tables[i]; pd->tablesPhysical[i] = kernel_pagedir->tablesPhysical[i]; } return pd; } /* Deletes a page directory, cleaning it up. 
/* Deletes a page directory, cleaning it up. */
void pagedir_delete(page_directory *pd) {
	uint32_t i;

	// Unmap segments
	while (pd->mappedSegs != 0) pd->mappedSegs->seg->unmap(pd->mappedSegs);

	// Clean up page tables
	for (i = 0; i < FIRST_KERNEL_PAGETABLE; i++) {
		kfree_page(pd->tables[i]);
	}
	kfree_page(pd->tablesPhysical);
	kfree(pd);
}

/* Handles a page fault. First, looks for the segment mapped at the faulting address.
   If the segment is found and it handles the fault, returns normally (0). Otherwise,
   displays information about the fault and returns an error (1). */
uint32_t paging_fault(registers *regs) {
	size_t addr;
	segment_map *seg = 0;

	asm volatile("mov %%cr2, %0" : "=r"(addr));	// CR2 holds the faulting address

	seg = current_pagedir->mappedSegs;
	while (seg) {
		if (seg->start <= addr && seg->start + seg->len > addr) break;
		seg = seg->next;
	}

	if (seg != 0) {
		if (seg->seg->handle_fault(seg, addr, (regs->err_code & 0x2) && (regs->eip < K_HIGHHALF_ADDR)) != 0) {
			seg = 0;
		}
	}
	if (seg == 0) {
		dbg_printf("[ke:%s:%d] Unhandled page fault\n", __FILE__, __LINE__);
		dbg_printf("\tPID: %d\n", current_thread->process->pid);
		dbg_printf("\tcr2: %p\n", addr);
		dbg_printf("\tflags:");
		if (regs->err_code & 0x1) dbg_printf(" present");
		if (regs->err_code & 0x2) dbg_printf(" write");
		if (regs->err_code & 0x4) dbg_printf(" user");
		if (regs->err_code & 0x8) dbg_printf(" rsvd");
		if (regs->err_code & 0x10) dbg_printf(" opfetch");
		dbg_printf("\n");
		return 1;
	}
	return 0;
}

/* Gets the page structure for a given virtual address in a page directory. If make is set,
   the necessary page table is created if it does not exist yet. Returns 0 if the table does
   not exist and make is not set. */
page *pagedir_getPage(page_directory *pd, uint32_t address, int make) {
	address /= 0x1000;
	uint32_t table_idx = address / 1024;
	if (pd->tables[table_idx]) {
		return &pd->tables[table_idx]->pages[address % 1024];
	} else if (make) {
		pd->tables[table_idx] = (page_table*)kmalloc_page(pd->tablesPhysical + table_idx);
		memset((uint8_t*)pd->tables[table_idx], 0, 0x1000);
		pd->tablesPhysical[table_idx] |= 0x07;	// present, read/write, user
		if (table_idx >= FIRST_KERNEL_PAGETABLE) {
			tasking_updateKernelPagetable(table_idx, pd->tables[table_idx], pd->tablesPhysical[table_idx]);
		}
		return &pd->tables[table_idx]->pages[address % 1024];
	} else {
		return 0;
	}
}

/* Modifies a page structure so that it is mapped to a frame. */
void page_map(page *page, uint32_t frame, uint32_t user, uint32_t rw) {
	if (page != 0 && page->frame == 0 && page->present == 0) {
		page->present = 1;
		page->rw = (rw ? 1 : 0);
		page->user = (user ? 1 : 0);
		page->frame = frame;
	}
}

/* Modifies a page structure so that it is no longer mapped to a frame. */
void page_unmap(page *page) {
	if (page != 0) {
		page->frame = 0;
		page->present = 0;
	}
}

/* Same as above, but also frees the frame. */
void page_unmapFree(page *page) {
	if (page != 0) {
		if (page->frame != 0) frame_free(page->frame);
		page->frame = 0;
		page->present = 0;
	}
}
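/* Teardown sketch (illustrative only; `vaddr` is a hypothetical page-aligned virtual address,
   and TLB invalidation is assumed to happen elsewhere, e.g. by reloading CR3 through
   pagedir_switch, since this file does not issue invlpg itself):

	page *p = pagedir_getPage(current_pagedir, vaddr, 0);	// do not create missing tables
	if (p != 0) page_unmapFree(p);				// drop the mapping and release its frame
*/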