#pragma once
#include <sys.h>
#include <stdbool.h>
#include <idt.h>
// Bits in the page fault error code
#define PF_PRESENT_BIT (1<<0)
#define PF_WRITE_BIT (1<<1)
#define PF_USER_BIT (1<<2)
#define PF_RSVD_WRITE_BIT (1<<3)
#define PF_OPFETCH_BIT (1<<4)
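// Example (illustrative sketch, assuming registers_t carries the error code in
// a field named err_code -- the actual field name may differ):
//
//   void page_fault_handler(registers_t *regs) {
//       uint32_t err = regs->err_code;
//       bool present = (err & PF_PRESENT_BIT);  // 0: page not present, 1: protection violation
//       bool write   = (err & PF_WRITE_BIT);    // 1: faulting access was a write
//       bool user    = (err & PF_USER_BIT);     // 1: fault occurred in user mode
//       // ...
//   }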
#define PTE_PRESENT (1<<0)
#define PTE_RW (1<<1)
#define PTE_USER (1<<2)
#define PTE_WRITE_THROUGH (1<<3)
#define PTE_DISABLE_CACHE (1<<4)
#define PTE_ACCESSED (1<<5)
#define PTE_DIRTY (1<<6) // only PTE
#define PTE_SIZE_4M (1<<7) // only PDE
#define PTE_GLOBAL (1<<8) // only PTE
#define PTE_FRAME_SHIFT 12
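// Example (illustrative sketch): composing a page table entry from a physical
// frame number and the flag bits above:
//
//   uint32_t make_pte(uint32_t frame_id, bool rw, bool user) {
//       uint32_t entry = (frame_id << PTE_FRAME_SHIFT) | PTE_PRESENT;
//       if (rw)   entry |= PTE_RW;
//       if (user) entry |= PTE_USER;
//       return entry;
//   }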
struct page_directory;
typedef struct page_directory pagedir_t;
typedef void (*user_pf_handler_t)(void* handler_data, registers_t *regs, void* addr);
void paging_setup(void* kernel_data_end);
pagedir_t *get_current_pagedir();
pagedir_t *get_kernel_pagedir();
void switch_pagedir(pagedir_t *pd);
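// Example (illustrative sketch): temporarily switching to the kernel page
// directory and back:
//
//   pagedir_t *prev = get_current_pagedir();
//   switch_pagedir(get_kernel_pagedir());
//   // ... work on kernel-only mappings ...
//   switch_pagedir(prev);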
// These functions are always relative to the currently mapped page directory
uint32_t pd_get_frame(void* vaddr); // get physical frame for virtual address
uint32_t pd_get_entry(void* vaddr); // same as pd_get_frame but returns whole entry with flags
bool pd_map_page(void* vaddr, uint32_t frame_id, bool rw); // returns true on success, false on failure
void pd_unmap_page(void* vaddr); // does nothing if page not mapped
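// Example (illustrative sketch, assuming a hypothetical frame_alloc() that
// returns a free physical frame number -- the actual allocator interface may differ):
//
//   void *vaddr = (void*)0xD0000000;
//   uint32_t frame = frame_alloc();
//   if (!pd_map_page(vaddr, frame, true)) {
//       // mapping failed (e.g. no memory for a new page table)
//   }
//   uint32_t entry = pd_get_entry(vaddr);   // physical frame plus flag bits
//   uint32_t fr    = pd_get_frame(vaddr);   // physical frame backing vaddr
//   pd_unmap_page(vaddr);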
// Note on concurrency: we expect that multiple threads will not try to map/unmap
// pages in the same region at the same time. It can nevertheless happen that
// several threads try to map pages that belong to the same 4M section, in which
// case each of them may require the allocation of a new PT at the same location.
// These cases are handled correctly (the pagedir_t type contains a mutex used for this).
pagedir_t *create_pagedir(user_pf_handler_t pf, void* pf_handler_data); // returns NULL on error
void delete_pagedir(pagedir_t *pd);
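// Example (illustrative sketch; process_pf_handler and some_process are
// hypothetical names used only for illustration):
//
//   void process_pf_handler(void* data, registers_t *regs, void* addr) {
//       // e.g. demand-page the faulting address or kill the offending process
//   }
//
//   pagedir_t *pd = create_pagedir(process_pf_handler, some_process);
//   if (pd == NULL) { /* out of memory */ }
//   switch_pagedir(pd);
//   // ...
//   delete_pagedir(pd);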
/* vim: set ts=4 sw=4 tw=0 noet :*/