path: root/src/kernel/mem/mem.cpp
#include "mem.h"
#include <core/sys.h>
#include "paging.h"

#include <config.h>

#include "_dlmalloc.h"

#include "mem.h"

#define FREEPAGESTOKEEP 5			// number of already-mapped pages kept ready in the free-page cache below

#define KHEAP_IDXSIZE 0x4000		// only used with heap.std.h
#define KHEAP_INITSIZE 0x00080000
#define KHEAP_MAXSIZE 0x08000000

size_t mem_placementAddr;		// current end of the kernel's placement area (the kernel "break")
bool _no_more_ksbrk = false;	// set once dlmalloc takes over the break: ksbrk must then map/unmap pages itself


// ******************************
//       PAGE ALLOCATION
// ******************************
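/*	A small cache of already-mapped pages. Keeping a few pages ready means kmalloc_page() can
	hand one out with interrupts disabled, without having to walk page tables or grow the
	kernel break at that moment. */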
static struct freepage {
	size_t virt, phys;
} freepages[FREEPAGESTOKEEP];
uint32_t freepagecount = 0;

/*	For internal use only. Populates the cache of pages that can be given to requesters. */
static void get_free_pages() {
	static uint32_t locked = 0;
	uint32_t i;
	if (locked) return;
	locked = 1;
	while (freepagecount < FREEPAGESTOKEEP) {
		if (_no_more_ksbrk) {
			for (i = 0xFFFFF000; i >= 0xF0000000; i -= 0x1000) {
				if (pagedir_getPage(kernel_pagedir, i, 1)->frame == 0) break;
			}
			freepages[freepagecount].virt = i;
			uint32_t frame = frame_alloc();
			freepages[freepagecount].phys = frame * 0x1000;
			page_map(pagedir_getPage(kernel_pagedir, i, 0), freepages[freepagecount].phys / 0x1000, 0, 0);
			freepagecount++;
		} else {
			if (mem_placementAddr & 0x0FFF) {	// not page-aligned: round the placement address up
				mem_placementAddr &= 0xFFFFF000;
				mem_placementAddr += 0x1000;
			}
			freepages[freepagecount].virt = (size_t)ksbrk(0x1000);
			freepages[freepagecount].phys = freepages[freepagecount].virt - K_HIGHHALF_ADDR;
			freepagecount++;
		}
	}
	locked = 0;
}
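/*	Note on the early (pre-dlmalloc) branch above: the kernel is assumed to sit at a fixed
	higher-half offset, so a fresh page's physical address is simply its virtual address minus
	K_HIGHHALF_ADDR. Purely for illustration, with a hypothetical K_HIGHHALF_ADDR of 0xC0000000
	(the real constant comes from the build configuration): if ksbrk returned 0xC0150000, the
	backing frame would be at physical 0x00150000. */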

/*	Gives one page from the cache to someone requesting it. */
void* kmalloc_page(size_t *phys) {
	cli();
	get_free_pages();
	freepagecount--;
	*phys = freepages[freepagecount].phys;
	size_t tmp = freepages[freepagecount].virt;
	sti();
	return (void*)tmp;
}

void kfree_page(void* ptr) {
	size_t addr = (size_t)ptr;
	if (_no_more_ksbrk) {
		page_unmapFree(pagedir_getPage(kernel_pagedir, addr, 0));
	}
}
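/*	Usage sketch (illustration only, not code that is built here): a caller that needs one
	mapped kernel page together with its physical address, e.g. for a page table, would pair
	the two functions above like this. Note that before _no_more_ksbrk is set, kfree_page()
	is a no-op: placement-allocated pages are never returned.

		size_t phys;
		void *v = kmalloc_page(&phys);	// v: kernel-virtual address, phys: backing physical address
		// ... use the page ...
		kfree_page(v);					// only actually unmaps once _no_more_ksbrk is set
*/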

// ***********************************
//   MEMORY ALLOCATION FOR DLMALLOC
// ***********************************

void* ksbrk(size_t size) {
	if (!_no_more_ksbrk) {		// ksbrk is not yet driven by dlmalloc: round direct requests up to whole pages
		if (size & 0x0FFF) {
			size = (size & 0xFFFFF000) + 0x1000;
		}
	}

	size_t tmp = mem_placementAddr;
	size_t er_begin, er_end, i;

	mem_placementAddr += size;

	if (_no_more_ksbrk) {		// paging is active: map or unmap the pages covering the moved break
		if (tmp < mem_placementAddr) {
			er_begin = tmp;
			if (er_begin & 0x0FFF) er_begin = (er_begin & 0xFFFFF000) + 0x1000;
			er_end = mem_placementAddr;
			if (er_end & 0x0FFF) er_end = (er_end & 0xFFFFF000) + 0x1000;
			for (i = er_begin; i < er_end; i += 0x1000) {
				page *p = pagedir_getPage(kernel_pagedir, i, 1);
				size_t f = frame_alloc();
				page_map(p, f, 0, 0);
				/* (DBG) monitor_write("<map "); monitor_writeHex(i); monitor_write(" ");
				monitor_writeHex(f); monitor_write("> "); */
			}
		} else if (tmp > mem_placementAddr) {
			er_begin = (size_t)mem_placementAddr;
			if (er_begin & 0x0FFF) er_begin = (er_begin & 0xFFFFF000) + 0x1000;
			er_end = tmp;
			if (er_end & 0x0FFF) er_end = (er_end & 0xFFFFF000) + 0x1000;
			for (i = er_end - 0x1000; i >= er_begin; i -= 0x1000) {
				// (DBG) monitor_write("<unmap:"); monitor_writeHex(i); monitor_write("> ");
				page_unmapFree(pagedir_getPage(kernel_pagedir, i, 0));
			}
		}
	}

	return (void*)tmp;
}
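/*	ksbrk() plays the role of the sbrk()-style core allocator behind dlmalloc (presumably wired
	up through _dlmalloc.h; the exact hook name is not shown here). Growing the break maps every
	page the new region touches; a "negative" size (which wraps around in the size_t arithmetic
	above) moves the break down and unmaps the pages no longer covered. Worked example of the
	rounding, for illustration only: with an old break of 0xC0123456 and a new break of
	0xC0125000, er_begin rounds up to 0xC0124000 and er_end stays at 0xC0125000, so exactly one
	page (at 0xC0124000) gets mapped. */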

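/*	Sets the break to an absolute address by delegating to ksbrk() with the (possibly wrapped,
	i.e. negative) difference. `end` is presumably the linker-provided end-of-kernel-image
	symbol, declared elsewhere, so any target below it is rejected. */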
void kbrk(void* ptr) {
	if ((size_t)ptr > (size_t)&end) {
		ksbrk((size_t)ptr - (size_t)mem_placementAddr);
	} else {
		PANIC("INVALID KBRK.");
	}
}