path: root/Source/Library/Common
Diffstat (limited to 'Source/Library/Common')
-rw-r--r--   Source/Library/Common/Heap-index.class.cpp    51
-rw-r--r--   Source/Library/Common/Heap.class.cpp          232
-rw-r--r--   Source/Library/Common/Heap.class.h             92
-rw-r--r--   Source/Library/Common/Mutex.class.cpp          45
-rw-r--r--   Source/Library/Common/Mutex.class.h            21
5 files changed, 441 insertions, 0 deletions
diff --git a/Source/Library/Common/Heap-index.class.cpp b/Source/Library/Common/Heap-index.class.cpp
new file mode 100644
index 0000000..3280736
--- /dev/null
+++ b/Source/Library/Common/Heap-index.class.cpp
@@ -0,0 +1,51 @@
+#include "Heap.class.h"
+
+/*
+ * Implementation of the functions for managing heap index
+ */
+
+void Heap::insertIntoIndex(heap_header_t *e) {
+ //If index is full, return
+ if ((m_index.size * sizeof(heap_header_t*) + (u32int)m_index.data) >= m_start) return;
+
+ u32int iterator = 0;
+ while (iterator < m_index.size && m_index.data[iterator]->size < e->size) {
+ if (m_index.data[iterator] == e) return;
+ iterator++;
+ }
+ if (iterator == m_index.size) {
+ m_index.data[m_index.size++] = e;
+ } else {
+ u32int pos = iterator;
+ iterator = m_index.size;
+ while (iterator > pos) {
+ m_index.data[iterator] = m_index.data[iterator - 1];
+ iterator--;
+ }
+ m_index.size++;
+ m_index.data[pos] = e;
+ }
+}
+
+u32int Heap::findIndexEntry(heap_header_t *e) {
+ for (u32int i = 0; i < m_index.size; i++) {
+ if (m_index.data[i] == e)
+ return i;
+ }
+ return (u32int) - 1;
+}
+
+void Heap::removeFromIndex(u32int idx) {
+ m_index.size--;
+ while (idx < m_index.size) {
+ m_index.data[idx] = m_index.data[idx + 1];
+ idx++;
+ }
+}
+
+void Heap::removeFromIndex(heap_header_t *e) {
+ u32int i = findIndexEntry(e);
+ if (i != (u32int) - 1) {
+ removeFromIndex(i);
+ }
+}
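The index kept by these helpers is a flat array of heap_header_t pointers sorted by ascending hole size, so Heap::alloc() can simply walk it and take the first hole that is big enough. As a standalone illustration of the same ordered-insert idea (plain C++, with a hypothetical hole_t type standing in for heap_header_t, and without the capacity and duplicate checks above):

    #include <cstdint>

    struct hole_t { uint32_t size; };                      //stand-in for heap_header_t

    //Insert p into data[0..size), keeping the array sorted by ascending hole size.
    void ordered_insert(hole_t** data, uint32_t& size, hole_t* p) {
    	uint32_t i = 0;
    	while (i < size && data[i]->size < p->size) i++;   //find the first entry not smaller
    	for (uint32_t j = size; j > i; j--)                //shift bigger entries one slot right
    		data[j] = data[j - 1];
    	data[i] = p;
    	size++;
    }
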
diff --git a/Source/Library/Common/Heap.class.cpp b/Source/Library/Common/Heap.class.cpp
new file mode 100644
index 0000000..34e4dc4
--- /dev/null
+++ b/Source/Library/Common/Heap.class.cpp
@@ -0,0 +1,232 @@
+#include "Heap.class.h"
+
+#ifdef THIS_IS_MELON_KERNEL
+#include <MemoryManager/PageDirectory.class.h>
+#define ALLOC(x) m_pagedir->allocFrame(x, m_user, m_rw)
+#define FREE(x) m_pagedir->freeFrame(x)
+#else
+#define ALLOC(x) m_process.allocPage(x)
+#define FREE(x) m_process.freePage(x)
+#endif
+
+#ifdef THIS_IS_MELON_KERNEL
+Heap::Heap() : m_mutex(MUTEX_FALSE) {
+#else
+Heap::Heap() : m_mutex(MUTEX_FALSE), m_process(Process::get()) {
+#endif
+ m_usable = false;
+ m_index.data = 0;
+ m_index.size = 0;
+}
+
+Heap::~Heap() {
+	//TODO (optional): free pages.
+}
+
+#ifdef THIS_IS_MELON_KERNEL
+void Heap::create(u32int start, u32int size, u32int idxsize, PageDirectory* pagedir, bool user, bool rw) {
+#else
+void Heap::create(u32int start, u32int size, u32int idxsize) {
+#endif
+ if (m_usable) return;
+
+ if (start & 0x0FFF) start = (start & 0xFFFFF000) + 0x1000;
+ if (size & 0x0FFF) size = (size & 0xFFFFF000) + 0x1000;
+ m_start = start + idxsize; //m_start is start of real data, start is start of index.
+ m_end = start + size;
+
+#ifdef THIS_IS_MELON_KERNEL
+ m_pagedir = pagedir;
+ m_user = user;
+ m_rw = rw;
+#endif
+
+ //Allocate frames for heap
+ for (u32int i = start ; i < m_end; i += 0x1000) {
+ ALLOC(i);
+ }
+#ifdef THIS_IS_MELON_KERNEL
+ m_pagedir->switchTo();
+#endif
+
+ m_index.data = (heap_header_t **)start; //Set index start. start == start of all heap
+ m_index.size = 0;
+
+ heap_header_t *hole = (heap_header_t*) m_start; //m_start == start of data
+ hole->size = (m_end - m_start);
+ hole->magic = HEAP_MAGIC;
+ hole->is_hole = true;
+
+ heap_footer_t *hole_footer = (heap_footer_t*) (m_end - sizeof(heap_footer_t));
+ hole_footer->header = hole;
+ hole_footer->magic = HEAP_MAGIC;
+
+ insertIntoIndex(hole);
+
+ m_usable = true;
+ m_free = (m_end - m_start);
+
+ m_mutex.unlock();
+}
+
+void Heap::expand(u32int quantity) {
+ if (quantity & 0x00000FFF)
+ quantity = (quantity & 0xFFFFF000) + 0x1000;
+
+ u32int newEnd = m_end + quantity;
+
+	for (u32int i = m_end; i < newEnd; i += 0x1000) {	//Allocate the new frames page by page, as in create()
+ ALLOC(i);
+ }
+
+ heap_footer_t *last_footer = (heap_footer_t*) (m_end - sizeof(heap_footer_t));
+ heap_header_t *last_header = last_footer->header;
+ if (last_header->is_hole) { //Last block of heap is a hole, update its size
+ removeFromIndex(last_header);
+ last_header->size += quantity;
+
+ last_footer = (heap_footer_t*) (newEnd - sizeof(heap_footer_t));
+
+ last_footer->magic = HEAP_MAGIC;
+ last_footer->header = last_header;
+
+ insertIntoIndex(last_header);
+ } else { //Last block is not a hole. Just add a new hole at the end
+ last_header = (heap_header_t*)m_end;
+ last_footer = (heap_footer_t*) (newEnd - sizeof(heap_footer_t));
+
+ last_header->is_hole = true;
+ last_header->magic = HEAP_MAGIC;
+ last_header->size = quantity;
+
+ last_footer->magic = HEAP_MAGIC;
+ last_footer->header = last_header;
+
+ insertIntoIndex(last_header);
+ }
+
+ m_end = newEnd;
+ m_free += quantity;
+}
+
+void Heap::contract() { //Automatically work out how much we can contract
+ heap_footer_t *last_footer = (heap_footer_t*) (m_end - sizeof(heap_footer_t));
+ heap_header_t *last_header = last_footer->header;
+ if (last_header->is_hole == false) return; //We need a hole at end of heap
+
+ u32int quantity = 0;
+ while ((m_end - m_start) - quantity > HEAP_MIN_SIZE and
+ (last_header->size - quantity) > 0x1000) //Always keep at least 0x1000 free at end
+ quantity += 0x1000;
+ if (quantity == 0) return;
+
+ u32int newEnd = m_end - quantity;
+ m_free -= quantity;
+
+ removeFromIndex(last_header);
+ last_header->size -= quantity;
+ last_footer = (heap_footer_t*)((u32int)last_footer - quantity);
+ last_footer->magic = HEAP_MAGIC;
+ last_footer->header = last_header;
+ insertIntoIndex(last_header);
+
+ for (u32int i = newEnd; i < m_end; i += 0x1000) {
+ FREE(i);
+ }
+
+ m_end = newEnd;
+}
+
+void *Heap::alloc(u32int sz, bool no_expand) {
+ m_mutex.waitLock();
+
+ u32int newsize = sz + sizeof(heap_header_t) + sizeof(heap_footer_t);
+ u32int iterator = 0;
+ while (iterator < m_index.size) {
+ if (m_index.data[iterator]->size >= newsize) break;
+ iterator++;
+ }
+ if (iterator == m_index.size) { //No hole is big enough
+ if (no_expand) {
+ m_mutex.unlock();
+ return 0;
+ }
+ expand((sz & 0xFFFFF000) + 0x1000);
+ m_mutex.unlock();
+		return alloc(sz, true); //Recursive call; pass no_expand = true so we don't expand twice
+ }
+
+ heap_header_t *loc = m_index.data[iterator];
+ heap_footer_t *footer = (heap_footer_t*)((u32int)loc + loc->size - sizeof(heap_footer_t));
+ loc->is_hole = false; //Update current header
+
+ removeFromIndex(loc);
+
+	//Create a new hole after the allocated block, but only if there is room for its header and footer. Otherwise hand out the whole hole so that no space is lost
+ if (loc->size > (newsize + sizeof(heap_header_t) + sizeof(heap_footer_t))) {
+ loc->size = newsize; //Update header for return block
+
+ heap_footer_t *newfooter = (heap_footer_t*)((u32int)loc + newsize - sizeof(heap_footer_t)); //Write footer for return block
+ newfooter->header = loc;
+ newfooter->magic = HEAP_MAGIC;
+
+ heap_header_t *nextloc = (heap_header_t*)((u32int)loc + newsize); //Write header for new hole
+ nextloc->is_hole = true;
+ nextloc->magic = HEAP_MAGIC;
+ nextloc->size = ((u32int)footer - (u32int)nextloc + sizeof(heap_footer_t));
+
+ footer->header = nextloc; //Write footer for new hole
+ footer->magic = HEAP_MAGIC;
+
+ insertIntoIndex(nextloc);
+ }
+
+ m_free -= loc->size;
+
+ m_mutex.unlock();
+
+ return (void*)((u32int)loc + sizeof(heap_header_t));
+}
+
+void Heap::free(void *ptr) {
+ if (ptr == 0) return;
+
+ heap_header_t *header = (heap_header_t*) ((u32int)ptr - sizeof(heap_header_t));
+ heap_footer_t *footer = (heap_footer_t*)((u32int)header + header->size - sizeof(heap_footer_t));
+ if (header->magic != HEAP_MAGIC or footer->magic != HEAP_MAGIC) return;
+
+ m_mutex.waitLock();
+
+ m_free += header->size;
+
+ //Unify left
+ heap_footer_t *prev_footer = (heap_footer_t*)((u32int)header - sizeof(heap_footer_t));
+ if (prev_footer->magic == HEAP_MAGIC && prev_footer->header->is_hole) {
+ header = prev_footer->header;
+ removeFromIndex(header);
+
+ footer->header = header;
+ header->size = ((u32int)footer - (u32int)header + sizeof(heap_footer_t));
+ }
+
+ //Unify right
+ heap_header_t *next_header = (heap_header_t*)((u32int)footer + sizeof(heap_footer_t));
+ if (next_header->magic == HEAP_MAGIC && next_header->is_hole) {
+ removeFromIndex(next_header);
+ footer = (heap_footer_t*)((u32int)footer + next_header->size);
+
+ footer->header = header;
+ header->size = ((u32int)footer - (u32int)header + sizeof(heap_footer_t));
+ }
+
+ header->is_hole = true;
+
+ insertIntoIndex(header);
+
+ if ((u32int)footer == (m_end - sizeof(heap_footer_t)) and
+ header->size >= 0x2000 and (m_end - m_start > HEAP_MIN_SIZE)) {
+ contract();
+ }
+
+ m_mutex.unlock();
+}
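Each block handed out by alloc() is framed by a heap_header_t before the user data and a heap_footer_t after it, which is what lets free() reach both neighbours for merging. A small self-contained sketch of that accounting; the struct copies mirror the ones in Heap.class.h and the request and hole sizes are made-up numbers:

    #include <cstdint>
    #include <cstdio>

    //Local copies of the per-block metadata (u32int is a 32-bit unsigned in Melon).
    struct heap_header_t { uint32_t magic; bool is_hole; uint32_t size; };
    struct heap_footer_t { uint32_t magic; heap_header_t* header; };

    int main() {
    	uint32_t sz = 100;                              //hypothetical request
    	//Same accounting as Heap::alloc: the block also stores a header and a footer.
    	uint32_t newsize = sz + sizeof(heap_header_t) + sizeof(heap_footer_t);
    	uint32_t hole_size = 4096;                      //hypothetical hole taken from the index
    	//The hole is split only if the leftover can hold its own header and footer;
    	//otherwise the whole hole is handed out so no unusable sliver is created.
    	bool split = hole_size > newsize + sizeof(heap_header_t) + sizeof(heap_footer_t);
    	std::printf("block takes %u bytes, leftover becomes a new hole: %s\n",
    	            (unsigned)newsize, split ? "yes" : "no");
    	return 0;
    }
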
diff --git a/Source/Library/Common/Heap.class.h b/Source/Library/Common/Heap.class.h
new file mode 100644
index 0000000..f5895c7
--- /dev/null
+++ b/Source/Library/Common/Heap.class.h
@@ -0,0 +1,92 @@
+#ifndef DEF_HEAP_CLASS_H
+#define DEF_HEAP_CLASS_H
+
+#include <common.h>
+#include <Mutex.class.h>
+
+//Heap minimum size: 2 MiB
+#define HEAP_MIN_SIZE 0x00200000
+//Heap magic number, for verifications
+#define HEAP_MAGIC 0xBEEF1337
+
+struct heap_header_t {
+ u32int magic;
+ bool is_hole;
+ u32int size;
+};
+
+struct heap_footer_t {
+ u32int magic;
+ heap_header_t *header;
+};
+
+struct heap_index_t {
+ heap_header_t **data;
+ u32int size;
+};
+
+#ifdef THIS_IS_MELON_KERNEL
+class PageDirectory;
+#else
+#include <Binding/Process.class.h>
+#endif
+
+class Heap {
+ private:
+ u32int m_free, m_start, m_end;
+ bool m_usable;
+ heap_index_t m_index;
+#ifdef THIS_IS_MELON_KERNEL
+ bool m_user, m_rw;
+ PageDirectory* m_pagedir;
+#else
+ Process m_process;
+#endif
+
+ Mutex m_mutex;
+
+ void insertIntoIndex(heap_header_t *e);
+ u32int findIndexEntry(heap_header_t *e);
+ void removeFromIndex(u32int idx);
+ void removeFromIndex(heap_header_t *e);
+
+ void expand(u32int quantity);
+ void contract(); //Quantity is automatically calculated
+
+ public:
+ Heap();
+ ~Heap();
+
+#ifdef THIS_IS_MELON_KERNEL
+ void create(u32int start, u32int size, u32int idxsize, PageDirectory* pagedir, bool user, bool rw);
+#else
+ void create(u32int start, u32int size, u32int idxsize);
+#endif
+
+ void* alloc(u32int sz, bool no_expand = false);
+ void free(void* ptr);
+
+ bool usable() {
+ m_mutex.waitLock();
+ bool ret = m_usable;
+ m_mutex.unlock();
+ return ret;
+ }
+
+ u32int size() {
+ m_mutex.waitLock();
+ u32int ret = m_end - m_start;
+ m_mutex.unlock();
+ return ret;
+ }
+
+ u32int free() {
+ m_mutex.waitLock();
+ u32int ret = m_free;
+ m_mutex.unlock();
+ return ret;
+ }
+};
+
+
+#endif
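A hedged usage sketch of the kernel-side interface declared above; the start address, index size and the setupKernelHeap wrapper are illustrative assumptions, only the method signatures and HEAP_MIN_SIZE come from this header:

    #include "Heap.class.h"            //assuming THIS_IS_MELON_KERNEL is defined by the build

    //Illustrative values only, not taken from the Melon kernel.
    void setupKernelHeap(PageDirectory* pagedir) {
    	static Heap kheap;
    	kheap.create(0xD0000000,       //start of the heap's virtual range (placeholder)
    	             HEAP_MIN_SIZE,    //total size: 2 MiB
    	             0x10000,          //bytes reserved at the start for the hole index (placeholder)
    	             pagedir,
    	             false,            //user
    	             true);            //rw

    	void* p = kheap.alloc(256);    //may expand the heap if no hole is large enough
    	kheap.free(p);                 //merges the block back into neighbouring holes
    }
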
diff --git a/Source/Library/Common/Mutex.class.cpp b/Source/Library/Common/Mutex.class.cpp
new file mode 100644
index 0000000..2e9a63c
--- /dev/null
+++ b/Source/Library/Common/Mutex.class.cpp
@@ -0,0 +1,45 @@
+#include "Mutex.class.h"
+
+#ifdef THIS_IS_MELON_KERNEL
+#include <TaskManager/Task.ns.h>
+#endif
+
+#ifdef THIS_IS_MELON_USERLAND
+#include <Binding/Thread.class.h>
+#endif
+
+u32int atomic_exchange(u32int* ptr, u32int newval) {
+ u32int r;
+	asm volatile("xchg (%%ecx), %%eax" : "=a"(r) : "c"(ptr), "a"(newval) : "memory"); //"memory" clobber: the asm reads and writes *ptr
+ return r;
+}
+
+Mutex::Mutex(u32int locked) {
+ m_locked = locked;
+}
+
+bool Mutex::lock() {
+ if (atomic_exchange(&m_locked, MUTEX_TRUE) == MUTEX_TRUE) return false; //The lock was already locked
+ return true;
+}
+
+void Mutex::waitLock() {
+ while (atomic_exchange(&m_locked, MUTEX_TRUE) == MUTEX_TRUE) {
+#ifdef THIS_IS_MELON_KERNEL
+ if (Task::currThread() != 0) Task::currThread()->sleep(10); //Wait 10ms
+ else return;
+#endif
+
+#ifdef THIS_IS_MELON_USERLAND
+ Thread::get().sleep(10);
+#endif
+ }
+}
+
+void Mutex::unlock() {
+ m_locked = MUTEX_FALSE;
+}
+
+bool Mutex::locked() {
+ return m_locked;
+}
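xchg swaps the new value into memory and returns the previous one in a single atomic step, so waitLock() is a classic test-and-set lock. The same primitive expressed with portable C++11 atomics, as an equivalent sketch rather than Melon code:

    #include <atomic>

    std::atomic<unsigned> locked{0};

    void wait_lock() {
    	//exchange() stores 1 and returns the old value; 1 means the lock was already held.
    	while (locked.exchange(1) == 1) {
    		//spin (the Melon version instead puts the current thread to sleep for 10 ms per retry)
    	}
    }

    void unlock() {
    	locked.store(0);
    }
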
diff --git a/Source/Library/Common/Mutex.class.h b/Source/Library/Common/Mutex.class.h
new file mode 100644
index 0000000..1e3f63d
--- /dev/null
+++ b/Source/Library/Common/Mutex.class.h
@@ -0,0 +1,21 @@
+#ifndef DEF_MUTEX_CLASS_H
+#define DEF_MUTEX_CLASS_H
+
+#include <common.h>
+
+#define MUTEX_FALSE 0
+#define MUTEX_TRUE 1
+
+class Mutex {
+ private:
+ u32int m_locked;
+
+ public:
+ Mutex(u32int locked = MUTEX_FALSE);
+ bool lock(); //Locks the mutex if it is not locked. Returns true if mutex could be locked, false if already locked
+ void waitLock(); //Locks the mutex, waiting for it to be unlocked before if necessary
+ void unlock();
+ bool locked();
+};
+
+#endif
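
A short usage sketch of the two locking styles the class offers; listLock and the two functions are made-up examples, not Melon code:

    #include "Mutex.class.h"

    static Mutex listLock;             //hypothetical lock protecting some shared structure

    void criticalWork() {
    	listLock.waitLock();           //waits (sleeping 10 ms per retry) until the lock is acquired
    	//... touch the shared structure ...
    	listLock.unlock();
    }

    bool tryCriticalWork() {
    	if (!listLock.lock())          //returns false if the mutex is already held
    		return false;
    	//... touch the shared structure ...
    	listLock.unlock();
    	return true;
    }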