Diffstat (limited to 'sos-code-article5/sos')
-rw-r--r--  sos-code-article5/sos/assert.c      44
-rw-r--r--  sos-code-article5/sos/assert.h      38
-rw-r--r--  sos-code-article5/sos/errno.h       41
-rw-r--r--  sos-code-article5/sos/klibc.c      274
-rw-r--r--  sos-code-article5/sos/klibc.h       88
-rw-r--r--  sos-code-article5/sos/kmalloc.c    113
-rw-r--r--  sos-code-article5/sos/kmalloc.h     63
-rw-r--r--  sos-code-article5/sos/kmem_slab.c  812
-rw-r--r--  sos-code-article5/sos/kmem_slab.h  206
-rw-r--r--  sos-code-article5/sos/kmem_vmm.c   608
-rw-r--r--  sos-code-article5/sos/kmem_vmm.h   112
-rw-r--r--  sos-code-article5/sos/list.h       186
-rw-r--r--  sos-code-article5/sos/macros.h      41
-rw-r--r--  sos-code-article5/sos/main.c       450
-rw-r--r--  sos-code-article5/sos/physmem.c    318
-rw-r--r--  sos-code-article5/sos/physmem.h    146
-rw-r--r--  sos-code-article5/sos/types.h       53
17 files changed, 3593 insertions, 0 deletions
diff --git a/sos-code-article5/sos/assert.c b/sos-code-article5/sos/assert.c
new file mode 100644
index 0000000..0f48d57
--- /dev/null
+++ b/sos-code-article5/sos/assert.c
@@ -0,0 +1,44 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/klibc.h>
+#include <drivers/bochs.h>
+#include <drivers/x86_videomem.h>
+
+#include "assert.h"
+
+void sos_display_fatal_error(const char *format, /* args */...)
+{
+ char buff[256];
+ va_list ap;
+
+  asm("cli\n"); /* disable interrupts -- x86 only */
+
+ va_start(ap, format);
+ vsnprintf(buff, sizeof(buff), format, ap);
+ va_end(ap);
+
+ sos_bochs_putstring(buff); sos_bochs_putstring("\n");
+ sos_x86_videomem_putstring(24, 0,
+ SOS_X86_VIDEO_BG_BLACK
+ | SOS_X86_VIDEO_FG_LTRED , buff);
+
+ /* Infinite loop: processor halted */
+ for ( ; ; )
+ asm("hlt\n");
+}
diff --git a/sos-code-article5/sos/assert.h b/sos-code-article5/sos/assert.h
new file mode 100644
index 0000000..285554f
--- /dev/null
+++ b/sos-code-article5/sos/assert.h
@@ -0,0 +1,38 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_ASSERT_H_
+#define _SOS_ASSERT_H_
+
+
+void sos_display_fatal_error(const char *format, /* args */...)
+ __attribute__ ((format (printf, 1, 2), noreturn));
+
+
+/**
+ * If the expr is FALSE, print a message and halt the machine
+ */
+#define SOS_ASSERT_FATAL(expr) \
+ ({ \
+ int __res=(int)(expr); \
+ if (! __res) \
+ sos_display_fatal_error("%s@%s:%d Assertion " # expr " failed", \
+ __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+ })
+
+
+#endif /* _SOS_ASSERT_H_ */
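
As a brief aside on usage: SOS_ASSERT_FATAL() guards invariants that must never break. A minimal sketch, assuming the SOS build environment (the helper and its argument are hypothetical):

#include <sos/assert.h>
#include <sos/types.h>

/* Hypothetical helper: refuse to continue with a NULL kernel address. */
static void must_be_mapped(sos_vaddr_t vaddr)
{
  /* On failure, prints "<function>@<file>:<line> Assertion vaddr != 0
     failed" to the Bochs console and the screen, disables interrupts,
     and halts the CPU forever. */
  SOS_ASSERT_FATAL(vaddr != 0);
}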
diff --git a/sos-code-article5/sos/errno.h b/sos-code-article5/sos/errno.h
new file mode 100644
index 0000000..50fb05b
--- /dev/null
+++ b/sos-code-article5/sos/errno.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2004 The SOS Team
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_ERRNO_H_
+#define _SOS_ERRNO_H_
+
+/**
+ * @file errno.h
+ *
+ * SOS return value codes and errors.
+ */
+
+/* Positive values of the error codes */
+#define SOS_OK 0 /* No error */
+#define SOS_EINVAL 1 /* Invalid argument */
+#define SOS_ENOSUP 2 /* Operation not supported */
+#define SOS_ENOMEM 3 /* No available memory */
+#define SOS_EBUSY 4 /* Object or device still in use */
+#define SOS_EFATAL 255 /* Internal fatal error */
+
+/* A negative value means that an error occurred. For
+ * example, -SOS_EINVAL means that the error was "invalid
+ * argument" */
+typedef int sos_ret_t;
+
+#endif /* _SOS_ERRNO_H_ */
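
To make the sign convention concrete, a minimal sketch assuming only this header (the routine is hypothetical):

#include <sos/errno.h>

/* Hypothetical routine: returns SOS_OK (0) on success, or the
   relevant error code, negated, on failure. */
static sos_ret_t set_percentage(int value)
{
  if ((value < 0) || (value > 100))
    return -SOS_EINVAL;
  return SOS_OK;
}

/* Caller side: a single "< 0" test catches every error:
     if (set_percentage(p) < 0)
       ... handle the failure ...                        */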
diff --git a/sos-code-article5/sos/klibc.c b/sos-code-article5/sos/klibc.c
new file mode 100644
index 0000000..9d719c1
--- /dev/null
+++ b/sos-code-article5/sos/klibc.c
@@ -0,0 +1,274 @@
+/* Copyright (C) 2004 David Decotigny (with INSA Rennes for vsnprintf)
+ Copyright (C) 2003 The KOS Team
+ Copyright (C) 1999 Free Software Foundation
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include "klibc.h"
+
+/* For an optimized version, see BSD sources ;) */
+void *memcpy(void *dst0, const void *src0, register unsigned int size)
+{
+ char *dst;
+ const char *src;
+ for (dst = (char*)dst0, src = (const char*)src0 ;
+ size > 0 ;
+ dst++, src++, size--)
+ *dst = *src;
+ return dst0;
+}
+
+/* ditto */
+void *memset(void *dst0, register int c, register unsigned int length)
+{
+ char *dst;
+ for (dst = (char*) dst0 ;
+ length > 0 ;
+ dst++, length --)
+ *dst = (char)c;
+ return dst0;
+}
+
+int memcmp(const void *s1, const void *s2, sos_size_t len)
+{
+ const unsigned char *c1, *c2;
+ unsigned int i;
+
+ for (i = 0, c1 = s1, c2 = s2; i < len; i++, c1++, c2++)
+ {
+ if(*c1 != *c2)
+ return *c1 - *c2;
+ }
+
+ return 0;
+}
+
+
+unsigned int strlen(register const char *str)
+{
+ unsigned int retval = 0;
+
+ while (*str++)
+ retval++;
+
+ return retval;
+}
+
+
+unsigned int strnlen(const char * s, sos_size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */continue;
+
+ return sc - s;
+}
+
+
+char *strzcpy(register char *dst, register const char *src, register int len)
+{
+ int i;
+
+ if (len <= 0)
+ return dst;
+
+ for (i = 0; i < len; i++)
+ {
+ dst[i] = src[i];
+ if(src[i] == '\0')
+ return dst;
+ }
+
+ dst[len-1] = '\0';
+ return dst;
+}
+
+
+char *strzcat (char *dest, const char *src, sos_size_t n)
+{
+ char *res = dest;
+
+ for ( ; *dest ; dest++);
+
+ for ( ; *src ; src++, dest++) {
+ *dest = *src;
+ n--;
+ if (n <= 0)
+ break;
+ }
+
+ *dest = '\0';
+ return res;
+}
+
+int strcmp(register const char *s1, register const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == 0)
+ return (0);
+
+ return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1));
+}
+
+
+int strncmp(register const char *s1, register const char *s2, register int len)
+{
+ char c1 = '\0', c2 = '\0';
+
+ while (len > 0)
+ {
+ c1 = (unsigned char) *s1++;
+ c2 = (unsigned char) *s2++;
+ if (c1 == '\0' || c1 != c2)
+ return c1 - c2;
+ len--;
+ }
+
+ return c1 - c2;
+}
+
+
+/* I (d2) borrowed and rewrote this for Nachos/INSA Rennes. Thanks to
+ them for having kindly allowed me to do so. */
+int vsnprintf(char *buff, sos_size_t len, const char * format, va_list ap)
+{
+ sos_size_t i, result;
+
+  if (!buff || !format || (len == 0)) /* len is unsigned: reject 0 */
+ return -1;
+
+#define PUTCHAR(thechar) \
+ do { \
+ if (result < len-1) \
+ *buff++ = (thechar); \
+ result++; \
+ } while (0)
+
+ result = 0;
+ for(i=0 ; format[i] != '\0' ; i++){
+ switch (format[i])
+ {
+ case '%':
+ i++;
+ switch(format[i])
+ {
+ case '%':
+ {
+ PUTCHAR('%');
+ break;
+ }
+        case 'i': /* fall through: same handling as 'd' */
+ case 'd':
+ {
+ int integer = va_arg(ap,int);
+ int cpt2 = 0;
+ char buff_int[16];
+
+ if (integer<0)
+ PUTCHAR('-');
+          /* Do not do integer = -integer here, because INT_MIN
+             has no positive counterpart (int = [-2^31, 2^31-1]) */
+
+ do {
+ int m10 = integer%10;
+ m10 = (m10 < 0)? -m10:m10;
+ buff_int[cpt2++]=(char)('0'+ m10);
+ integer=integer/10;
+ } while(integer!=0);
+
+ for(cpt2 = cpt2 - 1 ; cpt2 >= 0 ; cpt2--)
+ PUTCHAR(buff_int[cpt2]);
+
+ break;
+ }
+
+ case 'c':
+ {
+ int value = va_arg(ap,int);
+ PUTCHAR((char)value);
+ break;
+ }
+
+ case 's':
+ {
+ char *string = va_arg(ap,char *);
+ if (! string)
+ string = "(null)";
+ for( ; *string != '\0' ; string++)
+ PUTCHAR(*string);
+ break;
+ }
+
+        case 'p':
+          PUTCHAR('0');
+          PUTCHAR('x'); /* no break: fall through to 'x' */
+ case 'x':
+ {
+ unsigned int hexa = va_arg(ap,int);
+ unsigned int nb;
+ int i, had_nonzero = 0;
+ for(i=0 ; i < 8 ; i++)
+ {
+ nb = (unsigned int)(hexa << (i*4));
+ nb = (nb >> 28) & 0xf;
+ // Skip the leading zeros
+ if (nb == 0)
+ {
+ if (had_nonzero)
+ PUTCHAR('0');
+ }
+ else
+ {
+ had_nonzero = 1;
+ if (nb < 10)
+ PUTCHAR('0'+nb);
+ else
+ PUTCHAR('a'+(nb-10));
+ }
+ }
+ if (! had_nonzero)
+ PUTCHAR('0');
+ break;
+ }
+ break;
+
+ default:
+ PUTCHAR('%');
+ PUTCHAR(format[i]);
+ }
+ break;
+
+ default:
+ PUTCHAR(format[i]);
+ }
+ }
+
+ *buff = '\0';
+ return result;
+}
+
+
+int snprintf(char * buff, sos_size_t len, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ len = vsnprintf(buff, len, format, ap);
+ va_end(ap);
+
+ return len;
+}
diff --git a/sos-code-article5/sos/klibc.h b/sos-code-article5/sos/klibc.h
new file mode 100644
index 0000000..5500175
--- /dev/null
+++ b/sos-code-article5/sos/klibc.h
@@ -0,0 +1,88 @@
+/* Copyright (C) 2003 The KOS Team
+ Copyright (C) 1999 Free Software Foundation
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KLIBC_H_
+#define _SOS_KLIBC_H_
+
+/**
+ * @file klibc.h
+ *
+ * Basic libc-style support for common useful functions (string.h,
+ * stdarg.h), some with slight non-standard behavior (see comments).
+ *
+ * Most of the prototypes of these functions are borrowed from
+ * FreeBSD, but their implementation (in klibc.c) comes either from
+ * KOS (GPL v2) or from David Decotigny (SOS).
+ */
+
+#include <sos/types.h>
+
+/* string.h functions */
+
+void *memcpy(void *dst, const void *src, register unsigned int size ) ;
+void *memset(void *dst, register int c, register unsigned int length ) ;
+int memcmp(const void *s1, const void *s2, sos_size_t n);
+
+unsigned int strlen( register const char *str) ;
+unsigned int strnlen(const char * s, sos_size_t maxlen);
+
+/**
+ * @note Same as strncpy(), with a slightly different semantic.
+ * Actually, strncpy(3C) says " The result will not be null-terminated
+ * if the length of 'from' is n or more.". Here, 'dst' is ALWAYS
+ * null-terminated. And its total len will ALWAYS be <= len, with
+ * null-terminating-char included.
+ */
+char *strzcpy( register char *dst, register const char *src,
+ register int len ) ;
+
+/**
+ * @note Same as strncat(), with the same semantic : 'dst' is ALWAYS
+ * null-terminated. And its total len will ALWAYS be <= len, with
+ * null-terminating-char included.
+ */
+char *strzcat (char *dest, const char *src,
+ const sos_size_t len);
+
+int strcmp(register const char *s1, register const char *s2 );
+int strncmp(register const char *s1, register const char *s2,
+ register int len );
+
+/* Basic stdarg.h macros. Taken from gcc support files */
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+typedef __gnuc_va_list va_list;
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+#define va_end(AP) \
+ ((void)0)
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#define __va_copy(dest, src) \
+ (dest) = (src)
+
+/* stdarg.h functions. Note the non-standard behavior: the resulting
+   string is always terminated with a trailing '\0' */
+int vsnprintf(char *, sos_size_t, const char *, va_list);
+int snprintf(char *, sos_size_t, const char *, /*args*/ ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+#endif /* _SOS_KLIBC_H_ */
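
To illustrate the "always null-terminated" guarantee that distinguishes strzcpy() and snprintf() from their standard cousins, a small sketch assuming the SOS build environment:

#include <sos/klibc.h>

static void klibc_demo(void)
{
  char buf[8];

  /* Unlike strncpy(), the destination is ALWAYS null-terminated:
     buf receives the first 7 characters plus a trailing '\0'. */
  strzcpy(buf, "a long string", sizeof(buf));

  /* Same guarantee for snprintf(): at most 7 characters of output
     plus the trailing '\0' are stored. */
  snprintf(buf, sizeof(buf), "%d%%", 42);   /* buf = "42%" */
}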
diff --git a/sos-code-article5/sos/kmalloc.c b/sos-code-article5/sos/kmalloc.c
new file mode 100644
index 0000000..b4822b6
--- /dev/null
+++ b/sos-code-article5/sos/kmalloc.c
@@ -0,0 +1,113 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/assert.h>
+#include <sos/macros.h>
+
+#include "physmem.h"
+#include "kmem_vmm.h"
+#include "kmem_slab.h"
+
+#include "kmalloc.h"
+
+/* The pre-allocated kmalloc caches: for each, its name, the size of
+   the objects it serves, and the number of pages per slab. The
+   object sizes need not be powers of 2. */
+static struct {
+ const char *name;
+ sos_size_t object_size;
+ sos_count_t pages_per_slab;
+ struct sos_kslab_cache *cache;
+} kmalloc_cache[] =
+ {
+ { "kmalloc 8B objects", 8, 1 },
+ { "kmalloc 16B objects", 16, 1 },
+ { "kmalloc 32B objects", 32, 1 },
+ { "kmalloc 64B objects", 64, 1 },
+ { "kmalloc 128B objects", 128, 1 },
+ { "kmalloc 256B objects", 256, 2 },
+ { "kmalloc 1024B objects", 1024, 2 },
+ { "kmalloc 2048B objects", 2048, 3 },
+ { "kmalloc 4096B objects", 4096, 4 },
+ { "kmalloc 8192B objects", 8192, 8 },
+ { "kmalloc 16384B objects", 16384, 12 },
+ { NULL, 0, 0, NULL }
+ };
+
+
+sos_ret_t sos_kmalloc_setup()
+{
+ int i;
+ for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
+ {
+ struct sos_kslab_cache *new_cache;
+ new_cache = sos_kmem_cache_create(kmalloc_cache[i].name,
+ kmalloc_cache[i].object_size,
+ kmalloc_cache[i].pages_per_slab,
+ 0,
+ SOS_KSLAB_CREATE_MAP
+ );
+ SOS_ASSERT_FATAL(new_cache != NULL);
+ kmalloc_cache[i].cache = new_cache;
+ }
+ return SOS_OK;
+}
+
+
+sos_vaddr_t sos_kmalloc(sos_size_t size, sos_ui32_t flags)
+{
+ /* Look for a suitable pre-allocated kmalloc cache */
+ int i;
+ for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
+ {
+ if (kmalloc_cache[i].object_size >= size)
+ return sos_kmem_cache_alloc(kmalloc_cache[i].cache,
+ (flags
+ & SOS_KMALLOC_ATOMIC)?
+ SOS_KSLAB_ALLOC_ATOMIC:0);
+ }
+
+ /* none found yet => we directly use the kmem_vmm subsystem to
+ allocate whole pages */
+ return sos_kmem_vmm_alloc(SOS_PAGE_ALIGN_SUP(size) / SOS_PAGE_SIZE,
+ ( (flags
+ & SOS_KMALLOC_ATOMIC)?
+ SOS_KMEM_VMM_ATOMIC:0)
+ | SOS_KMEM_VMM_MAP
+ );
+}
+
+
+sos_ret_t sos_kfree(sos_vaddr_t vaddr)
+{
+ /* The trouble here is that we aren't sure whether this object is a
+ slab object in a pre-allocated kmalloc cache, or an object
+ directly allocated as a kmem_vmm region. */
+
+ /* We first pretend this object is allocated in a pre-allocated
+ kmalloc cache */
+ if (! sos_kmem_cache_free(vaddr))
+ return SOS_OK; /* Great ! We guessed right ! */
+
+ /* Here we're wrong: it appears not to be an object in a
+ pre-allocated kmalloc cache. So we try to pretend this is a
+ kmem_vmm area */
+ return sos_kmem_vmm_free(vaddr);
+}
+
+
diff --git a/sos-code-article5/sos/kmalloc.h b/sos-code-article5/sos/kmalloc.h
new file mode 100644
index 0000000..bdea478
--- /dev/null
+++ b/sos-code-article5/sos/kmalloc.h
@@ -0,0 +1,63 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMALLOC_H_
+#define _SOS_KMALLOC_H_
+
+/**
+ * @file kmalloc.h
+ *
+ * Simple malloc-style wrapper to kmem_vmm.h and kmem_slab.h for
+ * "anonymous" objects (i.e. not associated with any one slab cache).
+ */
+
+#include <sos/types.h>
+#include <sos/errno.h>
+
+
+/**
+ * Initialize the kmalloc subsystem, i.e. pre-allocate a series of caches.
+ */
+sos_ret_t sos_kmalloc_setup(void);
+
+/*
+ * sos_kmalloc flags
+ */
+/** sos_kmalloc() should succeed without blocking, or return NULL */
+#define SOS_KMALLOC_ATOMIC 1
+
+/**
+ * Allocate a kernel object of the given size from the most suitable
+ * pre-allocated slab cache when the size can be handled by one of
+ * them, or directly from the range allocator otherwise. The object
+ * will always be mapped in physical memory (i.e. implies
+ * SOS_KSLAB_CREATE_MAP and SOS_KMEM_VMM_MAP).
+ *
+ * @param size The size of the object
+ * @param flags The allocation flags (SOS_KMALLOC_* flags)
+ */
+sos_vaddr_t sos_kmalloc(sos_size_t size, sos_ui32_t flags);
+
+/**
+ * @note You are perfectly allowed to pass the address of the kernel
+ * image, or the address of the BIOS area here: it will work, and the
+ * kernel/BIOS WILL be "deallocated". But if you really want to do
+ * this, well..., do expect some "surprises" ;)
+ */
+sos_ret_t sos_kfree(sos_vaddr_t vaddr);
+
+#endif /* _SOS_KMALLOC_H_ */
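
A usage sketch, assuming sos_kmalloc_setup() has already been called during kernel init: a request is served by the smallest pre-allocated cache whose object size can hold it (here, 100 bytes comes from the "kmalloc 128B objects" cache), and oversized requests fall back to whole pages from kmem_vmm.

#include <sos/kmalloc.h>

static sos_ret_t kmalloc_demo(void)
{
  /* 100 <= 128 => served by the "kmalloc 128B objects" cache. */
  sos_vaddr_t obj = sos_kmalloc(100, 0 /* may block */);
  if (! obj)
    return -SOS_ENOMEM;

  /* ... use the 100 bytes at address obj ... */

  /* sos_kfree() determines by itself whether obj belongs to a
     kmalloc cache or to a directly-allocated kmem_vmm range. */
  return sos_kfree(obj);
}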
diff --git a/sos-code-article5/sos/kmem_slab.c b/sos-code-article5/sos/kmem_slab.c
new file mode 100644
index 0000000..557508c
--- /dev/null
+++ b/sos-code-article5/sos/kmem_slab.c
@@ -0,0 +1,812 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include <sos/macros.h>
+#include <sos/klibc.h>
+#include <sos/list.h>
+#include <sos/assert.h>
+#include <hwcore/paging.h>
+#include <sos/physmem.h>
+#include <sos/kmem_vmm.h>
+
+#include "kmem_slab.h"
+
+/* Dimensioning constants */
+#define NB_PAGES_IN_SLAB_OF_CACHES 1
+#define NB_PAGES_IN_SLAB_OF_RANGES 1
+
+/** The structure of a slab cache */
+struct sos_kslab_cache
+{
+ char *name;
+
+ /* non mutable characteristics of this slab */
+ sos_size_t original_obj_size; /* asked object size */
+ sos_size_t alloc_obj_size; /* actual object size, taking the
+ alignment constraints into account */
+ sos_count_t nb_objects_per_slab;
+ sos_count_t nb_pages_per_slab;
+ sos_count_t min_free_objects;
+
+/* slab cache flags */
+// #define SOS_KSLAB_CREATE_MAP (1<<0) /* See kmem_slab.h */
+// #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
+#define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
+ sos_ui32_t flags;
+
+ /* Supervision data (updated at run-time) */
+ sos_count_t nb_free_objects;
+
+ /* The lists of slabs owned by this cache */
+ struct sos_kslab *slab_list; /* head = non full, tail = full */
+
+ /* The caches are linked together on the kslab_cache_list */
+ struct sos_kslab_cache *prev, *next;
+};
+
+
+/** The structure of a slab */
+struct sos_kslab
+{
+ /** Number of free objects on this slab */
+ sos_count_t nb_free;
+
+ /** The list of these free objects */
+ struct sos_kslab_free_object *free;
+
+ /** The address of the associated range structure */
+ struct sos_kmem_range *range;
+
+ /** Virtual start address of this range */
+ sos_vaddr_t first_object;
+
+ /** Slab cache owning this slab */
+ struct sos_kslab_cache *cache;
+
+ /** Links to the other slabs managed by the same cache */
+ struct sos_kslab *prev, *next;
+};
+
+
+/** The structure of the free objects in the slab */
+struct sos_kslab_free_object
+{
+ struct sos_kslab_free_object *prev, *next;
+};
+
+/** The cache of slab caches */
+static struct sos_kslab_cache *cache_of_struct_kslab_cache;
+
+/** The cache of slab structures for non-ON_SLAB caches */
+static struct sos_kslab_cache *cache_of_struct_kslab;
+
+/** The list of slab caches */
+static struct sos_kslab_cache *kslab_cache_list;
+
+/* Helper function to initialize a cache structure */
+static sos_ret_t
+cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
+ const char* name,
+ sos_size_t obj_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objs,
+ sos_ui32_t cache_flags)
+{
+ unsigned int space_left;
+ sos_size_t alloc_obj_size;
+
+ if (obj_size <= 0)
+ return -SOS_EINVAL;
+
+ /* Default allocation size is the requested one */
+ alloc_obj_size = obj_size;
+
+ /* Make sure the requested size is large enough to store a
+ free_object structure */
+ if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
+ alloc_obj_size = sizeof(struct sos_kslab_free_object);
+
+ /* Align obj_size on 4 bytes */
+ alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
+
+ /* Make sure supplied number of pages per slab is consistent with
+ actual allocated object size */
+ if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
+ return -SOS_EINVAL;
+
+ /* Refuse too large slabs */
+ if (pages_per_slab > MAX_PAGES_PER_SLAB)
+ return -SOS_ENOMEM;
+
+ /* Fills in the cache structure */
+ memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
+ the_cache->name = (char*)name;
+ the_cache->flags = cache_flags;
+ the_cache->original_obj_size = obj_size;
+ the_cache->alloc_obj_size = alloc_obj_size;
+ the_cache->min_free_objects = min_free_objs;
+ the_cache->nb_pages_per_slab = pages_per_slab;
+
+  /* Small-size objects => the slab structure is allocated directly
+     inside the slab */
+ if(alloc_obj_size <= sizeof(struct sos_kslab))
+ the_cache->flags |= ON_SLAB;
+
+ /*
+ * Compute the space left once the maximum number of objects
+ * have been allocated in the slab
+ */
+ space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
+ if(the_cache->flags & ON_SLAB)
+ space_left -= sizeof(struct sos_kslab);
+ the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
+ space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
+
+ /* Make sure a single slab is large enough to contain the minimum
+ number of objects requested */
+ if (the_cache->nb_objects_per_slab < min_free_objs)
+ return -SOS_EINVAL;
+
+  /* If the space left over by the objects is large enough for the
+     slab structure, then make the slab structure ON_SLAB */
+ if (space_left >= sizeof(struct sos_kslab))
+ the_cache->flags |= ON_SLAB;
+
+ return SOS_OK;
+}
+
+
+/** Helper function to add a new slab for the given cache. */
+static sos_ret_t
+cache_add_slab(struct sos_kslab_cache *kslab_cache,
+ sos_vaddr_t vaddr_slab,
+ struct sos_kslab *slab)
+{
+ int i;
+
+ /* Setup the slab structure */
+ memset(slab, 0x0, sizeof(struct sos_kslab));
+ slab->cache = kslab_cache;
+
+ /* Establish the address of the first free object */
+ slab->first_object = vaddr_slab;
+
+ /* Account for this new slab in the cache */
+ slab->nb_free = kslab_cache->nb_objects_per_slab;
+ kslab_cache->nb_free_objects += slab->nb_free;
+
+ /* Build the list of free objects */
+ for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
+ {
+ sos_vaddr_t obj_vaddr;
+
+ /* Set object's address */
+ obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
+
+ /* Add it to the list of free objects */
+ list_add_tail(slab->free,
+ (struct sos_kslab_free_object *)obj_vaddr);
+ }
+
+ /* Add the slab to the cache's slab list: add the head of the list
+ since this slab is non full */
+ list_add_head(kslab_cache->slab_list, slab);
+
+ return SOS_OK;
+}
+
+
+/** Helper function to allocate a new slab for the given kslab_cache */
+static sos_ret_t
+cache_grow(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags)
+{
+ sos_ui32_t range_alloc_flags;
+
+ struct sos_kmem_range *new_range;
+ sos_vaddr_t new_range_start;
+
+ struct sos_kslab *new_slab;
+
+ /*
+ * Setup the flags for the range allocation
+ */
+ range_alloc_flags = 0;
+
+ /* Atomic ? */
+ if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
+ range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;
+
+ /* Need physical mapping NOW ? */
+ if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
+ | SOS_KSLAB_CREATE_ZERO))
+ range_alloc_flags |= SOS_KMEM_VMM_MAP;
+
+ /* Allocate the range */
+ new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
+ range_alloc_flags,
+ & new_range_start);
+ if (! new_range)
+ return -SOS_ENOMEM;
+
+ /* Allocate the slab structure */
+ if (kslab_cache->flags & ON_SLAB)
+ {
+ /* Slab structure is ON the slab: simply set its address to the
+ end of the range */
+ sos_vaddr_t slab_vaddr
+ = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab);
+ new_slab = (struct sos_kslab*)slab_vaddr;
+ }
+ else
+ {
+ /* Slab structure is OFF the slab: allocate it from the cache of
+ slab structures */
+ sos_vaddr_t slab_vaddr
+ = sos_kmem_cache_alloc(cache_of_struct_kslab,
+ alloc_flags);
+ if (! slab_vaddr)
+ {
+ sos_kmem_vmm_del_range(new_range);
+ return -SOS_ENOMEM;
+ }
+ new_slab = (struct sos_kslab*)slab_vaddr;
+ }
+
+ cache_add_slab(kslab_cache, new_range_start, new_slab);
+ new_slab->range = new_range;
+
+ /* Set the backlink from range to this slab */
+ sos_kmem_vmm_set_slab(new_range, new_slab);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to release a slab
+ *
+ * The corresponding range is always deleted, except when
+ * must_del_range_now is FALSE. This happens only when the function
+ * gets called from sos_kmem_cache_release_struct_range(), to avoid
+ * deep recursion.
+ */
+static sos_ret_t
+cache_release_slab(struct sos_kslab *slab,
+ sos_bool_t must_del_range_now)
+{
+ struct sos_kslab_cache *kslab_cache = slab->cache;
+ struct sos_kmem_range *range = slab->range;
+
+ SOS_ASSERT_FATAL(kslab_cache != NULL);
+ SOS_ASSERT_FATAL(range != NULL);
+ SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
+
+ /* First, remove the slab from the slabs' list of the cache */
+ list_delete(kslab_cache->slab_list, slab);
+ slab->cache->nb_free_objects -= slab->nb_free;
+
+ /* Release the slab structure if it is OFF slab */
+ if (! (slab->cache->flags & ON_SLAB))
+ sos_kmem_cache_free((sos_vaddr_t)slab);
+
+ /* Ok, the range is not bound to any slab anymore */
+ sos_kmem_vmm_set_slab(range, NULL);
+
+ /* Always delete the range now, unless we are told not to do so (see
+ sos_kmem_cache_release_struct_range() below) */
+ if (must_del_range_now)
+ return sos_kmem_vmm_del_range(range);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to create the initial cache of caches, with a very
+ * first slab in it, so that new cache structures can be simply allocated.
+ * @return the cache structure for the cache of caches
+ */
+static struct sos_kslab_cache *
+create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
+ int nb_pages)
+{
+ /* The preliminary cache structure we need in order to allocate the
+ first slab in the cache of caches (allocated on the stack !) */
+ struct sos_kslab_cache fake_cache_of_caches;
+
+ /* The real cache structure for the cache of caches */
+ struct sos_kslab_cache *real_cache_of_caches;
+
+ /* The kslab structure for this very first slab */
+ struct sos_kslab *slab_of_caches;
+
+ /* Init the cache structure for the cache of caches */
+ if (cache_initialize(& fake_cache_of_caches,
+ "Caches", sizeof(struct sos_kslab_cache),
+ nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+ /* Something wrong with the parameters */
+ return NULL;
+
+ memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
+
+ /* Add the pages for the 1st slab of caches */
+ slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
+ + nb_pages*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab));
+
+ /* Add the abovementioned 1st slab to the cache of caches */
+ cache_add_slab(& fake_cache_of_caches,
+ vaddr_first_slab_of_caches,
+ slab_of_caches);
+
+ /* Now we allocate a cache structure, which will be the real cache
+ of caches, ie a cache structure allocated INSIDE the cache of
+ caches, not inside the stack */
+ real_cache_of_caches
+ = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
+ 0);
+ /* We initialize it */
+ memcpy(real_cache_of_caches, & fake_cache_of_caches,
+ sizeof(struct sos_kslab_cache));
+ /* We need to update the slab's 'cache' field */
+ slab_of_caches->cache = real_cache_of_caches;
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, real_cache_of_caches);
+
+ return real_cache_of_caches;
+}
+
+
+/**
+ * Helper function to create the initial cache of ranges, with a very
+ * first slab in it, so that new kmem_range structures can be simply
+ * allocated.
+ * @return the cache of kmem_range
+ */
+static struct sos_kslab_cache *
+create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
+ sos_size_t sizeof_struct_range,
+ int nb_pages)
+{
+ /* The cache structure for the cache of kmem_range */
+ struct sos_kslab_cache *cache_of_ranges;
+
+ /* The kslab structure for the very first slab of ranges */
+ struct sos_kslab *slab_of_ranges;
+
+ cache_of_ranges = (struct sos_kslab_cache*)
+ sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
+ 0);
+ if (! cache_of_ranges)
+ return NULL;
+
+ /* Init the cache structure for the cache of ranges with min objects
+ per slab = 2 !!! */
+ if (cache_initialize(cache_of_ranges,
+ "struct kmem_range",
+ sizeof_struct_range,
+ nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+ /* Something wrong with the parameters */
+ return NULL;
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, cache_of_ranges);
+
+ /*
+ * Add the first slab for this cache
+ */
+ memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
+
+ /* Add the pages for the 1st slab of ranges */
+ slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
+ + nb_pages*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab));
+
+ cache_add_slab(cache_of_ranges,
+ vaddr_first_slab_of_ranges,
+ slab_of_ranges);
+
+ return cache_of_ranges;
+}
+
+
+struct sos_kslab_cache *
+sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_size_t sizeof_struct_range,
+ /* results */
+ struct sos_kslab **first_struct_slab_of_caches,
+ sos_vaddr_t *first_slab_of_caches_base,
+ sos_count_t *first_slab_of_caches_nb_pages,
+ struct sos_kslab **first_struct_slab_of_ranges,
+ sos_vaddr_t *first_slab_of_ranges_base,
+ sos_count_t *first_slab_of_ranges_nb_pages)
+{
+ int i;
+ sos_ret_t retval;
+ sos_vaddr_t vaddr;
+
+ /* The cache of ranges we are about to allocate */
+ struct sos_kslab_cache *cache_of_ranges;
+
+  /* In the beginning, there isn't any cache */
+ kslab_cache_list = NULL;
+ cache_of_struct_kslab = NULL;
+ cache_of_struct_kslab_cache = NULL;
+
+ /*
+ * Create the cache of caches, initialised with 1 allocated slab
+ */
+
+ /* Allocate the pages needed for the 1st slab of caches, and map them
+ in kernel space, right after the kernel */
+ *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
+ for (i = 0, vaddr = *first_slab_of_caches_base ;
+ i < NB_PAGES_IN_SLAB_OF_CACHES ;
+ i++, vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr;
+
+ ppage_paddr
+ = sos_physmem_ref_physpage_new(FALSE);
+ SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
+
+ retval = sos_paging_map(ppage_paddr, vaddr,
+ FALSE,
+ SOS_VM_MAP_ATOMIC
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE);
+ SOS_ASSERT_FATAL(retval == SOS_OK);
+
+ retval = sos_physmem_unref_physpage(ppage_paddr);
+ SOS_ASSERT_FATAL(retval == FALSE);
+ }
+
+ /* Create the cache of caches */
+ *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
+ cache_of_struct_kslab_cache
+ = create_cache_of_caches(*first_slab_of_caches_base,
+ NB_PAGES_IN_SLAB_OF_CACHES);
+ SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);
+
+ /* Retrieve the slab that should have been allocated */
+ *first_struct_slab_of_caches
+ = list_get_head(cache_of_struct_kslab_cache->slab_list);
+
+
+ /*
+ * Create the cache of ranges, initialised with 1 allocated slab
+ */
+ *first_slab_of_ranges_base = vaddr;
+ /* Allocate the 1st slab */
+ for (i = 0, vaddr = *first_slab_of_ranges_base ;
+ i < NB_PAGES_IN_SLAB_OF_RANGES ;
+ i++, vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr;
+
+ ppage_paddr
+ = sos_physmem_ref_physpage_new(FALSE);
+ SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
+
+ retval = sos_paging_map(ppage_paddr, vaddr,
+ FALSE,
+ SOS_VM_MAP_ATOMIC
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE);
+ SOS_ASSERT_FATAL(retval == SOS_OK);
+
+ retval = sos_physmem_unref_physpage(ppage_paddr);
+ SOS_ASSERT_FATAL(retval == FALSE);
+ }
+
+ /* Create the cache of ranges */
+ *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
+ cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
+ sizeof_struct_range,
+ NB_PAGES_IN_SLAB_OF_RANGES);
+ SOS_ASSERT_FATAL(cache_of_ranges != NULL);
+
+ /* Retrieve the slab that should have been allocated */
+ *first_struct_slab_of_ranges
+ = list_get_head(cache_of_ranges->slab_list);
+
+ /*
+ * Create the cache of slabs, without any allocated slab yet
+ */
+ cache_of_struct_kslab
+ = sos_kmem_cache_create("off-slab slab structures",
+ sizeof(struct sos_kslab),
+ 1,
+ 0,
+ SOS_KSLAB_CREATE_MAP);
+ SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);
+
+ return cache_of_ranges;
+}
+
+
+sos_ret_t
+sos_kmem_cache_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
+ struct sos_kmem_range *first_range_of_caches,
+ struct sos_kslab *first_struct_slab_of_ranges,
+ struct sos_kmem_range *first_range_of_ranges)
+{
+ first_struct_slab_of_caches->range = first_range_of_caches;
+ first_struct_slab_of_ranges->range = first_range_of_ranges;
+ return SOS_OK;
+}
+
+
+struct sos_kslab_cache *
+sos_kmem_cache_create(const char* name,
+ sos_size_t obj_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objs,
+ sos_ui32_t cache_flags)
+{
+ struct sos_kslab_cache *new_cache;
+
+ /* Allocate the new cache */
+ new_cache = (struct sos_kslab_cache*)
+ sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
+ 0/* NOT ATOMIC */);
+ if (! new_cache)
+ return NULL;
+
+ if (cache_initialize(new_cache, name, obj_size,
+ pages_per_slab, min_free_objs,
+ cache_flags))
+ {
+ /* Something was wrong */
+ sos_kmem_cache_free((sos_vaddr_t)new_cache);
+ return NULL;
+ }
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, new_cache);
+
+ /* if the min_free_objs is set, pre-allocate a slab */
+ if (min_free_objs)
+ {
+ if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
+ {
+ sos_kmem_cache_destroy(new_cache);
+ return NULL; /* Not enough memory */
+ }
+ }
+
+ return new_cache;
+}
+
+
+sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
+{
+ int nb_slabs;
+ struct sos_kslab *slab;
+
+ if (! kslab_cache)
+ return -SOS_EINVAL;
+
+ /* Refuse to destroy the cache if there are any objects still
+ allocated */
+ list_foreach(kslab_cache->slab_list, slab, nb_slabs)
+ {
+ if (slab->nb_free != kslab_cache->nb_objects_per_slab)
+ return -SOS_EBUSY;
+ }
+
+ /* Remove all the slabs */
+ while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
+ {
+ cache_release_slab(slab, TRUE);
+ }
+
+ /* Remove the cache */
+ return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
+}
+
+
+sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags)
+{
+ sos_vaddr_t obj_vaddr;
+ struct sos_kslab * slab_head;
+#define ALLOC_RET return
+
+ /* If the slab at the head of the slabs' list has no free object,
+ then the other slabs don't either => need to allocate a new
+ slab */
+ if ((! kslab_cache->slab_list)
+ || (! list_get_head(kslab_cache->slab_list)->free))
+ {
+ if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
+ /* Not enough memory or blocking alloc */
+ ALLOC_RET( (sos_vaddr_t)NULL);
+ }
+
+ /* Here: we are sure that list_get_head(kslab_cache->slab_list)
+ exists *AND* that list_get_head(kslab_cache->slab_list)->free is
+ NOT NULL */
+ slab_head = list_get_head(kslab_cache->slab_list);
+ SOS_ASSERT_FATAL(slab_head != NULL);
+
+ /* Allocate the object at the head of the slab at the head of the
+ slabs' list */
+ obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
+ slab_head->nb_free --;
+ kslab_cache->nb_free_objects --;
+
+ /* If needed, reset object's contents */
+ if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
+ memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
+
+ /* Slab is now full ? */
+ if (slab_head->free == NULL)
+ {
+ /* Transfer it at the tail of the slabs' list */
+ struct sos_kslab *slab;
+ slab = list_pop_head(kslab_cache->slab_list);
+ list_add_tail(kslab_cache->slab_list, slab);
+ }
+
+ /*
+ * For caches that require a minimum amount of free objects left,
+ * allocate a slab if needed.
+ *
+   * Notice the "== min_free_objects - 1": we did not write
+   * "< min_free_objects" because, for the cache of kmem_range
+   * structures, that would lead to a chicken-and-egg problem:
+   * cache_grow below calls cache_alloc again for the kmem_vmm cache,
+   * so we would re-enter here with the same cache. If the test were
+   * "< min_free_objects", we would call cache_grow for the kmem_vmm
+   * cache again and again... until we reach the bottom of our stack
+   * (infinite recursion). By testing precisely "==", cache_grow is
+   * only called the first time we cross the threshold.
+ */
+ if ((kslab_cache->min_free_objects > 0)
+ && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
+ {
+ /* No: allocate a new slab now */
+ if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
+ {
+ /* Not enough free memory or blocking alloc => undo the
+ allocation */
+ sos_kmem_cache_free(obj_vaddr);
+ ALLOC_RET( (sos_vaddr_t)NULL);
+ }
+ }
+
+ ALLOC_RET(obj_vaddr);
+}
+
+
+/**
+ * Helper function to free the object located at the given address.
+ *
+ * @param empty_slab is the address of the slab to release, if removing
+ * the object causes the slab to become empty.
+ */
+inline static
+sos_ret_t
+free_object(sos_vaddr_t vaddr,
+ struct sos_kslab ** empty_slab)
+{
+ struct sos_kslab_cache *kslab_cache;
+
+ /* Lookup the slab containing the object in the slabs' list */
+ struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
+
+ /* By default, consider that the slab will not become empty */
+ *empty_slab = NULL;
+
+ /* Did not find the slab */
+ if (! slab)
+ return -SOS_EINVAL;
+
+ SOS_ASSERT_FATAL(slab->cache);
+ kslab_cache = slab->cache;
+
+ /*
+ * Check whether the address really could mark the start of an actual
+ * allocated object
+ */
+ /* Address multiple of an object's size ? */
+ if (( (vaddr - slab->first_object)
+ % kslab_cache->alloc_obj_size) != 0)
+ return -SOS_EINVAL;
+ /* Address not too large ? */
+ if (( (vaddr - slab->first_object)
+ / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
+ return -SOS_EINVAL;
+
+ /*
+ * Ok: we now release the object
+ */
+
+ /* Did find a full slab => will not be full any more => move it
+ to the head of the slabs' list */
+ if (! slab->free)
+ {
+ list_delete(kslab_cache->slab_list, slab);
+ list_add_head(kslab_cache->slab_list, slab);
+ }
+
+ /* Release the object */
+ list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
+ slab->nb_free++;
+ kslab_cache->nb_free_objects++;
+ SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
+
+ /* Cause the slab to be released if it becomes empty, and if we are
+ allowed to do it */
+ if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
+ && (kslab_cache->nb_free_objects - slab->nb_free
+ >= kslab_cache->min_free_objects))
+ {
+ *empty_slab = slab;
+ }
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
+{
+ sos_ret_t retval;
+ struct sos_kslab *empty_slab;
+
+ /* Remove the object from the slab */
+ retval = free_object(vaddr, & empty_slab);
+ if (retval != SOS_OK)
+ return retval;
+
+ /* Remove the slab and the underlying range if needed */
+ if (empty_slab != NULL)
+ return cache_release_slab(empty_slab, TRUE);
+
+ return SOS_OK;
+}
+
+
+struct sos_kmem_range *
+sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
+{
+ sos_ret_t retval;
+ struct sos_kslab *empty_slab;
+
+ /* Remove the object from the slab */
+ retval = free_object((sos_vaddr_t)the_range, & empty_slab);
+ if (retval != SOS_OK)
+ return NULL;
+
+ /* Remove the slab BUT NOT the underlying range if needed */
+ if (empty_slab != NULL)
+ {
+ struct sos_kmem_range *empty_range = empty_slab->range;
+ SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
+ SOS_ASSERT_FATAL(empty_range != NULL);
+ return empty_range;
+ }
+
+ return NULL;
+}
+
diff --git a/sos-code-article5/sos/kmem_slab.h b/sos-code-article5/sos/kmem_slab.h
new file mode 100644
index 0000000..603a3aa
--- /dev/null
+++ b/sos-code-article5/sos/kmem_slab.h
@@ -0,0 +1,206 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMEM_SLAB_H_
+#define _SOS_KMEM_SLAB_H_
+
+/**
+ * @file kmem_slab.h
+ *
+ * Kernel memory allocator based on Bonwick's slab allocator (Solaris
+ * 2.4, Linux 2.4). This allocator achieves a good memory utilization
+ * ratio (memory effectively used / memory requested), i.e. limited
+ * fragmentation, while elegantly handling cache-effect considerations
+ * (TLB locality through the notion of "cache" of slabs, and dcache
+ * utilization through cache colouring, which decreases the conflicts
+ * in the dcache between accesses to different data in the same
+ * cache).
+ *
+ * This allocator relies on the range allocator (kmem_vmm.h) to
+ * allocate the slabs, which itself relies on the slab allocator to
+ * allocate its "range" data structures, thus leading to a
+ * chicken-and-egg problem. We solve this problem by introducing the
+ * notion of "min_free_objs" for the slab caches, in order for the cache
+ * of ranges to always have enough ranges in reserve to complete the
+ * range allocation before being urged to allocate a new slab of
+ * ranges, which would require the allocation of a new range.
+ *
+ * Compared to Bonwick's recommendations, we don't handle ctor/dtor
+ * routines on the objects, so that we can alter the objects once they
+ * are set free. Thus, the list of free objects is stored in the free
+ * objects themselves, not alongside the objects (this also implies that
+ * the SOS_KSLAB_CREATE_MAP flag below is meaningless). We also don't
+ * implement the cache colouring (trivial to add, but we omit it for
+ * readability reasons), and the only alignment constraint we respect
+ * is that allocated objects are aligned on a 4B boundary: for other
+ * alignment constraints, the user must integrate them in the
+ * "object_size" parameter to "sos_kmem_cache_create()".
+ *
+ * References :
+ * - J. Bonwick's paper, "The slab allocator: An object-caching kernel
+ * memory allocator", In USENIX Summer 1994 Technical Conference
+ * - The bible, aka "Unix internals : the new frontiers" (section
+ * 12.10), Uresh Vahalia, Prentice Hall 1996, ISBN 0131019082
+ * - "The Linux slab allocator", B. Fitzgibbons,
+ * http://www.cc.gatech.edu/people/home/bradf/cs7001/proj2/
+ * - The Kos, http://kos.enix.org/
+ */
+#include <sos/types.h>
+#include <sos/errno.h>
+
+/** Opaque data structure that defines a Cache of slabs */
+struct sos_kslab_cache;
+
+/** Opaque data structure that defines a slab. Exported only to
+ kmem_vmm.h */
+struct sos_kslab;
+
+#include "kmem_vmm.h"
+
+
+/** The maximum allowed pages for each slab */
+#define MAX_PAGES_PER_SLAB 32 /* 128 kB */
+
+
+/**
+ * Initialize the slab cache of slab caches, and prepare the cache of
+ * kmem_range for kmem_vmm.
+ *
+ * @param kernel_core_base The virtual address of the first byte used
+ * by the kernel code/data
+ *
+ * @param kernel_core_top The virtual address of the first byte after
+ * the kernel code/data.
+ *
+ * @param sizeof_struct_range the size of the objects (aka "struct
+ * sos_kmem_vmm_ranges") to be allocated in the cache of ranges
+ *
+ * @param first_struct_slab_of_caches (output value) the virtual
+ * address of the first slab structure that gets allocated for the
+ * cache of caches. The function actually allocates this first slab
+ * by hand, because of a chicken-and-egg problem. The
+ * address of the slab is used by the kmem_vmm_setup routine to
+ * finalize the allocation of the slab, in order for it to behave like
+ * a real slab afterwards.
+ *
+ * @param first_slab_of_caches_base (output value) the virtual address
+ * of the slab associated to the slab structure.
+ *
+ * @param first_slab_of_caches_nb_pages (output value) the number of
+ * (virtual) pages used by the first slab of the cache of caches.
+ *
+ * @param first_struct_slab_of_ranges (output value) the virtual address
+ * of the first slab that gets allocated for the cache of ranges. Same
+ * explanation as above.
+ *
+ * @param first_slab_of_ranges_base (output value) the virtual address
+ * of the slab associated to the slab structure.
+ *
+ * @param first_slab_of_ranges_nb_pages (output value) the number of
+ * (virtual) pages used by the first slab of the cache of ranges.
+ *
+ * @return the cache of kmem_range, immediately usable
+ */
+struct sos_kslab_cache *
+sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_size_t sizeof_struct_range,
+ /* results */
+ struct sos_kslab **first_struct_slab_of_caches,
+ sos_vaddr_t *first_slab_of_caches_base,
+ sos_count_t *first_slab_of_caches_nb_pages,
+ struct sos_kslab **first_struct_slab_of_ranges,
+ sos_vaddr_t *first_slab_of_ranges_base,
+ sos_count_t *first_slab_of_ranges_nb_pages);
+
+/**
+ * Update the configuration of the cache subsystem once the vmm
+ * subsystem has been fully initialized
+ */
+sos_ret_t
+sos_kmem_cache_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
+ struct sos_kmem_range *first_range_of_caches,
+ struct sos_kslab *first_struct_slab_of_ranges,
+ struct sos_kmem_range *first_range_of_ranges);
+
+
+/*
+ * Flags for sos_kmem_cache_create()
+ */
+/** The slabs should be initially mapped in physical memory */
+#define SOS_KSLAB_CREATE_MAP (1<<0)
+/** The object should always be set to zero at allocation (implies
+ SOS_KSLAB_CREATE_MAP) */
+#define SOS_KSLAB_CREATE_ZERO (1<<1)
+
+/**
+ * @note this function MAY block (involved allocations are not atomic)
+ * @param name must remain valid during the whole cache's life
+ * (shallow copy) !
+ * @param cache_flags An or-ed combination of the SOS_KSLAB_CREATE_* flags
+ */
+struct sos_kslab_cache *
+sos_kmem_cache_create(const char* name,
+ sos_size_t object_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objects,
+ sos_ui32_t cache_flags);
+
+sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache);
+
+
+/*
+ * Flags for sos_kmem_cache_alloc()
+ */
+/** Allocation should either succeed or fail, without blocking */
+#define SOS_KSLAB_ALLOC_ATOMIC (1<<0)
+
+/**
+ * Allocate an object from the given cache.
+ *
+ * @param alloc_flags An or-ed combination of the SOS_KSLAB_ALLOC_* flags
+ */
+sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags);
+
+
+/**
+ * Free an object (assumed to be already allocated and not already
+ * free) at the given virtual address.
+ */
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr);
+
+
+/*
+ * Function reserved to kmem_vmm.c. Does almost everything
+ * sos_kmem_cache_free() does, except that it never calls
+ * sos_kmem_vmm_del_range() itself. This is aimed at avoiding
+ * deep recursion when a range is freed with
+ * sos_kmem_vmm_del_range().
+ *
+ * @param the_range The range structure to free
+ *
+ * @return NULL when the slab containing 'the_range' still contains
+ * other ranges, or the address of the range which backed this slab
+ * if the slab becomes empty.
+ */
+struct sos_kmem_range *
+sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range);
+
+
+#endif /* _SOS_KMEM_SLAB_H_ */
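
To tie the API together, a lifecycle sketch assuming the SOS build environment (the cache name and object type are illustrative):

#include <sos/kmem_slab.h>

struct my_obj { int x, y; };   /* hypothetical object type */

static void slab_demo(void)
{
  struct sos_kslab_cache *cache;
  sos_vaddr_t obj;

  /* One page per slab, no minimum reserve of free objects, slabs
     mapped in physical memory at creation time. */
  cache = sos_kmem_cache_create("demo objects", sizeof(struct my_obj),
                                1, 0, SOS_KSLAB_CREATE_MAP);
  if (! cache)
    return;

  /* A blocking allocation; pass SOS_KSLAB_ALLOC_ATOMIC instead to
     fail rather than block. */
  obj = sos_kmem_cache_alloc(cache, 0);
  if (obj)
    sos_kmem_cache_free(obj);

  /* Would return -SOS_EBUSY if any object were still allocated. */
  sos_kmem_cache_destroy(cache);
}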
diff --git a/sos-code-article5/sos/kmem_vmm.c b/sos-code-article5/sos/kmem_vmm.c
new file mode 100644
index 0000000..dbf1ee8
--- /dev/null
+++ b/sos-code-article5/sos/kmem_vmm.c
@@ -0,0 +1,608 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/list.h>
+#include <sos/physmem.h>
+#include <hwcore/paging.h>
+#include <sos/assert.h>
+
+#include "kmem_vmm.h"
+
+/** The structure of a range of kernel-space virtual addresses */
+struct sos_kmem_range
+{
+ sos_vaddr_t base_vaddr;
+ sos_count_t nb_pages;
+
+ /* The slab owning this range, or NULL */
+ struct sos_kslab *slab;
+
+ struct sos_kmem_range *prev, *next;
+};
+const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
+
+/** The ranges are SORTED in (strictly) ascending base addresses */
+static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;
+
+/** The slab cache for the kmem ranges */
+static struct sos_kslab_cache *kmem_range_cache;
+
+
+
+/** Helper function to get the closest preceding or containing
+ range for the given virtual address */
+static struct sos_kmem_range *
+get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
+ sos_vaddr_t vaddr)
+{
+ int nb_elements;
+ struct sos_kmem_range *a_range, *ret_range;
+
+ /* kmem_range list is kept SORTED, so we exit as soon as vaddr >= a
+ range base address */
+ ret_range = NULL;
+ list_foreach(the_list, a_range, nb_elements)
+ {
+ if (vaddr < a_range->base_vaddr)
+ return ret_range;
+ ret_range = a_range;
+ }
+
+ /* This will always be the LAST range in the kmem area */
+ return ret_range;
+}
+
+
+/**
+ * Helper function to lookup a free range large enough to hold nb_pages
+ * pages (first fit)
+ */
+static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
+{
+ int nb_elements;
+ struct sos_kmem_range *r;
+
+ list_foreach(kmem_free_range_list, r, nb_elements)
+ {
+ if (r->nb_pages >= nb_pages)
+ return r;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Helper function to add a_range in the_list, in strictly ascending order.
+ *
+ * @return The (possibly) new head of the_list
+ */
+static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
+ struct sos_kmem_range *a_range)
+{
+ struct sos_kmem_range *prec_used;
+
+ /** Look for any preceding range */
+ prec_used = get_closest_preceding_kmem_range(the_list,
+ a_range->base_vaddr);
+ /** insert a_range /after/ this prec_used */
+ if (prec_used != NULL)
+ list_insert_after(the_list, prec_used, a_range);
+ else /* Insert at the beginning of the list */
+ list_add_head(the_list, a_range);
+
+ return the_list;
+}
+
+
+/**
+ * Helper function to retrieve the range owning the given vaddr, by
+ * scanning the physical memory first if vaddr is mapped in RAM
+ */
+static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range;
+
+ /* First: try to retrieve the physical page mapped at this address */
+ sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
+ if (ppage_paddr)
+ {
+ range = sos_physmem_get_kmem_range(ppage_paddr);
+
+ /* If a page is mapped at this address, it is EXPECTED that it
+ is really associated with a range */
+ SOS_ASSERT_FATAL(range != NULL);
+ }
+
+ /* Otherwise scan the list of used ranges, looking for the range
+ owning the address */
+ else
+ {
+ range = get_closest_preceding_kmem_range(kmem_used_range_list,
+ vaddr);
+ /* Not found */
+ if (! range)
+ return NULL;
+
+ /* vaddr not covered by this range */
+ if ( (vaddr < range->base_vaddr)
+ || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
+ return NULL;
+ }
+
+ return range;
+}
+
+
+/**
+ * Helper function for sos_kmem_vmm_setup() to initialize a new range
+ * that maps a given area as free or as already used.
+ * This function either succeeds or halts the whole system.
+ */
+static struct sos_kmem_range *
+create_range(sos_bool_t is_free,
+ sos_vaddr_t base_vaddr,
+ sos_vaddr_t top_vaddr,
+ struct sos_kslab *associated_slab)
+{
+ struct sos_kmem_range *range;
+
+ SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
+ SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
+
+ if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
+ return NULL;
+
+ range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
+ SOS_KSLAB_ALLOC_ATOMIC);
+ SOS_ASSERT_FATAL(range != NULL);
+
+ range->base_vaddr = base_vaddr;
+ range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;
+
+ if (is_free)
+ {
+ list_add_tail(kmem_free_range_list,
+ range);
+ }
+ else
+ {
+ sos_vaddr_t vaddr;
+ range->slab = associated_slab;
+ list_add_tail(kmem_used_range_list,
+ range);
+
+      /* Ok, set the range owner for the pages in this range */
+ for (vaddr = base_vaddr ;
+ vaddr < top_vaddr ;
+ vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
+ SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
+ sos_physmem_set_kmem_range(ppage_paddr, range);
+ }
+ }
+
+ return range;
+}
+
+
+sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_vaddr_t bootstrap_stack_bottom_vaddr,
+ sos_vaddr_t bootstrap_stack_top_vaddr)
+{
+ struct sos_kslab *first_struct_slab_of_caches,
+ *first_struct_slab_of_ranges;
+ sos_vaddr_t first_slab_of_caches_base,
+ first_slab_of_caches_nb_pages,
+ first_slab_of_ranges_base,
+ first_slab_of_ranges_nb_pages;
+ struct sos_kmem_range *first_range_of_caches,
+ *first_range_of_ranges;
+
+ list_init(kmem_free_range_list);
+ list_init(kmem_used_range_list);
+
+ kmem_range_cache
+ = sos_kmem_cache_setup_prepare(kernel_core_base,
+ kernel_core_top,
+ sizeof(struct sos_kmem_range),
+ & first_struct_slab_of_caches,
+ & first_slab_of_caches_base,
+ & first_slab_of_caches_nb_pages,
+ & first_struct_slab_of_ranges,
+ & first_slab_of_ranges_base,
+ & first_slab_of_ranges_nb_pages);
+ SOS_ASSERT_FATAL(kmem_range_cache != NULL);
+
+ /* Mark virtual addresses 16kB - Video as FREE */
+ create_range(TRUE,
+ SOS_KMEM_VMM_BASE,
+ SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
+ NULL);
+
+ /* Mark virtual addresses in Video hardware mapping as NOT FREE */
+ create_range(FALSE,
+ SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
+ SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
+ NULL);
+
+ /* Mark virtual addresses Video - Kernel as FREE */
+ create_range(TRUE,
+ SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
+ SOS_PAGE_ALIGN_INF(kernel_core_base),
+ NULL);
+
+ /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
+ as NOT FREE */
+ create_range(FALSE,
+ SOS_PAGE_ALIGN_INF(kernel_core_base),
+ bootstrap_stack_bottom_vaddr,
+ NULL);
+
+ /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
+ but in another vmm region in order to be un-allocated later */
+ create_range(FALSE,
+ bootstrap_stack_bottom_vaddr,
+ bootstrap_stack_top_vaddr,
+ NULL);
+
+ /* Mark the remaining virtual addresses in Kernel code/data after
+ the bootstrap stack as NOT FREE */
+ create_range(FALSE,
+ bootstrap_stack_top_vaddr,
+ SOS_PAGE_ALIGN_SUP(kernel_core_top),
+ NULL);
+
+ /* Mark virtual addresses in the first slab of the cache of caches
+ as NOT FREE */
+ SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
+ == first_slab_of_caches_base);
+ SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
+ first_range_of_caches
+ = create_range(FALSE,
+ first_slab_of_caches_base,
+ first_slab_of_caches_base
+ + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
+ first_struct_slab_of_caches);
+
+ /* Mark virtual addresses in the first slab of the cache of ranges
+ as NOT FREE */
+ SOS_ASSERT_FATAL((first_slab_of_caches_base
+ + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
+ == first_slab_of_ranges_base);
+ SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
+ first_range_of_ranges
+ = create_range(FALSE,
+ first_slab_of_ranges_base,
+ first_slab_of_ranges_base
+ + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
+ first_struct_slab_of_ranges);
+
+ /* Mark virtual addresses after these slabs as FREE */
+ create_range(TRUE,
+ first_slab_of_ranges_base
+ + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
+ SOS_KMEM_VMM_TOP,
+ NULL);
+
+ /* Update the cache subsystem so that the artificially-created
+ caches of caches and ranges really behave like *normal* caches (ie
+ those allocated by the normal slab API) */
+ sos_kmem_cache_setup_commit(first_struct_slab_of_caches,
+ first_range_of_caches,
+ first_struct_slab_of_ranges,
+ first_range_of_ranges);
+
+ return SOS_OK;
+}
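+
+/* For reference, a sketch of the kernel virtual space layout produced
+   by sos_kmem_vmm_setup() above (reconstructed from the create_range()
+   calls, not an authoritative memory map):
+
+     [SOS_KMEM_VMM_BASE .. BIOS_N_VIDEO_START)        FREE
+     [BIOS_N_VIDEO_START .. BIOS_N_VIDEO_END)         USED (video/BIOS)
+     [BIOS_N_VIDEO_END .. kernel_core_base)           FREE
+     [kernel_core_base .. bootstrap_stack_bottom)     USED (kernel)
+     [bootstrap_stack_bottom .. bootstrap_stack_top)  USED (bootstrap stack)
+     [bootstrap_stack_top .. kernel_core_top)         USED (kernel)
+     [kernel_core_top .. + first slab of caches)      USED (slab)
+     [.. + first slab of ranges)                      USED (slab)
+     [end of slabs .. SOS_KMEM_VMM_TOP)               FREE
+*/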
+
+
+/**
+ * Allocate a new kernel area spanning one or multiple pages.
+ *
+ * @return a new range structure
+ */
+struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
+ sos_ui32_t flags,
+ sos_vaddr_t * range_start)
+{
+ struct sos_kmem_range *free_range, *new_range;
+
+ if (nb_pages <= 0)
+ return NULL;
+
+  /* Find a suitable free range to hold the requested number of pages */
+ free_range = find_suitable_free_range(nb_pages);
+ if (free_range == NULL)
+ return NULL;
+
+ /* If range has exactly the requested size, just move it to the
+ "used" list */
+ if(free_range->nb_pages == nb_pages)
+ {
+ list_delete(kmem_free_range_list, free_range);
+ kmem_used_range_list = insert_range(kmem_used_range_list,
+ free_range);
+ /* The new_range is exactly the free_range */
+ new_range = free_range;
+ }
+
+  /* Otherwise the range is bigger than the requested size: split it.
+     This involves shrinking the free range and allocating a new
+     range structure, which is then added to the "used" list */
+ else
+ {
+ /* free_range split in { new_range | free_range } */
+ new_range = (struct sos_kmem_range*)
+ sos_kmem_cache_alloc(kmem_range_cache,
+ (flags & SOS_KMEM_VMM_ATOMIC)?
+ SOS_KSLAB_ALLOC_ATOMIC:0);
+ if (! new_range)
+ return NULL;
+
+ new_range->base_vaddr = free_range->base_vaddr;
+ new_range->nb_pages = nb_pages;
+ free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
+ free_range->nb_pages -= nb_pages;
+
+ /* free_range is still at the same place in the list */
+ /* insert new_range in the used list */
+ kmem_used_range_list = insert_range(kmem_used_range_list,
+ new_range);
+ }
+
+ /* By default, the range is not associated with any slab */
+ new_range->slab = NULL;
+
+ /* If mapping of physical pages is needed, map them now */
+ if (flags & SOS_KMEM_VMM_MAP)
+ {
+ int i;
+ for (i = 0 ; i < nb_pages ; i ++)
+ {
+ /* Get a new physical page */
+ sos_paddr_t ppage_paddr
+ = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));
+
+ /* Map the page in kernel space */
+ if (ppage_paddr)
+ {
+ if (sos_paging_map(ppage_paddr,
+ new_range->base_vaddr
+ + i * SOS_PAGE_SIZE,
+ FALSE /* Not a user page */,
+ ((flags & SOS_KMEM_VMM_ATOMIC)?
+ SOS_VM_MAP_ATOMIC:0)
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE))
+ {
+ /* Failed => force unallocation, see below */
+ sos_physmem_unref_physpage(ppage_paddr);
+ ppage_paddr = (sos_paddr_t)NULL;
+ }
+ else
+ {
+ /* Success : page can be unreferenced since it is
+ now mapped */
+ sos_physmem_unref_physpage(ppage_paddr);
+ }
+ }
+
+ /* Undo the allocation if failed to allocate or map a new page */
+ if (! ppage_paddr)
+ {
+ sos_kmem_vmm_del_range(new_range);
+ return NULL;
+ }
+
+ /* Ok, set the range owner for this page */
+ sos_physmem_set_kmem_range(ppage_paddr, new_range);
+ }
+ }
+
+ /* Otherwise we need a correct page fault handler to support
+ deferred mapping (aka demand paging) of ranges */
+ else
+ SOS_ASSERT_FATAL(! "No demand paging yet");
+
+ if (range_start)
+ *range_start = new_range->base_vaddr;
+
+ return new_range;
+}
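+
+/* Illustrative sketch (not part of the original code): allocating a
+   4-page kernel area whose physical pages are mapped immediately
+   could look like this:
+
+     sos_vaddr_t start;
+     struct sos_kmem_range *r
+       = sos_kmem_vmm_new_range(4, SOS_KMEM_VMM_MAP, &start);
+     if (r != NULL)
+       {
+         ... use [start, start + 4*SOS_PAGE_SIZE) ...
+         sos_kmem_vmm_del_range(r);   release it when done
+       }
+*/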
+
+
+sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
+{
+ int i;
+ struct sos_kmem_range *ranges_to_free;
+ list_init(ranges_to_free);
+
+ SOS_ASSERT_FATAL(range != NULL);
+ SOS_ASSERT_FATAL(range->slab == NULL);
+
+ /* Remove the range from the 'USED' list now */
+ list_delete(kmem_used_range_list, range);
+
+  /*
+   * The following do..while() loop is here to avoid an indirect
+   * recursion: if we called kmem_cache_free() directly from inside
+   * the current function, we would risk re-entering it
+   * (sos_kmem_vmm_del_range()), which could in turn call kmem_slab
+   * and then sos_kmem_vmm_del_range() again, and so on without
+   * bound. This may happen while freeing ranges of struct
+   * sos_kslab...
+   *
+   * To avoid this, we call a special function of kmem_slab that does
+   * almost the same as sos_kmem_cache_free(), but which does NOT
+   * call us back (ie sos_kmem_vmm_del_range()): instead WE add the
+   * range to be freed to a list, and the do..while() loop is here to
+   * process this list ! The recursion is thus replaced by classical
+   * iterations.
+   */
+ do
+ {
+ /* Ok, we got the range. Now, insert this range in the free list */
+ kmem_free_range_list = insert_range(kmem_free_range_list, range);
+
+ /* Unmap the physical pages */
+ for (i = 0 ; i < range->nb_pages ; i ++)
+ {
+ /* This will work even if no page is mapped at this address */
+ sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
+ }
+
+      /* Possibly coalesce it with the prev/next free ranges (there is
+         always a valid prev/next link since the list is circular). Note:
+         the tests below behave correctly even if the list is reduced
+         to the 'range' singleton, at least as long as the range is
+         not zero-sized */
+ /* Merge with preceding one ? */
+ if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
+ == range->base_vaddr)
+ {
+ struct sos_kmem_range *empty_range_of_ranges = NULL;
+ struct sos_kmem_range *prec_free = range->prev;
+
+ /* Merge them */
+ prec_free->nb_pages += range->nb_pages;
+ list_delete(kmem_free_range_list, range);
+
+ /* Mark the range as free. This may cause the slab owning
+ the range to become empty */
+ empty_range_of_ranges =
+ sos_kmem_cache_release_struct_range(range);
+
+ /* If this causes the slab owning the range to become empty,
+ add the range corresponding to the slab at the end of the
+ list of the ranges to be freed: it will be actually freed
+ in one of the next iterations of the do{} loop. */
+ if (empty_range_of_ranges != NULL)
+ {
+ list_delete(kmem_used_range_list, empty_range_of_ranges);
+ list_add_tail(ranges_to_free, empty_range_of_ranges);
+ }
+
+          /* Set range to the beginning of the coalesced area */
+ range = prec_free;
+ }
+
+ /* Merge with next one ? [NO 'else' since range may be the result of
+ the merge above] */
+ if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
+ == range->next->base_vaddr)
+ {
+ struct sos_kmem_range *empty_range_of_ranges = NULL;
+ struct sos_kmem_range *next_range = range->next;
+
+ /* Merge them */
+ range->nb_pages += next_range->nb_pages;
+ list_delete(kmem_free_range_list, next_range);
+
+ /* Mark the next_range as free. This may cause the slab
+ owning the next_range to become empty */
+ empty_range_of_ranges =
+ sos_kmem_cache_release_struct_range(next_range);
+
+ /* If this causes the slab owning the next_range to become
+ empty, add the range corresponding to the slab at the end
+ of the list of the ranges to be freed: it will be
+ actually freed in one of the next iterations of the
+ do{} loop. */
+ if (empty_range_of_ranges != NULL)
+ {
+ list_delete(kmem_used_range_list, empty_range_of_ranges);
+ list_add_tail(ranges_to_free, empty_range_of_ranges);
+ }
+ }
+
+
+ /* If deleting the range(s) caused one or more range(s) to be
+ freed, get the next one to free */
+ if (list_is_empty(ranges_to_free))
+ range = NULL; /* No range left to free */
+ else
+ range = list_pop_head(ranges_to_free);
+
+ }
+ /* Stop when there is no range left to be freed for now */
+ while (range != NULL);
+
+ return SOS_OK;
+}
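+
+/* Coalescing sketch (illustration only): freeing a 3-page used range
+   that sits between two free neighbours
+
+     [free 2p][used 3p][free 1p]   before sos_kmem_vmm_del_range()
+     [free 6p]                     after both merge tests succeed
+
+   The two struct sos_kmem_range made redundant by the merges are
+   returned to kmem_range_cache via
+   sos_kmem_cache_release_struct_range(). */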
+
+
+sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
+ sos_ui32_t flags)
+{
+ struct sos_kmem_range *range
+ = sos_kmem_vmm_new_range(nb_pages,
+ flags,
+ NULL);
+ if (! range)
+ return (sos_vaddr_t)NULL;
+
+ return range->base_vaddr;
+}
+
+
+sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+
+ /* We expect that the given address is the base address of the
+ range */
+ if (!range || (range->base_vaddr != vaddr))
+ return -SOS_EINVAL;
+
+ /* We expect that this range is not held by any cache */
+ if (range->slab != NULL)
+ return -SOS_EBUSY;
+
+ return sos_kmem_vmm_del_range(range);
+}
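+
+/* Illustrative round trip (a sketch, not part of the original code):
+
+     sos_vaddr_t va = sos_kmem_vmm_alloc(2, SOS_KMEM_VMM_MAP);
+     if (va)
+       SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(va));
+
+   SOS_KMEM_VMM_MAP is required as long as demand paging is not
+   implemented, and sos_kmem_vmm_free() insists on receiving the
+   range's base address, exactly as returned by sos_kmem_vmm_alloc(). */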
+
+
+sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
+ struct sos_kslab *slab)
+{
+ if (! range)
+ return -SOS_EINVAL;
+
+ range->slab = slab;
+ return SOS_OK;
+}
+
+struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+ if (! range)
+ return NULL;
+
+ return range->slab;
+}
+
+
+sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+ return (range != NULL);
+}
diff --git a/sos-code-article5/sos/kmem_vmm.h b/sos-code-article5/sos/kmem_vmm.h
new file mode 100644
index 0000000..9cd999b
--- /dev/null
+++ b/sos-code-article5/sos/kmem_vmm.h
@@ -0,0 +1,112 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMEM_VMM_H_
+#define _SOS_KMEM_VMM_H_
+
+/**
+ * @file kmem_vmm.h
+ *
+ * Kernel Memory Allocator for multiple-page-sized objects residing in
+ * the kernel (virtual memory) space. Relies on the slab cache
+ * allocator to allocate its (internal) "range" data structure.
+ */
+
+#include <hwcore/paging.h>
+
+/* The base and top virtual addresses covered by the kernel allocator */
+#define SOS_KMEM_VMM_BASE 0x4000 /* 16kB */
+#define SOS_KMEM_VMM_TOP SOS_PAGING_MIRROR_VADDR /* 1GB - 4MB */
+
+/** Opaque structure used internally and declared here for physmem.h */
+struct sos_kmem_range;
+
+#include <sos/kmem_slab.h>
+
+/**
+ * Mark the areas between SOS_KMEM_VMM_BASE and SOS_KMEM_VMM_TOP as
+ * either used or free. Those that are already mapped are marked as
+ * "used", and the 0..SOS_KMEM_VMM_BASE virtual addresses are marked
+ * as "used" too (to detect incorrect pointer dereferences).
+ */
+sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_vaddr_t bootstrap_stack_bottom_addr,
+ sos_vaddr_t bootstrap_stack_top_addr);
+
+
+/*
+ * Flags for kmem_vmm_new_range and kmem_vmm_alloc
+ */
+/** Physical pages should be immediately mapped */
+#define SOS_KMEM_VMM_MAP (1<<0)
+/** Allocation should either succeed or fail, without blocking */
+#define SOS_KMEM_VMM_ATOMIC (1<<1)
+
+/**
+ * Allocate a new kernel area spanning one or multiple pages.
+ *
+ * @param range_base_vaddr If not NULL, the start address of the range
+ * is stored in this location
+ * @return a new range structure
+ */
+struct sos_kmem_range *sos_kmem_vmm_new_range(sos_size_t nb_pages,
+ sos_ui32_t flags,
+ sos_vaddr_t *range_base_vaddr);
+sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range);
+
+
+/**
+ * Straightforward variant of sos_kmem_vmm_new_range() returning the
+ * range's start address instead of the range structure
+ */
+sos_vaddr_t sos_kmem_vmm_alloc(sos_size_t nb_pages,
+ sos_ui32_t flags);
+
+/**
+ * @note you are perfectly allowed to give the address of the
+ * kernel image, or the address of the bios area here, it will work:
+ * the kernel/bios WILL be "deallocated". But if you really want to do
+ * this, well..., do expect some "surprises" ;)
+ */
+sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr);
+
+
+/**
+ * @return TRUE when vaddr is covered by any (used) kernel range
+ */
+sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr);
+
+
+/* *****************************
+ * Reserved to kmem_slab.c ONLY.
+ */
+/**
+ * Associate the range with the given slab.
+ */
+sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
+ struct sos_kslab *slab);
+
+/**
+ * Retrieve the (used) slab associated with the range covering vaddr.
+ *
+ * @return NULL when vaddr is not covered by any used KMEM range, or
+ * when that range has no associated slab
+ */
+struct sos_kslab *sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr);
+
+#endif /* _SOS_KMEM_VMM_H_ */
diff --git a/sos-code-article5/sos/list.h b/sos-code-article5/sos/list.h
new file mode 100644
index 0000000..67e72f3
--- /dev/null
+++ b/sos-code-article5/sos/list.h
@@ -0,0 +1,186 @@
+/* Copyright (C) 2001 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_LIST_H_
+#define _SOS_LIST_H_
+
+/**
+ * @file list.h
+ *
+ * Circular doubly-linked lists implementation entirely based on C
+ * macros
+ */
+
+
+/* *_named are used when next and prev links are not exactly next
+ and prev. For instance when we have next_in_team, prev_in_team,
+ prev_global and next_global */
+
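+/* For instance (a hypothetical example, using the field names from the
+   comment above):
+
+     struct thread
+     {
+       struct thread *prev_in_team, *next_in_team;
+       struct thread *prev_global,  *next_global;
+     };
+
+   The same struct thread can then be linked on a per-team list with
+   list_add_tail_named(team, t, prev_in_team, next_in_team) and on a
+   global list with list_add_tail_named(all, t, prev_global,
+   next_global). */
+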
+#define list_init_named(list,prev,next) \
+ ((list) = NULL)
+
+#define list_singleton_named(list,item,prev,next) ({ \
+ (item)->next = (item)->prev = (item); \
+ (list) = (item); \
+})
+
+#define list_is_empty_named(list,prev,next) \
+ ((list) == NULL)
+
+#define list_get_head_named(list,prev,next) \
+ (list)
+
+#define list_get_tail_named(list,prev,next) \
+ ((list)?((list)->prev):NULL)
+
+/* Internal macro : insert before the head == insert at tail */
+#define __list_insert_atleft_named(before_this,item,prev,next) ({ \
+ (before_this)->prev->next = (item); \
+ (item)->prev = (before_this)->prev; \
+ (before_this)->prev = (item); \
+ (item)->next = (before_this); \
+})
+
+/* @note Before_this and item are expected to be valid ! */
+#define list_insert_before_named(list,before_this,item,prev,next) ({ \
+ __list_insert_atleft_named(before_this,item,prev,next); \
+ if ((list) == (before_this)) (list) = (item); \
+})
+
+/** @note After_this and item are expected to be valid ! */
+#define list_insert_after_named(list,after_this,item,prev,next) ({ \
+ (after_this)->next->prev = (item); \
+ (item)->next = (after_this)->next; \
+ (after_this)->next = (item); \
+ (item)->prev = (after_this); \
+})
+
+#define list_add_head_named(list,item,prev,next) ({ \
+ if (list) \
+ list_insert_before_named(list,list,item,prev,next); \
+ else \
+ list_singleton_named(list,item,prev,next); \
+ (list) = (item); \
+})
+
+#define list_add_tail_named(list,item,prev,next) ({ \
+ if (list) \
+ __list_insert_atleft_named(list,item,prev,next); \
+ else \
+ list_singleton_named(list,item,prev,next); \
+})
+
+/** @note NO check whether item really is in list ! */
+#define list_delete_named(list,item,prev,next) ({ \
+ if ( ((item)->next == (item)) && ((item)->prev == (item)) ) \
+ (item)->next = (item)->prev = (list) = NULL; \
+ else { \
+ (item)->prev->next = (item)->next; \
+ (item)->next->prev = (item)->prev; \
+ if ((item) == (list)) (list) = (item)->next; \
+ (item)->prev = (item)->next = NULL; \
+ } \
+})
+
+#define list_pop_head_named(list,prev,next) ({ \
+ typeof(list) __ret_elt = (list); \
+ list_delete_named(list,__ret_elt,prev,next); \
+ __ret_elt; })
+
+/** Loop statement that iterates through all of its elements, from
+ head to tail */
+#define list_foreach_forward_named(list,iterator,nb_elements,prev,next) \
+ for (nb_elements=0, (iterator) = (list) ; \
+ (iterator) && (!nb_elements || ((iterator) != (list))) ; \
+ nb_elements++, (iterator) = (iterator)->next )
+
+/** Loop statement that iterates through all of its elements, from
+ tail back to head */
+#define list_foreach_backward_named(list,iterator,nb_elements,prev,next) \
+ for (nb_elements=0, (iterator) = list_get_tail_named(list,prev,next) ; \
+ (iterator) && (!nb_elements || \
+ ((iterator) != list_get_tail_named(list,prev,next))) ; \
+ nb_elements++, (iterator) = (iterator)->prev )
+
+#define list_foreach_named list_foreach_forward_named
+
+/** True when we exited early from the foreach loop (ie break) */
+#define list_foreach_early_break(list,iterator,nb_elements) \
+ ((list) && ( \
+ ((list) != (iterator)) || \
+ ( ((list) == (iterator)) && (nb_elements == 0)) ))
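+
+/* Usage sketch (illustration only): scan a list and detect whether
+   the scan was interrupted by a break:
+
+     int n;
+     struct some_elt *e;
+     list_foreach(some_list, e, n)
+       {
+         if (matches(e))
+           break;
+       }
+     if (list_foreach_early_break(some_list, e, n))
+       ... a matching element was found ...
+
+   (struct some_elt and matches() are hypothetical names.) */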
+
+/** Loop statement that also removes the item at each iteration */
+#define list_collapse_named(list,iterator,prev,next) \
+ for ( ; ({ ((iterator) = (list)) ; \
+ if (list) list_delete_named(list,iterator,prev,next) ; \
+ (iterator); }) ; )
+
+
+/*
+ * the same macros : assume that the prev and next fields are really
+ * named "prev" and "next"
+ */
+
+#define list_init(list) \
+ list_init_named(list,prev,next)
+
+#define list_singleton(list,item) \
+ list_singleton_named(list,item,prev,next)
+
+#define list_is_empty(list) \
+ list_is_empty_named(list,prev,next)
+
+#define list_get_head(list) \
+  list_get_head_named(list,prev,next)
+
+#define list_get_tail(list) \
+  list_get_tail_named(list,prev,next)
+
+/* @note After_this and item are expected to be valid ! */
+#define list_insert_after(list,after_this,item) \
+ list_insert_after_named(list,after_this,item,prev,next)
+
+/* @note Before_this and item are expected to be valid ! */
+#define list_insert_before(list,before_this,item) \
+ list_insert_before_named(list,before_this,item,prev,next)
+
+#define list_add_head(list,item) \
+ list_add_head_named(list,item,prev,next)
+
+#define list_add_tail(list,item) \
+ list_add_tail_named(list,item,prev,next)
+
+/* @note NO check whether item really is in list ! */
+#define list_delete(list,item) \
+ list_delete_named(list,item,prev,next)
+
+#define list_pop_head(list) \
+ list_pop_head_named(list,prev,next)
+
+#define list_foreach_forward(list,iterator,nb_elements) \
+ list_foreach_forward_named(list,iterator,nb_elements,prev,next)
+
+#define list_foreach_backward(list,iterator,nb_elements) \
+ list_foreach_backward_named(list,iterator,nb_elements,prev,next)
+
+#define list_foreach list_foreach_forward
+
+#define list_collapse(list,iterator) \
+ list_collapse_named(list,iterator,prev,next)
+
+#endif /* _SOS_LIST_H_ */
diff --git a/sos-code-article5/sos/macros.h b/sos-code-article5/sos/macros.h
new file mode 100644
index 0000000..80a05d3
--- /dev/null
+++ b/sos-code-article5/sos/macros.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_MACROS_H_
+#define _SOS_MACROS_H_
+
+/** Align on a boundary (MUST be a power of 2), so that return value <= val */
+#define SOS_ALIGN_INF(val,boundary) \
+ (((unsigned)(val)) & (~((boundary)-1)))
+
+/** Align on a boundary (MUST be a power of 2), so that return value >= val */
+#define SOS_ALIGN_SUP(val,boundary) \
+ ({ unsigned int __bnd=(boundary); \
+ (((((unsigned)(val))-1) & (~(__bnd - 1))) + __bnd); })
+
+/** Check whether val is aligned on a boundary (MUST be a power of 2) */
+#define SOS_IS_ALIGNED(val,boundary) \
+ ( 0 == (((unsigned)(val)) & ((boundary)-1)) )
+
+/**
+ * @return TRUE if val is a power of 2.
+ * @note val is evaluated multiple times
+ */
+#define SOS_IS_POWER_OF_2(val) \
+ ((((val) - 1) & (val)) == 0)
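+
+/* Worked examples (for illustration): with boundary 0x1000,
+     SOS_ALIGN_INF(0x1234, 0x1000) == 0x1000
+     SOS_ALIGN_SUP(0x1234, 0x1000) == 0x2000
+     SOS_ALIGN_SUP(0x1000, 0x1000) == 0x1000 (already aligned)
+     SOS_IS_ALIGNED(0x1234, 0x1000) is false, SOS_IS_POWER_OF_2(64) is true.
+*/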
+
+#endif /* _SOS_MACROS_H_ */
diff --git a/sos-code-article5/sos/main.c b/sos-code-article5/sos/main.c
new file mode 100644
index 0000000..39bb448
--- /dev/null
+++ b/sos-code-article5/sos/main.c
@@ -0,0 +1,450 @@
+/* Copyright (C) 2004 The SOS Team
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+/* Include definitions of the multiboot standard */
+#include <bootstrap/multiboot.h>
+#include <hwcore/idt.h>
+#include <hwcore/gdt.h>
+#include <hwcore/irq.h>
+#include <hwcore/exception.h>
+#include <hwcore/i8254.h>
+#include <sos/list.h>
+#include <sos/physmem.h>
+#include <hwcore/paging.h>
+#include <sos/kmem_vmm.h>
+#include <sos/kmalloc.h>
+#include <sos/klibc.h>
+#include <sos/assert.h>
+#include <drivers/x86_videomem.h>
+#include <drivers/bochs.h>
+
+
+/* Helper function to display each bit of a 32-bit integer on the
+   screen as dark or light carets */
+void display_bits(unsigned char row, unsigned char col,
+ unsigned char attribute,
+ sos_ui32_t integer)
+{
+ int i;
+ /* Scan each bit of the integer, MSb first */
+ for (i = 31 ; i >= 0 ; i--)
+ {
+ /* Test if bit i of 'integer' is set */
+ int bit_i = (integer & (1 << i));
+      /* Ascii 219 => dark caret, Ascii 177 => light caret */
+ unsigned char ascii_code = bit_i?219:177;
+ sos_x86_videomem_putchar(row, col++,
+ attribute,
+ ascii_code);
+ }
+}
+
+
+/* Clock IRQ handler */
+static void clk_it(int intid)
+{
+ static sos_ui32_t clock_count = 0;
+
+ display_bits(0, 48,
+ SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE,
+ clock_count);
+ clock_count++;
+}
+
+struct digit
+{
+ struct digit *prev, *next;
+ char value;
+};
+
+/* Representation of a big (positive) integer: Most Significant Digit
+ (MSD) is the HEAD of the list. Least Significant Digit (LSD) is the
+ TAIL of the list */
+typedef struct digit * big_number_t;
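+
+/* Example (illustration only): the integer 1024 is stored as the
+   circular list 1 <-> 0 <-> 2 <-> 4, with the digit 1 (MSD) as the
+   list head and the digit 4 (LSD) as the tail. */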
+
+
+/* Add a new digit after the LSD */
+void bn_push_lsd(big_number_t * bn, char value)
+{
+ struct digit *d;
+ d = (struct digit*) sos_kmalloc(sizeof(struct digit), 0);
+ SOS_ASSERT_FATAL(d != NULL);
+ d->value = value;
+ list_add_tail(*bn, d);
+}
+
+
+/* Add a new digit before the MSD */
+void bn_push_msd(big_number_t * bn, char value)
+{
+ struct digit *d;
+ d = (struct digit*) sos_kmalloc(sizeof(struct digit), 0);
+ SOS_ASSERT_FATAL(d != NULL);
+ d->value = value;
+ list_add_head(*bn, d);
+}
+
+
+/* Construct a big integer from a (machine) integer */
+big_number_t bn_new(unsigned long int i)
+{
+ big_number_t retval;
+
+ list_init(retval);
+ do
+ {
+ bn_push_msd(&retval, i%10);
+ i /= 10;
+ }
+ while (i != 0);
+
+ return retval;
+}
+
+
+/* Create a new big integer from another big integer */
+big_number_t bn_copy(const big_number_t bn)
+{
+ big_number_t retval;
+ int nb_elts;
+ struct digit *d;
+
+ list_init(retval);
+ list_foreach(bn, d, nb_elts)
+ {
+ bn_push_lsd(&retval, d->value);
+ }
+
+ return retval;
+}
+
+
+/* Free the memory used by a big integer */
+void bn_del(big_number_t * bn)
+{
+ struct digit *d;
+
+ list_collapse(*bn, d)
+ {
+ sos_kfree((sos_vaddr_t)d);
+ }
+}
+
+
+/* Shift left a big integer: bn := bn*10^shift */
+void bn_shift(big_number_t *bn, int shift)
+{
+ for ( ; shift > 0 ; shift --)
+ {
+ bn_push_lsd(bn, 0);
+ }
+}
+
+
+/* Dump the big integer in bochs */
+void bn_print_bochs(const big_number_t bn)
+{
+ int nb_elts;
+ const struct digit *d;
+
+ if (list_is_empty(bn))
+ sos_bochs_printf("0");
+ else
+ list_foreach(bn, d, nb_elts)
+ sos_bochs_printf("%d", d->value);
+}
+
+/* Dump the big integer on the console */
+void bn_print_console(unsigned char row, unsigned char col,
+ unsigned char attribute,
+ const big_number_t bn,
+ int nb_decimals)
+{
+ if (list_is_empty(bn))
+ sos_x86_videomem_printf(row, col, attribute, "0");
+ else
+ {
+ int nb_elts;
+ const struct digit *d;
+ unsigned char x = col;
+
+ list_foreach(bn, d, nb_elts)
+ {
+ if (nb_elts == 0)
+ {
+ sos_x86_videomem_printf(row, x, attribute, "%d.", d->value);
+ x += 2;
+ }
+ else if (nb_elts < nb_decimals)
+ {
+ sos_x86_videomem_printf(row, x, attribute, "%d", d->value);
+ x ++;
+ }
+ }
+
+ sos_x86_videomem_printf(row, x, attribute, " . 10^{%d} ", nb_elts-1);
+ }
+}
+
+
+/* Result is the addition of 2 big integers */
+big_number_t bn_add (const big_number_t bn1, const big_number_t bn2)
+{
+ big_number_t retval;
+ const struct digit *d1, *d2;
+ sos_bool_t bn1_end = FALSE, bn2_end = FALSE;
+ char carry = 0;
+
+ list_init(retval);
+ d1 = list_get_tail(bn1);
+ bn1_end = list_is_empty(bn1);
+ d2 = list_get_tail(bn2);
+ bn2_end = list_is_empty(bn2);
+ do
+ {
+ if (! bn1_end)
+ carry += d1->value;
+ if (! bn2_end)
+ carry += d2->value;
+
+ bn_push_msd(&retval, carry % 10);
+ carry /= 10;
+
+ if (! bn1_end)
+ d1 = d1->prev;
+ if (! bn2_end)
+ d2 = d2->prev;
+ if (d1 == list_get_tail(bn1))
+ bn1_end = TRUE;
+ if (d2 == list_get_tail(bn2))
+ bn2_end = TRUE;
+ }
+ while (!bn1_end || !bn2_end);
+
+ if (carry > 0)
+ {
+ bn_push_msd(&retval, carry);
+ }
+
+ return retval;
+}
+
+
+/* Result is the multiplication of a big integer by a single digit */
+big_number_t bn_muli (const big_number_t bn, char digit)
+{
+ big_number_t retval;
+ int nb_elts;
+ char carry = 0;
+ const struct digit *d;
+
+ list_init(retval);
+ list_foreach_backward(bn, d, nb_elts)
+ {
+ carry += d->value * digit;
+ bn_push_msd(&retval, carry % 10);
+ carry /= 10;
+ }
+
+ if (carry > 0)
+ {
+ bn_push_msd(&retval, carry);
+ }
+
+ return retval;
+}
+
+
+/* Result is the multiplication of 2 big integers */
+big_number_t bn_mult(const big_number_t bn1, const big_number_t bn2)
+{
+ int shift = 0;
+ big_number_t retval;
+ int nb_elts;
+ struct digit *d;
+
+ list_init(retval);
+ list_foreach_backward(bn2, d, nb_elts)
+ {
+ big_number_t retmult = bn_muli(bn1, d->value);
+ big_number_t old_retval = retval;
+ bn_shift(& retmult, shift);
+ retval = bn_add(old_retval, retmult);
+ bn_del(& retmult);
+ bn_del(& old_retval);
+ shift ++;
+ }
+
+ return retval;
+}
+
+
+/* Result is the factorial of an integer */
+big_number_t bn_fact(unsigned long int v)
+{
+ unsigned long int i;
+ big_number_t retval = bn_new(1);
+ for (i = 1 ; i <= v ; i++)
+ {
+ big_number_t I = bn_new(i);
+ big_number_t tmp = bn_mult(retval, I);
+ sos_x86_videomem_printf(4, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_LTGREEN,
+ "%d! = ", (int)i);
+ bn_print_console(4, 8, SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_WHITE,
+ tmp, 55);
+ bn_del(& I);
+ bn_del(& retval);
+ retval = tmp;
+ }
+
+ return retval;
+}
+
+
+void bn_test()
+{
+ big_number_t bn = bn_fact(1000);
+ sos_bochs_printf("1000! = ");
+ bn_print_bochs(bn);
+ sos_bochs_printf("\n");
+
+}
+
+
+
+/* The C entry point of our operating system */
+void sos_main(unsigned long magic, unsigned long addr)
+{
+ unsigned i;
+ sos_paddr_t sos_kernel_core_base_paddr, sos_kernel_core_top_paddr;
+
+  /* Grub hands us a structure called multiboot_info_t with a lot of
+     precious information about the system; see the multiboot
+     documentation for details. */
+ multiboot_info_t *mbi;
+ mbi = (multiboot_info_t *) addr;
+
+ /* Setup bochs and console, and clear the console */
+ sos_bochs_setup();
+
+ sos_x86_videomem_setup();
+ sos_x86_videomem_cls(SOS_X86_VIDEO_BG_BLUE);
+
+ /* Greetings from SOS */
+ if (magic == MULTIBOOT_BOOTLOADER_MAGIC)
+ /* Loaded with Grub */
+ sos_x86_videomem_printf(1, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)",
+ "SOS", ',',
+ (unsigned)(mbi->mem_upper >> 10) + 1,
+ (unsigned)mbi->mem_upper);
+ else
+ /* Not loaded with grub */
+ sos_x86_videomem_printf(1, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Welcome to SOS");
+
+ sos_bochs_putstring("Message in a bochs\n");
+
+ /* Setup CPU segmentation and IRQ subsystem */
+ sos_gdt_setup();
+ sos_idt_setup();
+
+ /* Setup SOS IRQs and exceptions subsystem */
+ sos_exceptions_setup();
+ sos_irq_setup();
+
+ /* Configure the timer so as to raise the IRQ0 at a 100Hz rate */
+ sos_i8254_set_frequency(100);
+
+ /* We need a multiboot-compliant boot loader to get the size of the RAM */
+ if (magic != MULTIBOOT_BOOTLOADER_MAGIC)
+ {
+ sos_x86_videomem_putstring(20, 0,
+ SOS_X86_VIDEO_FG_LTRED
+ | SOS_X86_VIDEO_BG_BLUE
+ | SOS_X86_VIDEO_FG_BLINKING,
+ "I'm not loaded with Grub !");
+ /* STOP ! */
+ for (;;)
+ continue;
+ }
+
+ /*
+ * Some interrupt handlers
+ */
+
+ /* Binding some HW interrupts and exceptions to software routines */
+ sos_irq_set_routine(SOS_IRQ_TIMER,
+ clk_it);
+
+ /*
+ * Setup physical memory management
+ */
+
+ /* Multiboot says: "The value returned for upper memory is maximally
+ the address of the first upper memory hole minus 1 megabyte.". It
+ also adds: "It is not guaranteed to be this value." aka "YMMV" ;) */
+ sos_physmem_setup((mbi->mem_upper<<10) + (1<<20),
+ & sos_kernel_core_base_paddr,
+ & sos_kernel_core_top_paddr);
+
+ /*
+ * Switch to paged-memory mode
+ */
+
+  /* Disabling interrupts might seem more correct, but it's not really
+     necessary at this stage */
+ if (sos_paging_setup(sos_kernel_core_base_paddr,
+ sos_kernel_core_top_paddr))
+ sos_bochs_printf("Could not setup paged memory mode\n");
+ sos_x86_videomem_printf(2, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Paged-memory mode is activated");
+
+
+ /*
+ * Setup kernel virtual memory allocator
+ */
+
+ if (sos_kmem_vmm_setup(sos_kernel_core_base_paddr,
+ sos_kernel_core_top_paddr,
+ bootstrap_stack_bottom,
+ bootstrap_stack_bottom + bootstrap_stack_size))
+ sos_bochs_printf("Could not setup the Kernel virtual space allocator\n");
+
+ if (sos_kmalloc_setup())
+ sos_bochs_printf("Could not setup the Kmalloc subsystem\n");
+
+ /* Run some kmalloc tests */
+ bn_test();
+
+ /*
+ * Enabling the HW interrupts here, this will make the timer HW
+ * interrupt call our clk_it handler
+ */
+ asm volatile ("sti\n");
+
+  /* An operating system never ends */
+ for (;;)
+ continue;
+
+ return;
+}
diff --git a/sos-code-article5/sos/physmem.c b/sos-code-article5/sos/physmem.c
new file mode 100644
index 0000000..2b4046d
--- /dev/null
+++ b/sos-code-article5/sos/physmem.c
@@ -0,0 +1,318 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include <sos/list.h>
+#include <sos/macros.h>
+#include <sos/assert.h>
+#include <sos/klibc.h>
+
+#include "physmem.h"
+
+/** A descriptor for a physical page in SOS */
+struct physical_page_descr
+{
+ /** The physical base address for the page */
+ sos_paddr_t paddr;
+
+ /** The reference count for this physical page. > 0 means that the
+ page is in the used list. */
+ sos_count_t ref_cnt;
+
+ /** Some data associated with the page when it is mapped in kernel space */
+ struct sos_kmem_range *kernel_range;
+
+ /** The other pages on the list (used, free) */
+ struct physical_page_descr *prev, *next;
+};
+
+/** These are some markers present in the executable file (see sos.lds) */
+extern char __b_kernel, __e_kernel;
+
+/** The array of ppage descriptors will be located at this address */
+#define PAGE_DESCR_ARRAY_ADDR \
+ SOS_PAGE_ALIGN_SUP((sos_paddr_t) (& __e_kernel))
+static struct physical_page_descr * physical_page_descr_array;
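+
+/* Rough cost sketch (an estimate assuming the 32-bit types above): one
+   struct physical_page_descr is about 20 bytes, so describing 32 MB of
+   RAM (8192 pages) takes roughly 160 kB, placed right after the kernel
+   image. */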
+
+/** The list of physical pages currently available */
+static struct physical_page_descr *free_ppage;
+
+/** The list of physical pages currently in use */
+static struct physical_page_descr *used_ppage;
+
+/** We will store here the interval of valid physical addresses */
+static sos_paddr_t physmem_base, physmem_top;
+
+/** We store the number of pages used/free */
+static sos_count_t physmem_total_pages, physmem_used_pages;
+
+sos_ret_t sos_physmem_setup(sos_size_t ram_size,
+ /* out */sos_paddr_t *kernel_core_base,
+ /* out */sos_paddr_t *kernel_core_top)
+{
+ /* The iterator over the page descriptors */
+ struct physical_page_descr *ppage_descr;
+
+ /* The iterator over the physical addresses */
+ sos_paddr_t ppage_addr;
+
+ /* Make sure ram size is aligned on a page boundary */
+ ram_size = SOS_PAGE_ALIGN_INF(ram_size);/* Yes, we may lose at most a page */
+
+ /* Reset the used/free page lists before building them */
+ free_ppage = used_ppage = NULL;
+ physmem_total_pages = physmem_used_pages = 0;
+
+ /* Make sure that there is enough memory to store the array of page
+ descriptors */
+ *kernel_core_base = SOS_PAGE_ALIGN_INF((sos_paddr_t)(& __b_kernel));
+ *kernel_core_top
+ = PAGE_DESCR_ARRAY_ADDR
+ + SOS_PAGE_ALIGN_SUP( (ram_size >> SOS_PAGE_SHIFT)
+ * sizeof(struct physical_page_descr));
+ if (*kernel_core_top > ram_size)
+ return -SOS_ENOMEM;
+
+  /* The page at 0-4kB is kept unavailable so that address 0 can be
+     returned to mean "no page available" */
+ physmem_base = SOS_PAGE_SIZE;
+ physmem_top = ram_size;
+
+  /* Setup the page descriptor array */
+ physical_page_descr_array
+ = (struct physical_page_descr*)PAGE_DESCR_ARRAY_ADDR;
+
+ /* Scan the list of physical pages */
+ for (ppage_addr = 0,
+ ppage_descr = physical_page_descr_array ;
+ ppage_addr < physmem_top ;
+ ppage_addr += SOS_PAGE_SIZE,
+ ppage_descr ++)
+ {
+ enum { PPAGE_MARK_RESERVED, PPAGE_MARK_FREE,
+ PPAGE_MARK_KERNEL, PPAGE_MARK_HWMAP } todo;
+
+ memset(ppage_descr, 0x0, sizeof(struct physical_page_descr));
+
+ /* Init the page descriptor for this page */
+ ppage_descr->paddr = ppage_addr;
+
+ /* Reserved : 0 ... base */
+ if (ppage_addr < physmem_base)
+ todo = PPAGE_MARK_RESERVED;
+
+ /* Free : base ... BIOS */
+ else if ((ppage_addr >= physmem_base)
+ && (ppage_addr < BIOS_N_VIDEO_START))
+ todo = PPAGE_MARK_FREE;
+
+ /* Used : BIOS */
+ else if ((ppage_addr >= BIOS_N_VIDEO_START)
+ && (ppage_addr < BIOS_N_VIDEO_END))
+ todo = PPAGE_MARK_HWMAP;
+
+ /* Free : BIOS ... kernel */
+ else if ((ppage_addr >= BIOS_N_VIDEO_END)
+ && (ppage_addr < (sos_paddr_t) (& __b_kernel)))
+ todo = PPAGE_MARK_FREE;
+
+      /* Used : Kernel code/data/bss + physical page descr array */
+ else if ((ppage_addr >= *kernel_core_base)
+ && (ppage_addr < *kernel_core_top))
+ todo = PPAGE_MARK_KERNEL;
+
+ /* Free : first page of descr ... end of RAM */
+ else
+ todo = PPAGE_MARK_FREE;
+
+      /* Actually do the insertion into the used/free page lists */
+ physmem_total_pages ++;
+ switch (todo)
+ {
+ case PPAGE_MARK_FREE:
+ ppage_descr->ref_cnt = 0;
+ list_add_head(free_ppage, ppage_descr);
+ break;
+
+ case PPAGE_MARK_KERNEL:
+ case PPAGE_MARK_HWMAP:
+ ppage_descr->ref_cnt = 1;
+ list_add_head(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+ break;
+
+ default:
+ /* Reserved page: nop */
+ break;
+ }
+ }
+
+ return SOS_OK;
+}
+
+
+sos_paddr_t sos_physmem_ref_physpage_new(sos_bool_t can_block)
+{
+ struct physical_page_descr *ppage_descr;
+
+ if (! free_ppage)
+ return (sos_paddr_t)NULL;
+
+ /* Retrieve a page in the free list */
+ ppage_descr = list_pop_head(free_ppage);
+
+ /* The page is assumed not to be already used */
+ SOS_ASSERT_FATAL(ppage_descr->ref_cnt == 0);
+
+ /* Mark the page as used (this of course sets the ref count to 1) */
+ ppage_descr->ref_cnt ++;
+
+ /* No associated kernel range by default */
+ ppage_descr->kernel_range = NULL;
+
+ /* Put the page in the used list */
+ list_add_tail(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+
+ return ppage_descr->paddr;
+}
+
+
+/**
+ * Helper function to get the physical page descriptor for the given
+ * physical page address.
+ *
+ * @return NULL when out-of-bounds or non-page-aligned
+ */
+inline static struct physical_page_descr *
+get_page_descr_at_paddr(sos_paddr_t ppage_paddr)
+{
+ /* Don't handle non-page-aligned addresses */
+ if (ppage_paddr & SOS_PAGE_MASK)
+ return NULL;
+
+ /* Don't support out-of-bounds requests */
+ if ((ppage_paddr < physmem_base) || (ppage_paddr >= physmem_top))
+ return NULL;
+
+ return physical_page_descr_array + (ppage_paddr >> SOS_PAGE_SHIFT);
+}
+
+
+sos_ret_t sos_physmem_ref_physpage_at(sos_paddr_t ppage_paddr)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ /* Increment the reference count for the page */
+ ppage_descr->ref_cnt ++;
+
+ /* If the page is newly referenced (ie we are the only owners of the
+     page => ref cnt == 1), transfer it to the used pages list */
+ if (ppage_descr->ref_cnt == 1)
+ {
+ list_delete(free_ppage, ppage_descr);
+
+ /* No associated kernel range by default */
+ ppage_descr->kernel_range = NULL;
+
+ list_add_tail(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+
+ /* The page is newly referenced */
+ return FALSE;
+ }
+
+ /* The page was already referenced by someone */
+ return TRUE;
+}
+
+
+sos_ret_t
+sos_physmem_unref_physpage(sos_paddr_t ppage_paddr)
+{
+ /* By default the return value indicates that the page is still
+ used */
+ sos_ret_t retval = FALSE;
+
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ /* Don't do anything if the page is not in the used list */
+ if (ppage_descr->ref_cnt <= 0)
+ return -SOS_EINVAL;
+
+ /* Unreference the page, and, when no mapping is active anymore, put
+ the page in the free list */
+ ppage_descr->ref_cnt--;
+ if (ppage_descr->ref_cnt <= 0)
+ {
+ /* Reset associated kernel range */
+ ppage_descr->kernel_range = NULL;
+
+ /* Transfer the page, considered USED, to the free list */
+ list_delete(used_ppage, ppage_descr);
+ physmem_used_pages --;
+ list_add_head(free_ppage, ppage_descr);
+
+ /* Indicate that the page is now unreferenced */
+ retval = TRUE;
+ }
+
+ return retval;
+}
+
+
+struct sos_kmem_range* sos_physmem_get_kmem_range(sos_paddr_t ppage_paddr)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return NULL;
+
+ return ppage_descr->kernel_range;
+}
+
+
+sos_ret_t sos_physmem_set_kmem_range(sos_paddr_t ppage_paddr,
+ struct sos_kmem_range *range)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ ppage_descr->kernel_range = range;
+ return SOS_OK;
+}
+
+sos_ret_t sos_physmem_get_state(/* out */sos_count_t *total_ppages,
+ /* out */sos_count_t *used_ppages)
+{
+ if (total_ppages)
+ *total_ppages = physmem_total_pages;
+ if (used_ppages)
+ *used_ppages = physmem_used_pages;
+ return SOS_OK;
+}
diff --git a/sos-code-article5/sos/physmem.h b/sos-code-article5/sos/physmem.h
new file mode 100644
index 0000000..54b36b3
--- /dev/null
+++ b/sos-code-article5/sos/physmem.h
@@ -0,0 +1,146 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_PHYSMEM_H_
+#define _SOS_PHYSMEM_H_
+
+/**
+ * @file physmem.h
+ *
+ * Physical pages of memory
+ */
+
+#include <sos/errno.h>
+#include <sos/types.h>
+#include <sos/macros.h>
+
+/** The size of a physical page (arch-dependent) */
+#define SOS_PAGE_SIZE (4*1024)
+
+/** The corresponding shift */
+#define SOS_PAGE_SHIFT 12 /* 4 kB = 2^12 B */
+
+/** The corresponding mask */
+#define SOS_PAGE_MASK ((1<<12) - 1)
+
+#define SOS_PAGE_ALIGN_INF(val) \
+ SOS_ALIGN_INF((val), SOS_PAGE_SIZE)
+#define SOS_PAGE_ALIGN_SUP(val) \
+ SOS_ALIGN_SUP((val), SOS_PAGE_SIZE)
+#define SOS_IS_PAGE_ALIGNED(val) \
+ SOS_IS_ALIGNED((val), SOS_PAGE_SIZE)
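+
+/* Worked examples (for illustration):
+     SOS_PAGE_ALIGN_INF(0x12345) == 0x12000
+     SOS_PAGE_ALIGN_SUP(0x12345) == 0x13000
+     SOS_IS_PAGE_ALIGNED(0x12000) is true. */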
+
+/**
+ * This is the reserved physical interval for the x86 video memory and
+ * BIOS area. In physmem.c, we have to mark this area as "used" in
+ * order to prevent it from being allocated. And in paging.c, we'd
+ * better map it in virtual space if we really want to be able to print
+ * to the screen (for debugging purposes, at least): for this, the
+ * simplest is to identity-map this area in virtual space (note
+ * however that this mapping could also be non-identical).
+ */
+#define BIOS_N_VIDEO_START 0xa0000
+#define BIOS_N_VIDEO_END 0x100000
+
+
+/**
+ * Initialize the physical memory subsystem, for the physical area [0,
+ * ram_size). This routine takes the BIOS and video areas into
+ * account, to exclude them from future allocations.
+ *
+ * @param ram_size The size of the RAM that will be managed by this subsystem
+ *
+ * @param kernel_core_base The lowest address for which the kernel
+ * assumes identity mapping (ie virtual address == physical address)
+ * will be stored here
+ *
+ * @param kernel_core_top The top address for which the kernel
+ * assumes identity mapping (ie virtual address == physical address)
+ * will be stored here
+ */
+sos_ret_t sos_physmem_setup(sos_size_t ram_size,
+ /* out */sos_paddr_t *kernel_core_base,
+ /* out */sos_paddr_t *kernel_core_top);
+
+/**
+ * Retrieve the total number of pages, and the number of free pages
+ */
+sos_ret_t sos_physmem_get_state(/* out */sos_count_t *total_ppages,
+ /* out */sos_count_t *used_ppages);
+
+
+/**
+ * Get a free page.
+ *
+ * @return The (physical) address of the (physical) page allocated, or
+ * NULL when none currently available.
+ *
+ * @param can_block TRUE if the function is allowed to block
+ * @note The page returned has a reference count equal to 1.
+ */
+sos_paddr_t sos_physmem_ref_physpage_new(sos_bool_t can_block);
+
+
+/**
+ * Increment the reference count of a given physical page. Useful for
+ * VM code which tries to map a precise physical address.
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return TRUE when the page was previously in use, FALSE when the
+ * page was previously in the free list, <0 when the page address is
+ * invalid.
+ */
+sos_ret_t sos_physmem_ref_physpage_at(sos_paddr_t ppage_paddr);
+
+
+/**
+ * Decrement the reference count of the given physical page. When this
+ * reference count reaches 0, the page is marked free, ie is available
+ * for a future sos_physmem_ref_physpage_new()
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return FALSE when the page is still in use, TRUE when the page is now
+ * unreferenced, <0 when the page address is invalid
+ */
+sos_ret_t sos_physmem_unref_physpage(sos_paddr_t ppage_paddr);
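+
+/* Typical lifecycle sketch (illustration only):
+
+     sos_paddr_t p = sos_physmem_ref_physpage_new(TRUE);  ref_cnt = 1
+     if (p)
+       {
+         ... map or use the page ...
+         sos_physmem_unref_physpage(p);  ref_cnt = 0 => back on free list
+       }
+*/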
+
+
+#include <sos/kmem_vmm.h>
+
+/**
+ * Return the kernel memory allocation range associated with the given
+ * physical page, or NULL when page has no associated range
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ */
+struct sos_kmem_range* sos_physmem_get_kmem_range(sos_paddr_t ppage_paddr);
+
+
+/**
+ * Set the kernel memory allocation range associated to the given
+ * physical page.
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return error if page is invalid
+ */
+sos_ret_t sos_physmem_set_kmem_range(sos_paddr_t ppage_paddr,
+ struct sos_kmem_range *range);
+
+#endif /* _SOS_PHYSMEM_H_ */
diff --git a/sos-code-article5/sos/types.h b/sos-code-article5/sos/types.h
new file mode 100644
index 0000000..106973a
--- /dev/null
+++ b/sos-code-article5/sos/types.h
@@ -0,0 +1,53 @@
+/* Copyright (C) 2004 The SOS Team
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_TYPES_H_
+#define _SOS_TYPES_H_
+
+/**
+ * @file types.h
+ *
+ * SOS basic types definition
+ */
+
+/** Physical address */
+typedef unsigned int sos_paddr_t;
+
+/** Generic virtual address (kernel or user) */
+typedef unsigned int sos_vaddr_t;
+
+/** Memory size of an object (positive) */
+typedef unsigned int sos_size_t;
+/** Generic count of objects */
+typedef unsigned int sos_count_t;
+
+/** Low-level sizes */
+typedef unsigned long int sos_ui32_t; /* 32b unsigned */
+typedef unsigned short int sos_ui16_t; /* 16b unsigned */
+typedef unsigned char sos_ui8_t; /* 8b unsigned */
+typedef signed long int sos_si32_t; /* 32b signed */
+typedef signed short int sos_si16_t; /* 16b signed */
+typedef signed char sos_si8_t; /* 8b signed */
+
+typedef enum { FALSE=0, TRUE } sos_bool_t;
+
+/** Not a proper type, but highly useful with basic type
+ manipulations */
+#define NULL ((void*)0)
+
+#endif /* _SOS_TYPES_H_ */