Diffstat (limited to 'sos-code-article6/sos')
-rw-r--r--   sos-code-article6/sos/assert.c       44
-rw-r--r--   sos-code-article6/sos/assert.h       45
-rw-r--r--   sos-code-article6/sos/errno.h        40
-rw-r--r--   sos-code-article6/sos/klibc.c       308
-rw-r--r--   sos-code-article6/sos/klibc.h       103
-rw-r--r--   sos-code-article6/sos/kmalloc.c     113
-rw-r--r--   sos-code-article6/sos/kmalloc.h      63
-rw-r--r--   sos-code-article6/sos/kmem_slab.c   812
-rw-r--r--   sos-code-article6/sos/kmem_slab.h   206
-rw-r--r--   sos-code-article6/sos/kmem_vmm.c    606
-rw-r--r--   sos-code-article6/sos/kmem_vmm.h    113
-rw-r--r--   sos-code-article6/sos/list.h        186
-rw-r--r--   sos-code-article6/sos/macros.h       41
-rw-r--r--   sos-code-article6/sos/main.c       1162
-rw-r--r--   sos-code-article6/sos/physmem.c     319
-rw-r--r--   sos-code-article6/sos/physmem.h     147
-rw-r--r--   sos-code-article6/sos/types.h        52
17 files changed, 4360 insertions, 0 deletions
diff --git a/sos-code-article6/sos/assert.c b/sos-code-article6/sos/assert.c
new file mode 100644
index 0000000..2bc0d41
--- /dev/null
+++ b/sos-code-article6/sos/assert.c
@@ -0,0 +1,44 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/klibc.h>
+#include <drivers/bochs.h>
+#include <drivers/x86_videomem.h>
+
+#include "assert.h"
+
+void sos_display_fatal_error(const char *format, /* args */...)
+{
+ char buff[256];
+ va_list ap;
+
+  asm("cli\n"); /* disable interrupts -- x86 only */
+
+ va_start(ap, format);
+ vsnprintf(buff, sizeof(buff), format, ap);
+ va_end(ap);
+
+ sos_bochs_putstring(buff); sos_bochs_putstring("\n");
+ sos_x86_videomem_putstring(23, 0,
+ SOS_X86_VIDEO_BG_BLACK
+ | SOS_X86_VIDEO_FG_LTRED , buff);
+
+ /* Infinite loop: processor halted */
+ for ( ; ; )
+ asm("hlt\n");
+}
diff --git a/sos-code-article6/sos/assert.h b/sos-code-article6/sos/assert.h
new file mode 100644
index 0000000..9fcfec0
--- /dev/null
+++ b/sos-code-article6/sos/assert.h
@@ -0,0 +1,45 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_ASSERT_H_
+#define _SOS_ASSERT_H_
+
+
+void sos_display_fatal_error(const char *format, /* args */...)
+ __attribute__ ((format (printf, 1, 2), noreturn));
+
+
+/**
+ * If the expr is FALSE, print a message and halt the machine
+ */
+#define SOS_ASSERT_FATAL(expr) \
+ ({ \
+ int __res=(int)(expr); \
+ if (! __res) \
+ sos_display_fatal_error("%s@%s:%d Assertion " # expr " failed", \
+ __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+ })
+
+
+#define SOS_FATAL_ERROR(fmt,args...) \
+ ({ \
+ sos_display_fatal_error("%s@%s:%d FATAL: " fmt, \
+ __PRETTY_FUNCTION__, __FILE__, __LINE__, \
+ ##args); \
+ })
+
+#endif /* _SOS_ASSERT_H_ */
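A minimal usage sketch of the two macros above; check_stack and its argument are purely illustrative, not part of the commit:

    #include <sos/assert.h>

    void check_stack(unsigned int esp)   /* hypothetical helper */
    {
      /* Prints "function@file:line Assertion ... failed" and halts */
      SOS_ASSERT_FATAL((esp & 3) == 0);

      if (esp == 0)
        SOS_FATAL_ERROR("bad stack pointer %x", esp);
    }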
diff --git a/sos-code-article6/sos/errno.h b/sos-code-article6/sos/errno.h
new file mode 100644
index 0000000..bdc2a11
--- /dev/null
+++ b/sos-code-article6/sos/errno.h
@@ -0,0 +1,40 @@
+/* Copyright (C) 2004 The SOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_ERRNO_H_
+#define _SOS_ERRNO_H_
+
+/**
+ * @file errno.h
+ *
+ * SOS return value codes and errors.
+ */
+
+/* Positive values of the error codes */
+#define SOS_OK 0 /* No error */
+#define SOS_EINVAL 1 /* Invalid argument */
+#define SOS_ENOSUP 2 /* Operation not supported */
+#define SOS_ENOMEM 3 /* No available memory */
+#define SOS_EBUSY 4 /* Object or device still in use */
+#define SOS_EFATAL 255 /* Internal fatal error */
+
+/* A negative value means that an error occurred. For
+ * example, -SOS_EINVAL means that the error was "invalid
+ * argument" */
+typedef int sos_ret_t;
+
+#endif /* _SOS_ERRNO_H_ */
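The sos_ret_t convention above (positive error constants, negated on return) is used throughout SOS; a minimal sketch of both sides, with sos_do_something purely hypothetical:

    #include <sos/errno.h>

    sos_ret_t sos_do_something(int arg)
    {
      if (arg < 0)
        return -SOS_EINVAL;   /* errors are returned negated */
      return SOS_OK;          /* SOS_OK == 0 */
    }

    /* Caller side: any value < 0 is an error code */
    sos_ret_t ret = sos_do_something(42);
    if (ret < 0)
      { /* handle -SOS_EINVAL, -SOS_ENOMEM, ... */ }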
diff --git a/sos-code-article6/sos/klibc.c b/sos-code-article6/sos/klibc.c
new file mode 100644
index 0000000..4442842
--- /dev/null
+++ b/sos-code-article6/sos/klibc.c
@@ -0,0 +1,308 @@
+/* Copyright (C) 2004 David Decotigny (with INSA Rennes for vsnprintf)
+ Copyright (C) 2003 The KOS Team
+ Copyright (C) 1999 Free Software Foundation
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include "klibc.h"
+
+/* For an optimized version, see BSD sources ;) */
+void *memcpy(void *dst0, const void *src0, register unsigned int size)
+{
+ char *dst;
+ const char *src;
+ for (dst = (char*)dst0, src = (const char*)src0 ;
+ size > 0 ;
+ dst++, src++, size--)
+ *dst = *src;
+ return dst0;
+}
+
+/* ditto */
+void *memset(void *dst0, register int c, register unsigned int length)
+{
+ char *dst;
+ for (dst = (char*) dst0 ;
+ length > 0 ;
+ dst++, length --)
+ *dst = (char)c;
+ return dst0;
+}
+
+int memcmp(const void *s1, const void *s2, sos_size_t len)
+{
+ const unsigned char *c1, *c2;
+ unsigned int i;
+
+ for (i = 0, c1 = s1, c2 = s2; i < len; i++, c1++, c2++)
+ {
+ if(*c1 != *c2)
+ return *c1 - *c2;
+ }
+
+ return 0;
+}
+
+
+unsigned int strlen(register const char *str)
+{
+ unsigned int retval = 0;
+
+ while (*str++)
+ retval++;
+
+ return retval;
+}
+
+
+unsigned int strnlen(const char * s, sos_size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */continue;
+
+ return sc - s;
+}
+
+
+char *strzcpy(register char *dst, register const char *src, register int len)
+{
+ int i;
+
+ if (len <= 0)
+ return dst;
+
+ for (i = 0; i < len; i++)
+ {
+ dst[i] = src[i];
+ if(src[i] == '\0')
+ return dst;
+ }
+
+ dst[len-1] = '\0';
+ return dst;
+}
+
+
+char *strzcat (char *dest, const char *src, sos_size_t n)
+{
+  char *res = dest;
+
+  /* Locate the end of the current string */
+  for ( ; *dest ; dest++);
+
+  /* Append at most n-1 characters, keeping room for the
+     terminating '\0' (n is unsigned, so test before decrementing) */
+  for ( ; *src && (n > 1) ; src++, dest++, n--)
+    *dest = *src;
+
+  *dest = '\0';
+  return res;
+}
+
+int strcmp(register const char *s1, register const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == 0)
+ return (0);
+
+ return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1));
+}
+
+
+int strncmp(register const char *s1, register const char *s2, register int len)
+{
+ char c1 = '\0', c2 = '\0';
+
+ while (len > 0)
+ {
+ c1 = (unsigned char) *s1++;
+ c2 = (unsigned char) *s2++;
+ if (c1 == '\0' || c1 != c2)
+ return c1 - c2;
+ len--;
+ }
+
+ return c1 - c2;
+}
+
+
+static unsigned long int _random_seed = 93186752;
+
+/**
+ * The following code is borrowed from Glenn Rhoads.
+ * http://remus.rutgers.edu/~rhoads/Code/code.html
+ * License to be defined...
+ */
+unsigned long int random (void)
+{
+/* The following parameters are recommended settings based on research;
+   uncomment the one you want to use. */
+
+/* For RAND_MAX == 4294967291 */
+ static unsigned int a = 1588635695, q = 2, r = 1117695901;
+/* static unsigned int a = 1223106847, m = 4294967291U, q = 3, r = 625646750;*/
+/* static unsigned int a = 279470273, m = 4294967291U, q = 15, r = 102913196;*/
+
+/* For RAND_MAX == 2147483647 */
+/* static unsigned int a = 1583458089, m = 2147483647, q = 1, r = 564025558; */
+/* static unsigned int a = 784588716, m = 2147483647, q = 2, r = 578306215; */
+/* static unsigned int a = 16807, m = 2147483647, q = 127773, r = 2836; */
+/* static unsigned int a = 950706376, m = 2147483647, q = 2, r = 246070895; */
+
+ _random_seed = a*(_random_seed % q) - r*(_random_seed / q);
+ return _random_seed;
+}
+
+
+void srandom (unsigned long int seed)
+{
+ _random_seed = seed;
+}
+
+
+/* I (d2) borrowed and rewrote this for Nachos/INSA Rennes. Thanks to
+ them for having kindly allowed me to do so. */
+int vsnprintf(char *buff, sos_size_t len, const char * format, va_list ap)
+{
+ sos_size_t i, result;
+
+  if (!buff || !format || (len <= 0))
+ return -1;
+
+#define PUTCHAR(thechar) \
+ do { \
+ if (result < len-1) \
+ *buff++ = (thechar); \
+ result++; \
+ } while (0)
+
+ result = 0;
+ for(i=0 ; format[i] != '\0' ; i++){
+ switch (format[i])
+ {
+ case '%':
+ i++;
+ switch(format[i])
+ {
+ case '%':
+ {
+ PUTCHAR('%');
+ break;
+ }
+	  case 'i':
+ case 'd':
+ {
+ int integer = va_arg(ap,int);
+ int cpt2 = 0;
+ char buff_int[16];
+
+ if (integer<0)
+ PUTCHAR('-');
+	      /* Don't do integer = -integer here, because INT_MIN
+	         has no positive counterpart (int = [-2^31, 2^31-1]) */
+
+ do {
+ int m10 = integer%10;
+ m10 = (m10 < 0)? -m10:m10;
+ buff_int[cpt2++]=(char)('0'+ m10);
+ integer=integer/10;
+ } while(integer!=0);
+
+ for(cpt2 = cpt2 - 1 ; cpt2 >= 0 ; cpt2--)
+ PUTCHAR(buff_int[cpt2]);
+
+ break;
+ }
+
+ case 'c':
+ {
+ int value = va_arg(ap,int);
+ PUTCHAR((char)value);
+ break;
+ }
+
+ case 's':
+ {
+ char *string = va_arg(ap,char *);
+ if (! string)
+ string = "(null)";
+ for( ; *string != '\0' ; string++)
+ PUTCHAR(*string);
+ break;
+ }
+
+ case 'p':
+ PUTCHAR('0');
+ PUTCHAR('x');
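+	    /* fall through: the value itself is printed by the 'x' case */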
+ case 'x':
+ {
+ unsigned int hexa = va_arg(ap,int);
+ unsigned int nb;
+ int i, had_nonzero = 0;
+ for(i=0 ; i < 8 ; i++)
+ {
+ nb = (unsigned int)(hexa << (i*4));
+ nb = (nb >> 28) & 0xf;
+ // Skip the leading zeros
+ if (nb == 0)
+ {
+ if (had_nonzero)
+ PUTCHAR('0');
+ }
+ else
+ {
+ had_nonzero = 1;
+ if (nb < 10)
+ PUTCHAR('0'+nb);
+ else
+ PUTCHAR('a'+(nb-10));
+ }
+ }
+ if (! had_nonzero)
+ PUTCHAR('0');
+ break;
+ }
+
+ default:
+ PUTCHAR('%');
+ PUTCHAR(format[i]);
+ }
+ break;
+
+ default:
+ PUTCHAR(format[i]);
+ }
+ }
+
+ *buff = '\0';
+ return result;
+}
+
+
+int snprintf(char * buff, sos_size_t len, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ len = vsnprintf(buff, len, format, ap);
+ va_end(ap);
+
+ return len;
+}
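A sketch of the guarantees of this vsnprintf/snprintf pair; the counts assume the code above:

    char buf[8];
    int n;

    n = snprintf(buf, sizeof(buf), "%p", (void*)0x1234);
    /* buf == "0x1234", n == 6 (the '%p' case prints "0x" then hex) */

    n = snprintf(buf, sizeof(buf), "value: %d", 1000);
    /* Truncated: buf holds "value: " plus '\0' (7 chars + NUL),
       while n == 11, the length of the untruncated string */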
diff --git a/sos-code-article6/sos/klibc.h b/sos-code-article6/sos/klibc.h
new file mode 100644
index 0000000..7002778
--- /dev/null
+++ b/sos-code-article6/sos/klibc.h
@@ -0,0 +1,103 @@
+/* Copyright (C) 2003 The KOS Team
+ Copyright (C) 1999 Free Software Foundation
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KLIBC_H_
+#define _SOS_KLIBC_H_
+
+/**
+ * @file klibc.h
+ *
+ * Basic libc-style support for common useful functions (string.h,
+ * stdarg.h), some with slight non-standard behavior (see comments).
+ *
+ * Most of the prototypes of these functions are borrowed from
+ * FreeBSD, but their implementations (in klibc.c) come either from
+ * Kos (GPL v2) or from David Decotigny (SOS).
+ */
+
+#include <sos/types.h>
+
+/* string.h functions */
+
+void *memcpy(void *dst, const void *src, register unsigned int size ) ;
+void *memset(void *dst, register int c, register unsigned int length ) ;
+int memcmp(const void *s1, const void *s2, sos_size_t n);
+
+unsigned int strlen( register const char *str) ;
+unsigned int strnlen(const char * s, sos_size_t maxlen);
+
+/**
+ * @note Same as strncpy(), with slightly different semantics.
+ * strncpy(3C) says: "The result will not be null-terminated if
+ * the length of 'from' is n or more". Here, 'dst' is ALWAYS
+ * null-terminated, and its total length will ALWAYS be <= len,
+ * null-terminating char included.
+ */
+char *strzcpy( register char *dst, register const char *src,
+ register int len ) ;
+
+/**
+ * @note Same as strncat(), with the same semantics: 'dst' is ALWAYS
+ * null-terminated, and its total length will ALWAYS be <= len,
+ * null-terminating char included.
+ */
+char *strzcat (char *dest, const char *src,
+ const sos_size_t len);
+
+int strcmp(register const char *s1, register const char *s2 );
+int strncmp(register const char *s1, register const char *s2,
+ register int len );
+
+/* Basic stdarg.h macros. Taken from gcc support files */
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+typedef __gnuc_va_list va_list;
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+#define va_end(AP) \
+ ((void)0)
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#define __va_copy(dest, src) \
+ (dest) = (src)
+
+/* printf()-like functions, with one non-standard behavior: there
+   will always be a trailing '\0' in the resulting string */
+int vsnprintf(char *, sos_size_t, const char *, va_list);
+int snprintf(char *, sos_size_t, const char *, /*args*/ ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+
+/*
+ * Pseudo-random generation functions. Useful to do some coverage
+ * tests.
+ */
+
+/* Amplitude of the random number generation */
+#define RAND_MAX 4294967291U
+
+/* Pseudo-random number generation (MT unsafe) */
+unsigned long int random (void);
+
+/* Set random seed (MT unsafe) */
+void srandom (unsigned long int seed);
+
+#endif /* _SOS_KLIBC_H_ */
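For contrast with the standard functions, a small sketch of the strzcpy() guarantee documented above:

    char dst[4];

    strzcpy(dst, "abcdef", sizeof(dst));
    /* dst == "abc": truncated but still '\0'-terminated, whereas
       strncpy(dst, "abcdef", 4) would leave dst unterminated */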
diff --git a/sos-code-article6/sos/kmalloc.c b/sos-code-article6/sos/kmalloc.c
new file mode 100644
index 0000000..62d948d
--- /dev/null
+++ b/sos-code-article6/sos/kmalloc.c
@@ -0,0 +1,113 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/assert.h>
+#include <sos/macros.h>
+
+#include "physmem.h"
+#include "kmem_vmm.h"
+#include "kmem_slab.h"
+
+#include "kmalloc.h"
+
+/* The pre-allocated kmalloc caches: for each, its name, the object
+   size, the number of pages per slab, and (once created) the cache
+   structure itself. The object sizes need not be powers of 2. */
+static struct {
+ const char *name;
+ sos_size_t object_size;
+ sos_count_t pages_per_slab;
+ struct sos_kslab_cache *cache;
+} kmalloc_cache[] =
+ {
+ { "kmalloc 8B objects", 8, 1 },
+ { "kmalloc 16B objects", 16, 1 },
+ { "kmalloc 32B objects", 32, 1 },
+ { "kmalloc 64B objects", 64, 1 },
+ { "kmalloc 128B objects", 128, 1 },
+ { "kmalloc 256B objects", 256, 2 },
+ { "kmalloc 1024B objects", 1024, 2 },
+ { "kmalloc 2048B objects", 2048, 3 },
+ { "kmalloc 4096B objects", 4096, 4 },
+ { "kmalloc 8192B objects", 8192, 8 },
+ { "kmalloc 16384B objects", 16384, 12 },
+ { NULL, 0, 0, NULL }
+ };
+
+
+sos_ret_t sos_kmalloc_subsystem_setup()
+{
+ int i;
+ for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
+ {
+ struct sos_kslab_cache *new_cache;
+ new_cache = sos_kmem_cache_create(kmalloc_cache[i].name,
+ kmalloc_cache[i].object_size,
+ kmalloc_cache[i].pages_per_slab,
+ 0,
+ SOS_KSLAB_CREATE_MAP
+ );
+ SOS_ASSERT_FATAL(new_cache != NULL);
+ kmalloc_cache[i].cache = new_cache;
+ }
+ return SOS_OK;
+}
+
+
+sos_vaddr_t sos_kmalloc(sos_size_t size, sos_ui32_t flags)
+{
+ /* Look for a suitable pre-allocated kmalloc cache */
+ int i;
+ for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
+ {
+ if (kmalloc_cache[i].object_size >= size)
+ return sos_kmem_cache_alloc(kmalloc_cache[i].cache,
+ (flags
+ & SOS_KMALLOC_ATOMIC)?
+ SOS_KSLAB_ALLOC_ATOMIC:0);
+ }
+
+ /* none found yet => we directly use the kmem_vmm subsystem to
+ allocate whole pages */
+ return sos_kmem_vmm_alloc(SOS_PAGE_ALIGN_SUP(size) / SOS_PAGE_SIZE,
+ ( (flags
+ & SOS_KMALLOC_ATOMIC)?
+ SOS_KMEM_VMM_ATOMIC:0)
+ | SOS_KMEM_VMM_MAP
+ );
+}
+
+
+sos_ret_t sos_kfree(sos_vaddr_t vaddr)
+{
+ /* The trouble here is that we aren't sure whether this object is a
+ slab object in a pre-allocated kmalloc cache, or an object
+ directly allocated as a kmem_vmm region. */
+
+ /* We first pretend this object is allocated in a pre-allocated
+ kmalloc cache */
+ if (! sos_kmem_cache_free(vaddr))
+    return SOS_OK; /* Great! We guessed right! */
+
+ /* Here we're wrong: it appears not to be an object in a
+ pre-allocated kmalloc cache. So we try to pretend this is a
+ kmem_vmm area */
+ return sos_kmem_vmm_free(vaddr);
+}
+
+
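The lookup in sos_kmalloc() above serves each request from the smallest pre-allocated cache able to hold it; a sketch of the resulting behavior, with sizes taken from the kmalloc_cache table:

    sos_vaddr_t p;

    p = sos_kmalloc(20, 0);    /* served by the "kmalloc 32B objects" cache */
    p = sos_kmalloc(100, 0);   /* served by the 128B cache */
    p = sos_kmalloc(32768, 0); /* above 16384B: falls back to
                                  sos_kmem_vmm_alloc(), whole pages */
    sos_kfree(p);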
diff --git a/sos-code-article6/sos/kmalloc.h b/sos-code-article6/sos/kmalloc.h
new file mode 100644
index 0000000..3f35b9d
--- /dev/null
+++ b/sos-code-article6/sos/kmalloc.h
@@ -0,0 +1,63 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMALLOC_H_
+#define _SOS_KMALLOC_H_
+
+/**
+ * @file kmalloc.h
+ *
+ * Simple malloc-style wrapper around kmem_vmm.h and kmem_slab.h for
+ * "anonymous" objects (i.e. not associated with any particular slab cache).
+ */
+
+#include <sos/types.h>
+#include <sos/errno.h>
+
+
+/**
+ * Initialize the kmalloc subsystem, i.e. pre-allocate a series of caches.
+ */
+sos_ret_t sos_kmalloc_subsystem_setup(void);
+
+/*
+ * sos_kmalloc flags
+ */
+/** sos_kmalloc() should succeed without blocking, or return NULL */
+#define SOS_KMALLOC_ATOMIC 1
+
+/**
+ * Allocate a kernel object of the given size in the most suited slab
+ * cache if size can be handled by one of the pre-allocated caches, or
+ * using the range allocator directly otherwise. The object will
+ * always be mapped in physical memory (i.e. implies
+ * SOS_KSLAB_CREATE_MAP and SOS_KMEM_VMM_MAP).
+ *
+ * @param size The size of the object
+ * @param flags The allocation flags (SOS_KMALLOC_* flags)
+ */
+sos_vaddr_t sos_kmalloc(sos_size_t size, sos_ui32_t flags);
+
+/**
+ * @note you are perfectly allowed to give the address of the
+ * kernel image, or the address of the BIOS area here, and it will work:
+ * the kernel/BIOS WILL be "deallocated". But if you really want to do
+ * this, well..., do expect some "surprises" ;)
+ */
+sos_ret_t sos_kfree(sos_vaddr_t vaddr);
+
+#endif /* _SOS_KMALLOC_H_ */
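A minimal allocation round-trip with this API; struct foo is hypothetical:

    #include <sos/kmalloc.h>

    struct foo { int a, b; };

    struct foo *f = (struct foo*) sos_kmalloc(sizeof(struct foo), 0);
    if (f)
      {
        f->a = 1;
        /* Pass SOS_KMALLOC_ATOMIC instead of 0 in contexts that
           must not block, e.g. interrupt handlers */
        sos_kfree((sos_vaddr_t)f);
      }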
diff --git a/sos-code-article6/sos/kmem_slab.c b/sos-code-article6/sos/kmem_slab.c
new file mode 100644
index 0000000..49a1527
--- /dev/null
+++ b/sos-code-article6/sos/kmem_slab.c
@@ -0,0 +1,812 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include <sos/macros.h>
+#include <sos/klibc.h>
+#include <sos/list.h>
+#include <sos/assert.h>
+#include <hwcore/paging.h>
+#include <sos/physmem.h>
+#include <sos/kmem_vmm.h>
+
+#include "kmem_slab.h"
+
+/* Dimensioning constants */
+#define NB_PAGES_IN_SLAB_OF_CACHES 1
+#define NB_PAGES_IN_SLAB_OF_RANGES 1
+
+/** The structure of a slab cache */
+struct sos_kslab_cache
+{
+ char *name;
+
+ /* non mutable characteristics of this slab */
+ sos_size_t original_obj_size; /* asked object size */
+ sos_size_t alloc_obj_size; /* actual object size, taking the
+ alignment constraints into account */
+ sos_count_t nb_objects_per_slab;
+ sos_count_t nb_pages_per_slab;
+ sos_count_t min_free_objects;
+
+/* slab cache flags */
+// #define SOS_KSLAB_CREATE_MAP (1<<0) /* See kmem_slab.h */
+// #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
+#define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
+ sos_ui32_t flags;
+
+ /* Supervision data (updated at run-time) */
+ sos_count_t nb_free_objects;
+
+ /* The lists of slabs owned by this cache */
+ struct sos_kslab *slab_list; /* head = non full, tail = full */
+
+ /* The caches are linked together on the kslab_cache_list */
+ struct sos_kslab_cache *prev, *next;
+};
+
+
+/** The structure of a slab */
+struct sos_kslab
+{
+ /** Number of free objects on this slab */
+ sos_count_t nb_free;
+
+ /** The list of these free objects */
+ struct sos_kslab_free_object *free;
+
+ /** The address of the associated range structure */
+ struct sos_kmem_range *range;
+
+ /** Virtual start address of this range */
+ sos_vaddr_t first_object;
+
+ /** Slab cache owning this slab */
+ struct sos_kslab_cache *cache;
+
+ /** Links to the other slabs managed by the same cache */
+ struct sos_kslab *prev, *next;
+};
+
+
+/** The structure of the free objects in the slab */
+struct sos_kslab_free_object
+{
+ struct sos_kslab_free_object *prev, *next;
+};
+
+/** The cache of slab caches */
+static struct sos_kslab_cache *cache_of_struct_kslab_cache;
+
+/** The cache of slab structures for non-ON_SLAB caches */
+static struct sos_kslab_cache *cache_of_struct_kslab;
+
+/** The list of slab caches */
+static struct sos_kslab_cache *kslab_cache_list;
+
+/* Helper function to initialize a cache structure */
+static sos_ret_t
+cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
+ const char* name,
+ sos_size_t obj_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objs,
+ sos_ui32_t cache_flags)
+{
+ unsigned int space_left;
+ sos_size_t alloc_obj_size;
+
+ if (obj_size <= 0)
+ return -SOS_EINVAL;
+
+ /* Default allocation size is the requested one */
+ alloc_obj_size = obj_size;
+
+ /* Make sure the requested size is large enough to store a
+ free_object structure */
+ if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
+ alloc_obj_size = sizeof(struct sos_kslab_free_object);
+
+ /* Align obj_size on 4 bytes */
+ alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
+
+ /* Make sure supplied number of pages per slab is consistent with
+ actual allocated object size */
+ if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
+ return -SOS_EINVAL;
+
+ /* Refuse too large slabs */
+ if (pages_per_slab > MAX_PAGES_PER_SLAB)
+ return -SOS_ENOMEM;
+
+  /* Fill in the cache structure */
+ memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
+ the_cache->name = (char*)name;
+ the_cache->flags = cache_flags;
+ the_cache->original_obj_size = obj_size;
+ the_cache->alloc_obj_size = alloc_obj_size;
+ the_cache->min_free_objects = min_free_objs;
+ the_cache->nb_pages_per_slab = pages_per_slab;
+
+  /* Small-size objects => the slab structure is allocated directly
+     inside the slab */
+ if(alloc_obj_size <= sizeof(struct sos_kslab))
+ the_cache->flags |= ON_SLAB;
+
+ /*
+ * Compute the space left once the maximum number of objects
+ * have been allocated in the slab
+ */
+ space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
+ if(the_cache->flags & ON_SLAB)
+ space_left -= sizeof(struct sos_kslab);
+ the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
+ space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
+
+ /* Make sure a single slab is large enough to contain the minimum
+ number of objects requested */
+ if (the_cache->nb_objects_per_slab < min_free_objs)
+ return -SOS_EINVAL;
+
+  /* If there is enough room left for both the objects and the slab
+     structure, then make the slab structure ON_SLAB */
+ if (space_left >= sizeof(struct sos_kslab))
+ the_cache->flags |= ON_SLAB;
+
+ return SOS_OK;
+}
+
+
+/** Helper function to add a new slab for the given cache. */
+static sos_ret_t
+cache_add_slab(struct sos_kslab_cache *kslab_cache,
+ sos_vaddr_t vaddr_slab,
+ struct sos_kslab *slab)
+{
+ int i;
+
+ /* Setup the slab structure */
+ memset(slab, 0x0, sizeof(struct sos_kslab));
+ slab->cache = kslab_cache;
+
+ /* Establish the address of the first free object */
+ slab->first_object = vaddr_slab;
+
+ /* Account for this new slab in the cache */
+ slab->nb_free = kslab_cache->nb_objects_per_slab;
+ kslab_cache->nb_free_objects += slab->nb_free;
+
+ /* Build the list of free objects */
+ for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
+ {
+ sos_vaddr_t obj_vaddr;
+
+ /* Set object's address */
+ obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
+
+ /* Add it to the list of free objects */
+ list_add_tail(slab->free,
+ (struct sos_kslab_free_object *)obj_vaddr);
+ }
+
+ /* Add the slab to the cache's slab list: add the head of the list
+ since this slab is non full */
+ list_add_head(kslab_cache->slab_list, slab);
+
+ return SOS_OK;
+}
+
+
+/** Helper function to allocate a new slab for the given kslab_cache */
+static sos_ret_t
+cache_grow(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags)
+{
+ sos_ui32_t range_alloc_flags;
+
+ struct sos_kmem_range *new_range;
+ sos_vaddr_t new_range_start;
+
+ struct sos_kslab *new_slab;
+
+ /*
+ * Setup the flags for the range allocation
+ */
+ range_alloc_flags = 0;
+
+ /* Atomic ? */
+ if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
+ range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;
+
+ /* Need physical mapping NOW ? */
+ if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
+ | SOS_KSLAB_CREATE_ZERO))
+ range_alloc_flags |= SOS_KMEM_VMM_MAP;
+
+ /* Allocate the range */
+ new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
+ range_alloc_flags,
+ & new_range_start);
+ if (! new_range)
+ return -SOS_ENOMEM;
+
+ /* Allocate the slab structure */
+ if (kslab_cache->flags & ON_SLAB)
+ {
+ /* Slab structure is ON the slab: simply set its address to the
+ end of the range */
+ sos_vaddr_t slab_vaddr
+ = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab);
+ new_slab = (struct sos_kslab*)slab_vaddr;
+ }
+ else
+ {
+ /* Slab structure is OFF the slab: allocate it from the cache of
+ slab structures */
+ sos_vaddr_t slab_vaddr
+ = sos_kmem_cache_alloc(cache_of_struct_kslab,
+ alloc_flags);
+ if (! slab_vaddr)
+ {
+ sos_kmem_vmm_del_range(new_range);
+ return -SOS_ENOMEM;
+ }
+ new_slab = (struct sos_kslab*)slab_vaddr;
+ }
+
+ cache_add_slab(kslab_cache, new_range_start, new_slab);
+ new_slab->range = new_range;
+
+ /* Set the backlink from range to this slab */
+ sos_kmem_vmm_set_slab(new_range, new_slab);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to release a slab
+ *
+ * The corresponding range is also deleted, except when
+ * must_del_range_now is unset. That case arises only when the function
+ * gets called from sos_kmem_cache_release_struct_range(), to avoid
+ * deep recursion.
+ */
+static sos_ret_t
+cache_release_slab(struct sos_kslab *slab,
+ sos_bool_t must_del_range_now)
+{
+ struct sos_kslab_cache *kslab_cache = slab->cache;
+ struct sos_kmem_range *range = slab->range;
+
+ SOS_ASSERT_FATAL(kslab_cache != NULL);
+ SOS_ASSERT_FATAL(range != NULL);
+ SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
+
+ /* First, remove the slab from the slabs' list of the cache */
+ list_delete(kslab_cache->slab_list, slab);
+ slab->cache->nb_free_objects -= slab->nb_free;
+
+ /* Release the slab structure if it is OFF slab */
+ if (! (slab->cache->flags & ON_SLAB))
+ sos_kmem_cache_free((sos_vaddr_t)slab);
+
+ /* Ok, the range is not bound to any slab anymore */
+ sos_kmem_vmm_set_slab(range, NULL);
+
+ /* Always delete the range now, unless we are told not to do so (see
+ sos_kmem_cache_release_struct_range() below) */
+ if (must_del_range_now)
+ return sos_kmem_vmm_del_range(range);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to create the initial cache of caches, with a very
+ * first slab in it, so that new cache structures can be simply allocated.
+ * @return the cache structure for the cache of caches
+ */
+static struct sos_kslab_cache *
+create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
+ int nb_pages)
+{
+ /* The preliminary cache structure we need in order to allocate the
+ first slab in the cache of caches (allocated on the stack !) */
+ struct sos_kslab_cache fake_cache_of_caches;
+
+ /* The real cache structure for the cache of caches */
+ struct sos_kslab_cache *real_cache_of_caches;
+
+ /* The kslab structure for this very first slab */
+ struct sos_kslab *slab_of_caches;
+
+ /* Init the cache structure for the cache of caches */
+ if (cache_initialize(& fake_cache_of_caches,
+ "Caches", sizeof(struct sos_kslab_cache),
+ nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+ /* Something wrong with the parameters */
+ return NULL;
+
+ memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
+
+ /* Add the pages for the 1st slab of caches */
+ slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
+ + nb_pages*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab));
+
+ /* Add the abovementioned 1st slab to the cache of caches */
+ cache_add_slab(& fake_cache_of_caches,
+ vaddr_first_slab_of_caches,
+ slab_of_caches);
+
+ /* Now we allocate a cache structure, which will be the real cache
+ of caches, ie a cache structure allocated INSIDE the cache of
+ caches, not inside the stack */
+ real_cache_of_caches
+ = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
+ 0);
+ /* We initialize it */
+ memcpy(real_cache_of_caches, & fake_cache_of_caches,
+ sizeof(struct sos_kslab_cache));
+ /* We need to update the slab's 'cache' field */
+ slab_of_caches->cache = real_cache_of_caches;
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, real_cache_of_caches);
+
+ return real_cache_of_caches;
+}
+
+
+/**
+ * Helper function to create the initial cache of ranges, with a very
+ * first slab in it, so that new kmem_range structures can be simply
+ * allocated.
+ * @return the cache of kmem_range
+ */
+static struct sos_kslab_cache *
+create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
+ sos_size_t sizeof_struct_range,
+ int nb_pages)
+{
+ /* The cache structure for the cache of kmem_range */
+ struct sos_kslab_cache *cache_of_ranges;
+
+ /* The kslab structure for the very first slab of ranges */
+ struct sos_kslab *slab_of_ranges;
+
+ cache_of_ranges = (struct sos_kslab_cache*)
+ sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
+ 0);
+ if (! cache_of_ranges)
+ return NULL;
+
+ /* Init the cache structure for the cache of ranges with min objects
+ per slab = 2 !!! */
+ if (cache_initialize(cache_of_ranges,
+ "struct kmem_range",
+ sizeof_struct_range,
+ nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+ /* Something wrong with the parameters */
+ return NULL;
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, cache_of_ranges);
+
+ /*
+ * Add the first slab for this cache
+ */
+ memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
+
+ /* Add the pages for the 1st slab of ranges */
+ slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
+ + nb_pages*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab));
+
+ cache_add_slab(cache_of_ranges,
+ vaddr_first_slab_of_ranges,
+ slab_of_ranges);
+
+ return cache_of_ranges;
+}
+
+
+struct sos_kslab_cache *
+sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_size_t sizeof_struct_range,
+ /* results */
+ struct sos_kslab **first_struct_slab_of_caches,
+ sos_vaddr_t *first_slab_of_caches_base,
+ sos_count_t *first_slab_of_caches_nb_pages,
+ struct sos_kslab **first_struct_slab_of_ranges,
+ sos_vaddr_t *first_slab_of_ranges_base,
+ sos_count_t *first_slab_of_ranges_nb_pages)
+{
+ int i;
+ sos_ret_t retval;
+ sos_vaddr_t vaddr;
+
+ /* The cache of ranges we are about to allocate */
+ struct sos_kslab_cache *cache_of_ranges;
+
+  /* In the beginning, there is no cache at all */
+ kslab_cache_list = NULL;
+ cache_of_struct_kslab = NULL;
+ cache_of_struct_kslab_cache = NULL;
+
+ /*
+ * Create the cache of caches, initialised with 1 allocated slab
+ */
+
+ /* Allocate the pages needed for the 1st slab of caches, and map them
+ in kernel space, right after the kernel */
+ *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
+ for (i = 0, vaddr = *first_slab_of_caches_base ;
+ i < NB_PAGES_IN_SLAB_OF_CACHES ;
+ i++, vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr;
+
+ ppage_paddr
+ = sos_physmem_ref_physpage_new(FALSE);
+ SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
+
+ retval = sos_paging_map(ppage_paddr, vaddr,
+ FALSE,
+ SOS_VM_MAP_ATOMIC
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE);
+ SOS_ASSERT_FATAL(retval == SOS_OK);
+
+ retval = sos_physmem_unref_physpage(ppage_paddr);
+ SOS_ASSERT_FATAL(retval == FALSE);
+ }
+
+ /* Create the cache of caches */
+ *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
+ cache_of_struct_kslab_cache
+ = create_cache_of_caches(*first_slab_of_caches_base,
+ NB_PAGES_IN_SLAB_OF_CACHES);
+ SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);
+
+ /* Retrieve the slab that should have been allocated */
+ *first_struct_slab_of_caches
+ = list_get_head(cache_of_struct_kslab_cache->slab_list);
+
+
+ /*
+ * Create the cache of ranges, initialised with 1 allocated slab
+ */
+ *first_slab_of_ranges_base = vaddr;
+ /* Allocate the 1st slab */
+ for (i = 0, vaddr = *first_slab_of_ranges_base ;
+ i < NB_PAGES_IN_SLAB_OF_RANGES ;
+ i++, vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr;
+
+ ppage_paddr
+ = sos_physmem_ref_physpage_new(FALSE);
+ SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
+
+ retval = sos_paging_map(ppage_paddr, vaddr,
+ FALSE,
+ SOS_VM_MAP_ATOMIC
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE);
+ SOS_ASSERT_FATAL(retval == SOS_OK);
+
+ retval = sos_physmem_unref_physpage(ppage_paddr);
+ SOS_ASSERT_FATAL(retval == FALSE);
+ }
+
+ /* Create the cache of ranges */
+ *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
+ cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
+ sizeof_struct_range,
+ NB_PAGES_IN_SLAB_OF_RANGES);
+ SOS_ASSERT_FATAL(cache_of_ranges != NULL);
+
+ /* Retrieve the slab that should have been allocated */
+ *first_struct_slab_of_ranges
+ = list_get_head(cache_of_ranges->slab_list);
+
+ /*
+ * Create the cache of slabs, without any allocated slab yet
+ */
+ cache_of_struct_kslab
+ = sos_kmem_cache_create("off-slab slab structures",
+ sizeof(struct sos_kslab),
+ 1,
+ 0,
+ SOS_KSLAB_CREATE_MAP);
+ SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);
+
+ return cache_of_ranges;
+}
+
+
+sos_ret_t
+sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
+ struct sos_kmem_range *first_range_of_caches,
+ struct sos_kslab *first_struct_slab_of_ranges,
+ struct sos_kmem_range *first_range_of_ranges)
+{
+ first_struct_slab_of_caches->range = first_range_of_caches;
+ first_struct_slab_of_ranges->range = first_range_of_ranges;
+ return SOS_OK;
+}
+
+
+struct sos_kslab_cache *
+sos_kmem_cache_create(const char* name,
+ sos_size_t obj_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objs,
+ sos_ui32_t cache_flags)
+{
+ struct sos_kslab_cache *new_cache;
+
+ /* Allocate the new cache */
+ new_cache = (struct sos_kslab_cache*)
+ sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
+ 0/* NOT ATOMIC */);
+ if (! new_cache)
+ return NULL;
+
+ if (cache_initialize(new_cache, name, obj_size,
+ pages_per_slab, min_free_objs,
+ cache_flags))
+ {
+ /* Something was wrong */
+ sos_kmem_cache_free((sos_vaddr_t)new_cache);
+ return NULL;
+ }
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, new_cache);
+
+ /* if the min_free_objs is set, pre-allocate a slab */
+ if (min_free_objs)
+ {
+ if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
+ {
+ sos_kmem_cache_destroy(new_cache);
+ return NULL; /* Not enough memory */
+ }
+ }
+
+ return new_cache;
+}
+
+
+sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
+{
+ int nb_slabs;
+ struct sos_kslab *slab;
+
+ if (! kslab_cache)
+ return -SOS_EINVAL;
+
+ /* Refuse to destroy the cache if there are any objects still
+ allocated */
+ list_foreach(kslab_cache->slab_list, slab, nb_slabs)
+ {
+ if (slab->nb_free != kslab_cache->nb_objects_per_slab)
+ return -SOS_EBUSY;
+ }
+
+ /* Remove all the slabs */
+ while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
+ {
+ cache_release_slab(slab, TRUE);
+ }
+
+ /* Remove the cache */
+ return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
+}
+
+
+sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags)
+{
+ sos_vaddr_t obj_vaddr;
+ struct sos_kslab * slab_head;
+#define ALLOC_RET return
+
+ /* If the slab at the head of the slabs' list has no free object,
+ then the other slabs don't either => need to allocate a new
+ slab */
+ if ((! kslab_cache->slab_list)
+ || (! list_get_head(kslab_cache->slab_list)->free))
+ {
+ if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
+ /* Not enough memory or blocking alloc */
+ ALLOC_RET( (sos_vaddr_t)NULL);
+ }
+
+ /* Here: we are sure that list_get_head(kslab_cache->slab_list)
+ exists *AND* that list_get_head(kslab_cache->slab_list)->free is
+ NOT NULL */
+ slab_head = list_get_head(kslab_cache->slab_list);
+ SOS_ASSERT_FATAL(slab_head != NULL);
+
+ /* Allocate the object at the head of the slab at the head of the
+ slabs' list */
+ obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
+ slab_head->nb_free --;
+ kslab_cache->nb_free_objects --;
+
+ /* If needed, reset object's contents */
+ if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
+ memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
+
+ /* Slab is now full ? */
+ if (slab_head->free == NULL)
+ {
+ /* Transfer it at the tail of the slabs' list */
+ struct sos_kslab *slab;
+ slab = list_pop_head(kslab_cache->slab_list);
+ list_add_tail(kslab_cache->slab_list, slab);
+ }
+
+  /*
+   * For caches that require a minimum amount of free objects left,
+   * allocate a new slab if needed.
+   *
+   * Notice the "== min_free_objects - 1": we did not write
+   * "< min_free_objects" because, for the cache of kmem_range
+   * structures, this would lead to a chicken-and-egg problem:
+   * cache_grow below would call cache_alloc again for that same
+   * cache, so we would return here with the same cache. If the test
+   * were "< min_free_objects", we would call cache_grow for that
+   * cache again and again... until we reach the bottom of our stack
+   * (infinite recursion). By testing precisely "==", cache_grow is
+   * only called the first time.
+   */
+ if ((kslab_cache->min_free_objects > 0)
+ && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
+ {
+ /* No: allocate a new slab now */
+ if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
+ {
+ /* Not enough free memory or blocking alloc => undo the
+ allocation */
+ sos_kmem_cache_free(obj_vaddr);
+ ALLOC_RET( (sos_vaddr_t)NULL);
+ }
+ }
+
+ ALLOC_RET(obj_vaddr);
+}
+
+
+/**
+ * Helper function to free the object located at the given address.
+ *
+ * @param empty_slab is the address of the slab to release, if removing
+ * the object causes the slab to become empty.
+ */
+inline static
+sos_ret_t
+free_object(sos_vaddr_t vaddr,
+ struct sos_kslab ** empty_slab)
+{
+ struct sos_kslab_cache *kslab_cache;
+
+ /* Lookup the slab containing the object in the slabs' list */
+ struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
+
+ /* By default, consider that the slab will not become empty */
+ *empty_slab = NULL;
+
+ /* Did not find the slab */
+ if (! slab)
+ return -SOS_EINVAL;
+
+ SOS_ASSERT_FATAL(slab->cache);
+ kslab_cache = slab->cache;
+
+ /*
+ * Check whether the address really could mark the start of an actual
+ * allocated object
+ */
+ /* Address multiple of an object's size ? */
+ if (( (vaddr - slab->first_object)
+ % kslab_cache->alloc_obj_size) != 0)
+ return -SOS_EINVAL;
+ /* Address not too large ? */
+ if (( (vaddr - slab->first_object)
+ / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
+ return -SOS_EINVAL;
+
+ /*
+ * Ok: we now release the object
+ */
+
+  /* We found a full slab => it will not be full any more => move it
+     to the head of the slabs' list */
+ if (! slab->free)
+ {
+ list_delete(kslab_cache->slab_list, slab);
+ list_add_head(kslab_cache->slab_list, slab);
+ }
+
+ /* Release the object */
+ list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
+ slab->nb_free++;
+ kslab_cache->nb_free_objects++;
+ SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
+
+ /* Cause the slab to be released if it becomes empty, and if we are
+ allowed to do it */
+ if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
+ && (kslab_cache->nb_free_objects - slab->nb_free
+ >= kslab_cache->min_free_objects))
+ {
+ *empty_slab = slab;
+ }
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
+{
+ sos_ret_t retval;
+ struct sos_kslab *empty_slab;
+
+ /* Remove the object from the slab */
+ retval = free_object(vaddr, & empty_slab);
+ if (retval != SOS_OK)
+ return retval;
+
+ /* Remove the slab and the underlying range if needed */
+ if (empty_slab != NULL)
+ return cache_release_slab(empty_slab, TRUE);
+
+ return SOS_OK;
+}
+
+
+struct sos_kmem_range *
+sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
+{
+ sos_ret_t retval;
+ struct sos_kslab *empty_slab;
+
+ /* Remove the object from the slab */
+ retval = free_object((sos_vaddr_t)the_range, & empty_slab);
+ if (retval != SOS_OK)
+ return NULL;
+
+ /* Remove the slab BUT NOT the underlying range if needed */
+ if (empty_slab != NULL)
+ {
+ struct sos_kmem_range *empty_range = empty_slab->range;
+ SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
+ SOS_ASSERT_FATAL(empty_range != NULL);
+ return empty_range;
+ }
+
+ return NULL;
+}
+
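free_object() above validates an address with one modulo and one division; a worked instance, assuming a cache with alloc_obj_size == 32 and nb_objects_per_slab == 100 (the numbers are illustrative):

    /* offset = vaddr - slab->first_object
       offset ==   96:   96 % 32 == 0 (object boundary),
                         96 / 32 == 3 < 100 (valid index)  => freed
       offset ==   40:   40 % 32 != 0                      => -SOS_EINVAL
       offset == 3232: 3232 / 32 == 101 >= 100             => -SOS_EINVAL */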
diff --git a/sos-code-article6/sos/kmem_slab.h b/sos-code-article6/sos/kmem_slab.h
new file mode 100644
index 0000000..1f28ff9
--- /dev/null
+++ b/sos-code-article6/sos/kmem_slab.h
@@ -0,0 +1,206 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMEM_SLAB_H_
+#define _SOS_KMEM_SLAB_H_
+
+/**
+ * @file kmem_slab.h
+ *
+ * Kernel Memory Allocator based on Bonwick's slab allocator (Solaris
+ * 2.4, Linux 2.4). This allocator achieves a good memory utilization
+ * ratio (memory effectively used / memory requested), i.e. limited
+ * fragmentation, while elegantly handling cache-effect considerations
+ * (TLB locality through the notion of "cache" of slabs, and dcache
+ * utilization through the notion of cache colouring to decrease
+ * conflicts in the dcache for accesses to different data in the same
+ * cache).
+ *
+ * This allocator relies on the range allocator (kmem_vmm.h) to
+ * allocate the slabs, which itself relies on the slab allocator to
+ * allocate its "range" data structures, thus leading to a
+ * chicken-and-egg problem. We solve this problem by introducing the
+ * notion of "min_free_objs" for the slab caches, in order for the cache
+ * of ranges to always have enough ranges in reserve to complete the
+ * range allocation before being urged to allocate a new slab of
+ * ranges, which would require the allocation of a new range.
+ *
+ * Compared to Bonwick's recommendations, we don't handle ctor/dtor
+ * routines on the objects, so that we can alter the objects once they
+ * are set free. Thus, the list of free objects is stored in the free
+ * objects themselves, not alongside the objects (this also implies that
+ * the SOS_KSLAB_CREATE_MAP flag below is meaningless). We also don't
+ * implement cache colouring (trivial to add, but we omit it for
+ * readability reasons), and the only alignment constraint we respect
+ * is that allocated objects are aligned on a 4B boundary: other
+ * alignment constraints must be integrated by the user into the
+ * "object_size" parameter to sos_kmem_cache_create().
+ *
+ * References :
+ * - J. Bonwick's paper, "The slab allocator: An object-caching kernel
+ * memory allocator", In USENIX Summer 1994 Technical Conference
+ * - The bible, aka "Unix internals : the new frontiers" (section
+ * 12.10), Uresh Vahalia, Prentice Hall 1996, ISBN 0131019082
+ * - "The Linux slab allocator", B. Fitzgibbons,
+ * http://www.cc.gatech.edu/people/home/bradf/cs7001/proj2/
+ * - The Kos, http://kos.enix.org/
+ */
+#include <sos/types.h>
+#include <sos/errno.h>
+
+/** Opaque data structure that defines a Cache of slabs */
+struct sos_kslab_cache;
+
+/** Opaque data structure that defines a slab. Exported only to
+ kmem_vmm.h */
+struct sos_kslab;
+
+#include "kmem_vmm.h"
+
+
+/** The maximum allowed pages for each slab */
+#define MAX_PAGES_PER_SLAB 32 /* 128 kB */
+
+
+/**
+ * Initialize the slab cache of slab caches, and prepare the cache of
+ * kmem_range for kmem_vmm.
+ *
+ * @param kernel_core_base The virtual address of the first byte used
+ * by the kernel code/data
+ *
+ * @param kernel_core_top The virtual address of the first byte after
+ * the kernel code/data.
+ *
+ * @param sizeof_struct_range the size of the objects (aka "struct
+ * sos_kmem_vmm_ranges") to be allocated in the cache of ranges
+ *
+ * @param first_struct_slab_of_caches (output value) the virtual
+ * address of the first slab structure that gets allocated for the
+ * cache of caches. The function manually allocates the first slab of
+ * the cache of caches because of a chicken-and-egg problem. The
+ * address of the slab is used by the kmem_vmm setup routine to
+ * finalize the allocation of the slab, so that it behaves like a
+ * real slab afterwards.
+ *
+ * @param first_slab_of_caches_base (output value) the virtual address
+ * of the slab associated to the slab structure.
+ *
+ * @param first_slab_of_caches_nb_pages (output value) the number of
+ * (virtual) pages used by the first slab of the cache of caches.
+ *
+ * @param first_struct_slab_of_ranges (output value) the virtual address
+ * of the first slab that gets allocated for the cache of ranges. Same
+ * explanation as above.
+ *
+ * @param first_slab_of_ranges_base (output value) the virtual address
+ * of the slab associated to the slab structure.
+ *
+ * @param first_slab_of_ranges_nb_pages (output value) the number of
+ * (virtual) pages used by the first slab of the cache of ranges.
+ *
+ * @return the cache of kmem_range, immediately usable
+ */
+struct sos_kslab_cache *
+sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_size_t sizeof_struct_range,
+ /* results */
+ struct sos_kslab **first_struct_slab_of_caches,
+ sos_vaddr_t *first_slab_of_caches_base,
+ sos_count_t *first_slab_of_caches_nb_pages,
+ struct sos_kslab **first_struct_slab_of_ranges,
+ sos_vaddr_t *first_slab_of_ranges_base,
+ sos_count_t *first_slab_of_ranges_nb_pages);
+
+/**
+ * Update the configuration of the cache subsystem once the vmm
+ * subsystem has been fully initialized
+ */
+sos_ret_t
+sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
+ struct sos_kmem_range *first_range_of_caches,
+ struct sos_kslab *first_struct_slab_of_ranges,
+ struct sos_kmem_range *first_range_of_ranges);
+
+
+/*
+ * Flags for sos_kmem_cache_create()
+ */
+/** The slabs should be initially mapped in physical memory */
+#define SOS_KSLAB_CREATE_MAP (1<<0)
+/** The object should always be set to zero at allocation (implies
+ SOS_KSLAB_CREATE_MAP) */
+#define SOS_KSLAB_CREATE_ZERO (1<<1)
+
+/**
+ * @note this function MAY block (involved allocations are not atomic)
+ * @param name must remain valid during the whole cache's life
+ * (shallow copy) !
+ * @param cache_flags An or-ed combination of the SOS_KSLAB_CREATE_* flags
+ */
+struct sos_kslab_cache *
+sos_kmem_cache_create(const char* name,
+ sos_size_t object_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objects,
+ sos_ui32_t cache_flags);
+
+sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache);
+
+
+/*
+ * Flags for sos_kmem_cache_alloc()
+ */
+/** Allocation should either succeed or fail, without blocking */
+#define SOS_KSLAB_ALLOC_ATOMIC (1<<0)
+
+/**
+ * Allocate an object from the given cache.
+ *
+ * @param alloc_flags An or-ed combination of the SOS_KSLAB_ALLOC_* flags
+ */
+sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags);
+
+
+/**
+ * Free an object (assumed to be already allocated and not already
+ * free) at the given virtual address.
+ */
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr);
+
+
+/*
+ * Function reserved to kmem_vmm.c. Does almost everything
+ * sos_kmem_cache_free() does, except that it never calls
+ * sos_kmem_vmm_del_range() itself, even when the slab becomes
+ * empty. This avoids deep recursion when a range is freed with
+ * sos_kmem_vmm_del_range().
+ *
+ * @param the_range The range structure to free
+ *
+ * @return NULL when the range containing 'the_range' still contains
+ * other ranges, or the address of the range which owned 'the_range'
+ * if it becomes empty.
+ */
+struct sos_kmem_range *
+sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range);
+
+
+#endif /* _SOS_KMEM_SLAB_H_ */
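A typical cache lifecycle with the API above; struct task is a hypothetical kernel object:

    struct task { int pid; };

    struct sos_kslab_cache *task_cache
      = sos_kmem_cache_create("struct task", sizeof(struct task),
                              1 /* page per slab */, 0 /* no reserve */,
                              SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);

    struct task *t = (struct task*) sos_kmem_cache_alloc(task_cache, 0);
    /* ... use t (it arrives zeroed thanks to CREATE_ZERO) ... */
    sos_kmem_cache_free((sos_vaddr_t)t);
    sos_kmem_cache_destroy(task_cache);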
diff --git a/sos-code-article6/sos/kmem_vmm.c b/sos-code-article6/sos/kmem_vmm.c
new file mode 100644
index 0000000..ea2fdf1
--- /dev/null
+++ b/sos-code-article6/sos/kmem_vmm.c
@@ -0,0 +1,606 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/list.h>
+#include <sos/physmem.h>
+#include <hwcore/paging.h>
+#include <sos/assert.h>
+
+#include "kmem_vmm.h"
+
+/** The structure of a range of kernel-space virtual addresses */
+struct sos_kmem_range
+{
+ sos_vaddr_t base_vaddr;
+ sos_count_t nb_pages;
+
+ /* The slab owning this range, or NULL */
+ struct sos_kslab *slab;
+
+ struct sos_kmem_range *prev, *next;
+};
+const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
+
+/** The ranges are SORTED in (strictly) ascending base addresses */
+static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;
+
+/** The slab cache for the kmem ranges */
+static struct sos_kslab_cache *kmem_range_cache;
+
+
+
+/** Helper function to get the closest preceding or containing
+ range for the given virtual address */
+static struct sos_kmem_range *
+get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
+ sos_vaddr_t vaddr)
+{
+ int nb_elements;
+ struct sos_kmem_range *a_range, *ret_range;
+
+  /* The kmem_range list is kept SORTED, so we can stop as soon as we
+     reach a range whose base address is above vaddr */
+ ret_range = NULL;
+ list_foreach(the_list, a_range, nb_elements)
+ {
+ if (vaddr < a_range->base_vaddr)
+ return ret_range;
+ ret_range = a_range;
+ }
+
+ /* This will always be the LAST range in the kmem area */
+ return ret_range;
+}
+
+
+/**
+ * Helper function to lookup a free range large enough to hold nb_pages
+ * pages (first fit)
+ */
+static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
+{
+ int nb_elements;
+ struct sos_kmem_range *r;
+
+ list_foreach(kmem_free_range_list, r, nb_elements)
+ {
+ if (r->nb_pages >= nb_pages)
+ return r;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Helper function to add a_range in the_list, in strictly ascending order.
+ *
+ * @return The (possibly) new head of the_list
+ */
+static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
+ struct sos_kmem_range *a_range)
+{
+ struct sos_kmem_range *prec_used;
+
+ /** Look for any preceding range */
+ prec_used = get_closest_preceding_kmem_range(the_list,
+ a_range->base_vaddr);
+ /** insert a_range /after/ this prec_used */
+ if (prec_used != NULL)
+ list_insert_after(the_list, prec_used, a_range);
+ else /* Insert at the beginning of the list */
+ list_add_head(the_list, a_range);
+
+ return the_list;
+}
+
+
+/**
+ * Helper function to retrieve the range owning the given vaddr, by
+ * scanning the physical memory first if vaddr is mapped in RAM
+ */
+static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range;
+
+ /* First: try to retrieve the physical page mapped at this address */
+ sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
+
+ if (ppage_paddr)
+ {
+ range = sos_physmem_get_kmem_range(ppage_paddr);
+
+ /* If a page is mapped at this address, it is EXPECTED that it
+ is really associated with a range */
+ SOS_ASSERT_FATAL(range != NULL);
+ }
+
+ /* Otherwise scan the list of used ranges, looking for the range
+ owning the address */
+ else
+ {
+ range = get_closest_preceding_kmem_range(kmem_used_range_list,
+ vaddr);
+ /* Not found */
+ if (! range)
+ return NULL;
+
+ /* vaddr not covered by this range */
+ if ( (vaddr < range->base_vaddr)
+ || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
+ return NULL;
+ }
+
+ return range;
+}
+
+
+/**
+ * Helper function for sos_kmem_vmm_setup() to initialize a new range
+ * that maps a given area as free or as already used.
+ * This function either succeeds or halts the whole system.
+ */
+static struct sos_kmem_range *
+create_range(sos_bool_t is_free,
+ sos_vaddr_t base_vaddr,
+ sos_vaddr_t top_vaddr,
+ struct sos_kslab *associated_slab)
+{
+ struct sos_kmem_range *range;
+
+ SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
+ SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
+
+ if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
+ return NULL;
+
+ range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
+ SOS_KSLAB_ALLOC_ATOMIC);
+ SOS_ASSERT_FATAL(range != NULL);
+
+ range->base_vaddr = base_vaddr;
+ range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;
+
+ if (is_free)
+ {
+ list_add_tail(kmem_free_range_list,
+ range);
+ }
+ else
+ {
+ sos_vaddr_t vaddr;
+ range->slab = associated_slab;
+ list_add_tail(kmem_used_range_list,
+ range);
+
+      /* Ok, set the range owner for the pages in this range */
+ for (vaddr = base_vaddr ;
+ vaddr < top_vaddr ;
+ vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
+ SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
+ sos_physmem_set_kmem_range(ppage_paddr, range);
+ }
+ }
+
+ return range;
+}
+
+
+sos_ret_t
+sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_vaddr_t bootstrap_stack_bottom_vaddr,
+ sos_vaddr_t bootstrap_stack_top_vaddr)
+{
+ struct sos_kslab *first_struct_slab_of_caches,
+ *first_struct_slab_of_ranges;
+ sos_vaddr_t first_slab_of_caches_base,
+ first_slab_of_caches_nb_pages,
+ first_slab_of_ranges_base,
+ first_slab_of_ranges_nb_pages;
+ struct sos_kmem_range *first_range_of_caches,
+ *first_range_of_ranges;
+
+ list_init(kmem_free_range_list);
+ list_init(kmem_used_range_list);
+
+ kmem_range_cache
+ = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
+ kernel_core_top,
+ sizeof(struct sos_kmem_range),
+ & first_struct_slab_of_caches,
+ & first_slab_of_caches_base,
+ & first_slab_of_caches_nb_pages,
+ & first_struct_slab_of_ranges,
+ & first_slab_of_ranges_base,
+ & first_slab_of_ranges_nb_pages);
+ SOS_ASSERT_FATAL(kmem_range_cache != NULL);
+
+ /* Mark virtual addresses 16kB - Video as FREE */
+ create_range(TRUE,
+ SOS_KMEM_VMM_BASE,
+ SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
+ NULL);
+
+ /* Mark virtual addresses in Video hardware mapping as NOT FREE */
+ create_range(FALSE,
+ SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
+ SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
+ NULL);
+
+ /* Mark virtual addresses Video - Kernel as FREE */
+ create_range(TRUE,
+ SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
+ SOS_PAGE_ALIGN_INF(kernel_core_base),
+ NULL);
+
+ /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
+ as NOT FREE */
+ create_range(FALSE,
+ SOS_PAGE_ALIGN_INF(kernel_core_base),
+ bootstrap_stack_bottom_vaddr,
+ NULL);
+
+ /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
+     but in a separate vmm region so that it can be un-allocated later */
+ create_range(FALSE,
+ bootstrap_stack_bottom_vaddr,
+ bootstrap_stack_top_vaddr,
+ NULL);
+
+ /* Mark the remaining virtual addresses in Kernel code/data after
+ the bootstrap stack as NOT FREE */
+ create_range(FALSE,
+ bootstrap_stack_top_vaddr,
+ SOS_PAGE_ALIGN_SUP(kernel_core_top),
+ NULL);
+
+ /* Mark virtual addresses in the first slab of the cache of caches
+ as NOT FREE */
+ SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
+ == first_slab_of_caches_base);
+ SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
+ first_range_of_caches
+ = create_range(FALSE,
+ first_slab_of_caches_base,
+ first_slab_of_caches_base
+ + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
+ first_struct_slab_of_caches);
+
+ /* Mark virtual addresses in the first slab of the cache of ranges
+ as NOT FREE */
+ SOS_ASSERT_FATAL((first_slab_of_caches_base
+ + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
+ == first_slab_of_ranges_base);
+ SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
+ first_range_of_ranges
+ = create_range(FALSE,
+ first_slab_of_ranges_base,
+ first_slab_of_ranges_base
+ + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
+ first_struct_slab_of_ranges);
+
+ /* Mark virtual addresses after these slabs as FREE */
+ create_range(TRUE,
+ first_slab_of_ranges_base
+ + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
+ SOS_KMEM_VMM_TOP,
+ NULL);
+
+ /* Update the cache subsystem so that the artificially-created
+ caches of caches and ranges really behave like *normal* caches (ie
+ those allocated by the normal slab API) */
+ sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
+ first_range_of_caches,
+ first_struct_slab_of_ranges,
+ first_range_of_ranges);
+
+ return SOS_OK;
+}
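+/* Illustrative summary of the layout built above: the free list now
+   holds [16kB..video[, ]video..kernel[ and ]end of the two initial
+   slabs..SOS_KMEM_VMM_TOP[; the used list holds the video area, the
+   kernel code/data, the bootstrap stack (as its own range, so that it
+   can be freed later) and the two initial slabs. */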
+
+
+/**
+ * Allocate a new kernel area spanning one or multiple pages.
+ *
+ * @return The new range structure, or NULL upon failure
+ */
+struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
+ sos_ui32_t flags,
+ sos_vaddr_t * range_start)
+{
+ struct sos_kmem_range *free_range, *new_range;
+
+ if (nb_pages <= 0)
+ return NULL;
+
+  /* Find a suitable free range able to hold nb_pages pages (first fit) */
+ free_range = find_suitable_free_range(nb_pages);
+ if (free_range == NULL)
+ return NULL;
+
+ /* If range has exactly the requested size, just move it to the
+ "used" list */
+ if(free_range->nb_pages == nb_pages)
+ {
+ list_delete(kmem_free_range_list, free_range);
+ kmem_used_range_list = insert_range(kmem_used_range_list,
+ free_range);
+ /* The new_range is exactly the free_range */
+ new_range = free_range;
+ }
+
+  /* Otherwise the range is bigger than the requested size: split it.
+     This involves reducing its size and allocating a new range, which
+     is going to be added to the "used" list */
+ else
+ {
+ /* free_range split in { new_range | free_range } */
+ new_range = (struct sos_kmem_range*)
+ sos_kmem_cache_alloc(kmem_range_cache,
+ (flags & SOS_KMEM_VMM_ATOMIC)?
+ SOS_KSLAB_ALLOC_ATOMIC:0);
+ if (! new_range)
+ return NULL;
+
+ new_range->base_vaddr = free_range->base_vaddr;
+ new_range->nb_pages = nb_pages;
+ free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
+ free_range->nb_pages -= nb_pages;
+
+ /* free_range is still at the same place in the list */
+ /* insert new_range in the used list */
+ kmem_used_range_list = insert_range(kmem_used_range_list,
+ new_range);
+ }
+
+ /* By default, the range is not associated with any slab */
+ new_range->slab = NULL;
+
+ /* If mapping of physical pages is needed, map them now */
+ if (flags & SOS_KMEM_VMM_MAP)
+ {
+ int i;
+ for (i = 0 ; i < nb_pages ; i ++)
+ {
+ /* Get a new physical page */
+ sos_paddr_t ppage_paddr
+ = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));
+
+ /* Map the page in kernel space */
+ if (ppage_paddr)
+ {
+ if (sos_paging_map(ppage_paddr,
+ new_range->base_vaddr
+ + i * SOS_PAGE_SIZE,
+ FALSE /* Not a user page */,
+ ((flags & SOS_KMEM_VMM_ATOMIC)?
+ SOS_VM_MAP_ATOMIC:0)
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE))
+ {
+ /* Failed => force unallocation, see below */
+ sos_physmem_unref_physpage(ppage_paddr);
+ ppage_paddr = (sos_paddr_t)NULL;
+ }
+ else
+ {
+ /* Success : page can be unreferenced since it is
+ now mapped */
+ sos_physmem_unref_physpage(ppage_paddr);
+ }
+ }
+
+ /* Undo the allocation if failed to allocate or map a new page */
+ if (! ppage_paddr)
+ {
+ sos_kmem_vmm_del_range(new_range);
+ return NULL;
+ }
+
+ /* Ok, set the range owner for this page */
+ sos_physmem_set_kmem_range(ppage_paddr, new_range);
+ }
+ }
+ /* ... Otherwise: Demand Paging will do the job */
+
+ if (range_start)
+ *range_start = new_range->base_vaddr;
+
+ return new_range;
+}
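+/* Illustrative usage sketch (not part of the original code, hence
+   the #if 0; memset comes from <sos/klibc.h>): allocate a 4-page,
+   immediately-mapped kernel area, use it, then release it. */
+#if 0
+static void kmem_vmm_new_range_example(void)
+{
+  sos_vaddr_t base;
+  struct sos_kmem_range *r
+    = sos_kmem_vmm_new_range(4, SOS_KMEM_VMM_MAP, & base);
+  if (r != NULL)
+    {
+      /* base points to 4 mapped, writable kernel pages */
+      memset((void*)base, 0, 4*SOS_PAGE_SIZE);
+      sos_kmem_vmm_del_range(r);
+    }
+}
+#endif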
+
+
+sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
+{
+ int i;
+ struct sos_kmem_range *ranges_to_free;
+ list_init(ranges_to_free);
+
+ SOS_ASSERT_FATAL(range != NULL);
+ SOS_ASSERT_FATAL(range->slab == NULL);
+
+ /* Remove the range from the 'USED' list now */
+ list_delete(kmem_used_range_list, range);
+
+ /*
+   * The following do..while() loop is here to avoid an indirect
+   * recursion: if we called kmem_cache_free() directly from inside the
+   * current function, we would risk re-entering the current function
+   * (sos_kmem_vmm_del_range()) again, which may cause problems if it
+   * in turn calls kmem_slab again and sos_kmem_vmm_del_range again,
+   * and again and again. This may happen while freeing ranges of
+   * struct sos_kslab...
+   *
+   * To avoid this, we choose to call a special function of kmem_slab
+   * doing almost the same as sos_kmem_cache_free(), but which does
+   * NOT call us back (ie sos_kmem_vmm_del_range()): instead WE add the
+   * range that is to be freed to a list, and the do..while() loop is
+   * here to process this list ! The recursion is replaced by
+   * classical iteration.
+ */
+ do
+ {
+ /* Ok, we got the range. Now, insert this range in the free list */
+ kmem_free_range_list = insert_range(kmem_free_range_list, range);
+
+ /* Unmap the physical pages */
+ for (i = 0 ; i < range->nb_pages ; i ++)
+ {
+ /* This will work even if no page is mapped at this address */
+ sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
+ }
+
+      /* Possibly coalesce it with the prev/next free ranges (there is
+ always a valid prev/next link since the list is circular). Note:
+ the tests below will lead to correct behaviour even if the list
+ is limited to the 'range' singleton, at least as long as the
+ range is not zero-sized */
+ /* Merge with preceding one ? */
+ if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
+ == range->base_vaddr)
+ {
+ struct sos_kmem_range *empty_range_of_ranges = NULL;
+ struct sos_kmem_range *prec_free = range->prev;
+
+ /* Merge them */
+ prec_free->nb_pages += range->nb_pages;
+ list_delete(kmem_free_range_list, range);
+
+ /* Mark the range as free. This may cause the slab owning
+ the range to become empty */
+ empty_range_of_ranges =
+ sos_kmem_cache_release_struct_range(range);
+
+ /* If this causes the slab owning the range to become empty,
+ add the range corresponding to the slab at the end of the
+ list of the ranges to be freed: it will be actually freed
+ in one of the next iterations of the do{} loop. */
+ if (empty_range_of_ranges != NULL)
+ {
+ list_delete(kmem_used_range_list, empty_range_of_ranges);
+ list_add_tail(ranges_to_free, empty_range_of_ranges);
+ }
+
+	  /* Set range to the beginning of this coalesced area */
+ range = prec_free;
+ }
+
+ /* Merge with next one ? [NO 'else' since range may be the result of
+ the merge above] */
+ if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
+ == range->next->base_vaddr)
+ {
+ struct sos_kmem_range *empty_range_of_ranges = NULL;
+ struct sos_kmem_range *next_range = range->next;
+
+ /* Merge them */
+ range->nb_pages += next_range->nb_pages;
+ list_delete(kmem_free_range_list, next_range);
+
+ /* Mark the next_range as free. This may cause the slab
+ owning the next_range to become empty */
+ empty_range_of_ranges =
+ sos_kmem_cache_release_struct_range(next_range);
+
+ /* If this causes the slab owning the next_range to become
+ empty, add the range corresponding to the slab at the end
+ of the list of the ranges to be freed: it will be
+ actually freed in one of the next iterations of the
+ do{} loop. */
+ if (empty_range_of_ranges != NULL)
+ {
+ list_delete(kmem_used_range_list, empty_range_of_ranges);
+ list_add_tail(ranges_to_free, empty_range_of_ranges);
+ }
+ }
+
+
+ /* If deleting the range(s) caused one or more range(s) to be
+ freed, get the next one to free */
+ if (list_is_empty(ranges_to_free))
+ range = NULL; /* No range left to free */
+ else
+ range = list_pop_head(ranges_to_free);
+
+ }
+ /* Stop when there is no range left to be freed for now */
+ while (range != NULL);
+
+ return SOS_OK;
+}
+
+
+sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
+ sos_ui32_t flags)
+{
+ struct sos_kmem_range *range
+ = sos_kmem_vmm_new_range(nb_pages,
+ flags,
+ NULL);
+ if (! range)
+ return (sos_vaddr_t)NULL;
+
+ return range->base_vaddr;
+}
+
+
+sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+
+ /* We expect that the given address is the base address of the
+ range */
+ if (!range || (range->base_vaddr != vaddr))
+ return -SOS_EINVAL;
+
+ /* We expect that this range is not held by any cache */
+ if (range->slab != NULL)
+ return -SOS_EBUSY;
+
+ return sos_kmem_vmm_del_range(range);
+}
+
+
+sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
+ struct sos_kslab *slab)
+{
+ if (! range)
+ return -SOS_EINVAL;
+
+ range->slab = slab;
+ return SOS_OK;
+}
+
+struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+ if (! range)
+ return NULL;
+
+ return range->slab;
+}
+
+
+sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+ return (range != NULL);
+}
diff --git a/sos-code-article6/sos/kmem_vmm.h b/sos-code-article6/sos/kmem_vmm.h
new file mode 100644
index 0000000..49b262d
--- /dev/null
+++ b/sos-code-article6/sos/kmem_vmm.h
@@ -0,0 +1,113 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMEM_VMM_H_
+#define _SOS_KMEM_VMM_H_
+
+/**
+ * @file kmem_vmm.h
+ *
+ * Kernel Memory Allocator for multiple-page-sized objects residing in
+ * the kernel (virtual memory) space. Relies on the slab cache
+ * allocator to allocate its (internal) "range" data structure.
+ */
+
+#include <hwcore/paging.h>
+
+/* The base and top virtual addresses covered by the kernel allocator */
+#define SOS_KMEM_VMM_BASE 0x4000 /* 16kB */
+#define SOS_KMEM_VMM_TOP SOS_PAGING_MIRROR_VADDR /* 1GB - 4MB */
+
+/** Opaque structure used internally and declared here for physmem.h */
+struct sos_kmem_range;
+
+#include <sos/kmem_slab.h>
+
+/**
+ * Mark the areas between SOS_KMEM_VMM_BASE and SOS_KMEM_VMM_TOP
+ * as either used or free. Those that are already mapped are marked
+ * as "used", and the 0..SOS_KMEM_VMM_BASE virtual addresses are
+ * marked as "used" too (to detect incorrect pointer dereferences).
+ */
+sos_ret_t
+sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base_vaddr,
+ sos_vaddr_t kernel_core_top_vaddr,
+ sos_vaddr_t bootstrap_stack_bottom_vaddr,
+ sos_vaddr_t bootstrap_stack_top_vaddr);
+
+
+/*
+ * Flags for kmem_vmm_new_range and kmem_vmm_alloc
+ */
+/** Physical pages should be immediately mapped */
+#define SOS_KMEM_VMM_MAP (1<<0)
+/** Allocation should either succeed or fail immediately, without blocking */
+#define SOS_KMEM_VMM_ATOMIC (1<<1)
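+/* Example (illustrative): sos_kmem_vmm_alloc(2, SOS_KMEM_VMM_MAP
+   | SOS_KMEM_VMM_ATOMIC) returns 2 pages mapped at once, with an
+   allocation that never blocks. */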
+
+/**
+ * Allocate a new kernel area spanning one or multiple pages.
+ *
+ * @param range_base_vaddr If not NULL, the start address of the range
+ * is stored in this location
+ * @return The new range structure, or NULL upon failure
+ */
+struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
+ sos_ui32_t flags,
+ sos_vaddr_t *range_base_vaddr);
+sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range);
+
+
+/**
+ * Straightforward variant of sos_kmem_vmm_new_range() returning the
+ * range's start address instead of the range structure
+ */
+sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
+ sos_ui32_t flags);
+
+/**
+ * @note you are perfectly allowed to give the address of the
+ * kernel image, or the address of the bios area here, it will work:
+ * the kernel/bios WILL be "deallocated". But if you really want to do
+ * this, well..., do expect some "surprises" ;)
+ */
+sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr);
+
+
+/**
+ * @return TRUE when vaddr is covered by any (used) kernel range
+ */
+sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr);
+
+
+/* *****************************
+ * Reserved to kmem_slab.c ONLY.
+ */
+/**
+ * Associate the range with the given slab.
+ */
+sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
+ struct sos_kslab *slab);
+
+/**
+ * Retrieve the (used) slab associated with the range covering vaddr.
+ *
+ * @return NULL when vaddr is not covered by any used KMEM range, or
+ * when that range is not associated with any slab
+ */
+struct sos_kslab *sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr);
+
+#endif /* _SOS_KMEM_VMM_H_ */
diff --git a/sos-code-article6/sos/list.h b/sos-code-article6/sos/list.h
new file mode 100644
index 0000000..67e72f3
--- /dev/null
+++ b/sos-code-article6/sos/list.h
@@ -0,0 +1,186 @@
+/* Copyright (C) 2001 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_LIST_H_
+#define _SOS_LIST_H_
+
+/**
+ * @file list.h
+ *
+ * Circular doubly-linked lists implementation entirely based on C
+ * macros
+ */
+
+
+/* *_named are used when next and prev links are not exactly next
+ and prev. For instance when we have next_in_team, prev_in_team,
+ prev_global and next_global */
+
+#define list_init_named(list,prev,next) \
+ ((list) = NULL)
+
+#define list_singleton_named(list,item,prev,next) ({ \
+ (item)->next = (item)->prev = (item); \
+ (list) = (item); \
+})
+
+#define list_is_empty_named(list,prev,next) \
+ ((list) == NULL)
+
+#define list_get_head_named(list,prev,next) \
+ (list)
+
+#define list_get_tail_named(list,prev,next) \
+ ((list)?((list)->prev):NULL)
+
+/* Internal macro : insert before the head == insert at tail */
+#define __list_insert_atleft_named(before_this,item,prev,next) ({ \
+ (before_this)->prev->next = (item); \
+ (item)->prev = (before_this)->prev; \
+ (before_this)->prev = (item); \
+ (item)->next = (before_this); \
+})
+
+/* @note Before_this and item are expected to be valid ! */
+#define list_insert_before_named(list,before_this,item,prev,next) ({ \
+ __list_insert_atleft_named(before_this,item,prev,next); \
+ if ((list) == (before_this)) (list) = (item); \
+})
+
+/** @note After_this and item are expected to be valid ! */
+#define list_insert_after_named(list,after_this,item,prev,next) ({ \
+ (after_this)->next->prev = (item); \
+ (item)->next = (after_this)->next; \
+ (after_this)->next = (item); \
+ (item)->prev = (after_this); \
+})
+
+#define list_add_head_named(list,item,prev,next) ({ \
+ if (list) \
+ list_insert_before_named(list,list,item,prev,next); \
+ else \
+ list_singleton_named(list,item,prev,next); \
+ (list) = (item); \
+})
+
+#define list_add_tail_named(list,item,prev,next) ({ \
+ if (list) \
+ __list_insert_atleft_named(list,item,prev,next); \
+ else \
+ list_singleton_named(list,item,prev,next); \
+})
+
+/** @note NO check whether item really is in list ! */
+#define list_delete_named(list,item,prev,next) ({ \
+ if ( ((item)->next == (item)) && ((item)->prev == (item)) ) \
+ (item)->next = (item)->prev = (list) = NULL; \
+ else { \
+ (item)->prev->next = (item)->next; \
+ (item)->next->prev = (item)->prev; \
+ if ((item) == (list)) (list) = (item)->next; \
+ (item)->prev = (item)->next = NULL; \
+ } \
+})
+
+#define list_pop_head_named(list,prev,next) ({ \
+ typeof(list) __ret_elt = (list); \
+ list_delete_named(list,__ret_elt,prev,next); \
+ __ret_elt; })
+
+/** Loop statement that iterates through all of its elements, from
+ head to tail */
+#define list_foreach_forward_named(list,iterator,nb_elements,prev,next) \
+ for (nb_elements=0, (iterator) = (list) ; \
+ (iterator) && (!nb_elements || ((iterator) != (list))) ; \
+ nb_elements++, (iterator) = (iterator)->next )
+
+/** Loop statement that iterates through all of its elements, from
+ tail back to head */
+#define list_foreach_backward_named(list,iterator,nb_elements,prev,next) \
+ for (nb_elements=0, (iterator) = list_get_tail_named(list,prev,next) ; \
+ (iterator) && (!nb_elements || \
+ ((iterator) != list_get_tail_named(list,prev,next))) ; \
+ nb_elements++, (iterator) = (iterator)->prev )
+
+#define list_foreach_named list_foreach_forward_named
+
+/** True when we exited the foreach loop early (ie through a break) */
+#define list_foreach_early_break(list,iterator,nb_elements) \
+ ((list) && ( \
+ ((list) != (iterator)) || \
+ ( ((list) == (iterator)) && (nb_elements == 0)) ))
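+/* Example (illustrative, is_wanted() being any predicate): after
+     list_foreach(l, it, nb) { if (is_wanted(it)) break; }
+   list_foreach_early_break(l, it, nb) is TRUE iff the break was taken
+   before the whole list had been traversed. */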
+
+/** Loop statement that removes the head item from the list at each
+    iteration (the list is empty when the loop ends) */
+#define list_collapse_named(list,iterator,prev,next) \
+ for ( ; ({ ((iterator) = (list)) ; \
+ if (list) list_delete_named(list,iterator,prev,next) ; \
+ (iterator); }) ; )
+
+
+/*
+ * The same macros, assuming that the prev and next fields are really
+ * named "prev" and "next"
+ */
+
+#define list_init(list) \
+ list_init_named(list,prev,next)
+
+#define list_singleton(list,item) \
+ list_singleton_named(list,item,prev,next)
+
+#define list_is_empty(list) \
+ list_is_empty_named(list,prev,next)
+
+#define list_get_head(list) \
+  list_get_head_named(list,prev,next)
+
+#define list_get_tail(list) \
+  list_get_tail_named(list,prev,next)
+
+/* @note After_this and item are expected to be valid ! */
+#define list_insert_after(list,after_this,item) \
+ list_insert_after_named(list,after_this,item,prev,next)
+
+/* @note Before_this and item are expected to be valid ! */
+#define list_insert_before(list,before_this,item) \
+ list_insert_before_named(list,before_this,item,prev,next)
+
+#define list_add_head(list,item) \
+ list_add_head_named(list,item,prev,next)
+
+#define list_add_tail(list,item) \
+ list_add_tail_named(list,item,prev,next)
+
+/* @note NO check whether item really is in list ! */
+#define list_delete(list,item) \
+ list_delete_named(list,item,prev,next)
+
+#define list_pop_head(list) \
+ list_pop_head_named(list,prev,next)
+
+#define list_foreach_forward(list,iterator,nb_elements) \
+ list_foreach_forward_named(list,iterator,nb_elements,prev,next)
+
+#define list_foreach_backward(list,iterator,nb_elements) \
+ list_foreach_backward_named(list,iterator,nb_elements,prev,next)
+
+#define list_foreach list_foreach_forward
+
+#define list_collapse(list,iterator) \
+ list_collapse_named(list,iterator,prev,next)
+
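+/* Illustrative usage sketch (not part of the original file, hence
+   the #if 0). Any structure with 'prev' and 'next' fields can be
+   stored in such a circular list: */
+#if 0
+struct my_item { int value; struct my_item *prev, *next; };
+
+static int sum_items(struct my_item *the_list)
+{
+  struct my_item *it;
+  int nb, total = 0;
+  list_foreach(the_list, it, nb)
+    total += it->value;
+  return total;
+}
+#endif
+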
+#endif /* _SOS_LIST_H_ */
diff --git a/sos-code-article6/sos/macros.h b/sos-code-article6/sos/macros.h
new file mode 100644
index 0000000..80a05d3
--- /dev/null
+++ b/sos-code-article6/sos/macros.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_MACROS_H_
+#define _SOS_MACROS_H_
+
+/** Align on a boundary (MUST be a power of 2), so that return value <= val */
+#define SOS_ALIGN_INF(val,boundary) \
+ (((unsigned)(val)) & (~((boundary)-1)))
+
+/** Align on a boundary (MUST be a power of 2), so that return value >= val */
+#define SOS_ALIGN_SUP(val,boundary) \
+ ({ unsigned int __bnd=(boundary); \
+ (((((unsigned)(val))-1) & (~(__bnd - 1))) + __bnd); })
+
+/** Check whether val is aligned on a boundary (MUST be a power of 2) */
+#define SOS_IS_ALIGNED(val,boundary) \
+ ( 0 == (((unsigned)(val)) & ((boundary)-1)) )
+
+/**
+ * @return TRUE if val is a power of 2.
+ * @note val is evaluated multiple times, and val == 0 is (incorrectly)
+ *       reported as a power of 2
+ */
+#define SOS_IS_POWER_OF_2(val) \
+ ((((val) - 1) & (val)) == 0)
+
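+/* Example (illustrative): with a 4kB (0x1000) boundary,
+   SOS_ALIGN_INF(0x1234, 0x1000) == 0x1000,
+   SOS_ALIGN_SUP(0x1234, 0x1000) == 0x2000 and
+   SOS_ALIGN_SUP(0x1000, 0x1000) == 0x1000 (already aligned). */
+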
+#endif /* _SOS_MACROS_H_ */
diff --git a/sos-code-article6/sos/main.c b/sos-code-article6/sos/main.c
new file mode 100644
index 0000000..37f1d68
--- /dev/null
+++ b/sos-code-article6/sos/main.c
@@ -0,0 +1,1162 @@
+/* Copyright (C) 2004 The SOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+/* Include definitions of the multiboot standard */
+#include <bootstrap/multiboot.h>
+#include <hwcore/idt.h>
+#include <hwcore/gdt.h>
+#include <hwcore/irq.h>
+#include <hwcore/exception.h>
+#include <hwcore/i8254.h>
+#include <sos/list.h>
+#include <sos/physmem.h>
+#include <hwcore/paging.h>
+#include <sos/kmem_vmm.h>
+#include <sos/kmalloc.h>
+#include <sos/klibc.h>
+#include <sos/assert.h>
+#include <drivers/x86_videomem.h>
+#include <drivers/bochs.h>
+
+
+/* Helper function to display each bit of a 32-bit integer on the
+   screen as a dark or light square */
+void display_bits(unsigned char row, unsigned char col,
+ unsigned char attribute,
+ sos_ui32_t integer)
+{
+ int i;
+ /* Scan each bit of the integer, MSb first */
+ for (i = 31 ; i >= 0 ; i--)
+ {
+ /* Test if bit i of 'integer' is set */
+ int bit_i = (integer & (1 << i));
+      /* Ascii 219 => dark square, Ascii 177 => light square */
+ unsigned char ascii_code = bit_i?219:177;
+ sos_x86_videomem_putchar(row, col++,
+ attribute,
+ ascii_code);
+ }
+}
+
+
+/* Clock IRQ handler */
+static void clk_it(int intid)
+{
+ static sos_ui32_t clock_count = 0;
+
+ display_bits(0, 48,
+ SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE,
+ clock_count);
+ clock_count++;
+}
+
+
+/* ======================================================================
+ * Page fault exception handling
+ */
+
+/* Helper function to dump a backtrace on bochs and/or the console */
+
+static void dump_backtrace(const struct sos_cpu_state *cpu_state,
+ sos_vaddr_t stack_bottom,
+ sos_size_t stack_size,
+ sos_bool_t on_console,
+ sos_bool_t on_bochs)
+{
+ static void backtracer(sos_vaddr_t PC,
+ sos_vaddr_t params,
+ sos_ui32_t depth,
+ void *custom_arg)
+ {
+ sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;
+
+ /* Get the address of the first 3 arguments from the
+ frame. Among these arguments, 0, 1, 2, 3 arguments might be
+ meaningful (depending on how many arguments the function may
+ take). */
+ arg1 = (sos_ui32_t*)params;
+ arg2 = (sos_ui32_t*)(params+4);
+ arg3 = (sos_ui32_t*)(params+8);
+ arg4 = (sos_ui32_t*)(params+12);
+
+ /* Make sure the addresses of these arguments fit inside the
+ stack boundaries */
+#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
+ && ((sos_vaddr_t)(v) < (u)) )
+ if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
+ arg1 = &invalid;
+ if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
+ arg2 = &invalid;
+ if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
+ arg3 = &invalid;
+ if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
+ arg4 = &invalid;
+
+ /* Print the function context for this frame */
+ if (on_bochs)
+ sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
+ (unsigned)depth, (unsigned)PC,
+ (unsigned)*arg1, (unsigned)*arg2,
+ (unsigned)*arg3);
+
+ if (on_console)
+ sos_x86_videomem_printf(23-depth, 3,
+ SOS_X86_VIDEO_BG_BLUE
+ | SOS_X86_VIDEO_FG_LTGREEN,
+ "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
+ (unsigned)depth, PC,
+ (unsigned)*arg1, (unsigned)*arg2,
+ (unsigned)*arg3, (unsigned)*arg4);
+
+ }
+ sos_backtrace(cpu_state, 15, stack_bottom, stack_size, backtracer, NULL);
+}
+
+
+/* Page fault exception handler with demand paging for the kernel */
+static void pgflt_ex(int intid, const struct sos_cpu_state *ctxt)
+{
+ static sos_ui32_t demand_paging_count = 0;
+ sos_vaddr_t faulting_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt);
+ sos_paddr_t ppage_paddr;
+
+ /* Check if address is covered by any VMM range */
+ if (! sos_kmem_vmm_is_valid_vaddr(faulting_vaddr))
+ {
+ /* No: The page fault is out of any kernel virtual region. For
+ the moment, we don't handle this. */
+ dump_backtrace(ctxt,
+ bootstrap_stack_bottom,
+ bootstrap_stack_size,
+ TRUE, TRUE);
+      sos_display_fatal_error("Unresolved page fault at instruction 0x%x on access to address 0x%x (info=%x)!",
+ sos_cpu_context_get_PC(ctxt),
+ (unsigned)faulting_vaddr,
+ (unsigned)sos_cpu_context_get_EX_info(ctxt));
+ SOS_ASSERT_FATAL(! "Got page fault (note: demand paging is disabled)");
+ }
+
+
+ /*
+ * Demand paging
+ */
+
+ /* Update the number of demand paging requests handled */
+ demand_paging_count ++;
+ display_bits(0, 0,
+ SOS_X86_VIDEO_FG_LTRED | SOS_X86_VIDEO_BG_BLUE,
+ demand_paging_count);
+
+ /* Allocate a new page for the virtual address */
+ ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
+ if (! ppage_paddr)
+ SOS_ASSERT_FATAL(! "TODO: implement swap. (Out of mem in demand paging because no swap for kernel yet !)");
+ SOS_ASSERT_FATAL(SOS_OK == sos_paging_map(ppage_paddr,
+ SOS_PAGE_ALIGN_INF(faulting_vaddr),
+ FALSE,
+ SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE
+ | SOS_VM_MAP_ATOMIC));
+ sos_physmem_unref_physpage(ppage_paddr);
+
+ /* Ok, we can now return to interrupted context */
+}
+
+
+
+/* ======================================================================
+ * Demonstrate the use of the kernel CPU context management API:
+ * - A coroutine prints "Hlowrd" and switches to the other after each
+ * letter
+ * - A coroutine prints "el ol\n" and switches back to the other after
+ * each letter.
+ * The first to reach the '\n' returns back to main.
+ */
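+/* Expected interleaving (illustrative): the coroutines alternate one
+   character at a time, so the bochs output reads hello1: H, hello2: e,
+   hello1: l, hello2: l, hello1: o, ... ie the two strings interleave
+   into "Hello world". */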
+struct sos_cpu_state *ctxt_hello1;
+struct sos_cpu_state *ctxt_hello2;
+struct sos_cpu_state *ctxt_main;
+sos_vaddr_t hello1_stack, hello2_stack;
+
+static void reclaim_stack(sos_vaddr_t stack_vaddr)
+{
+ sos_kfree(stack_vaddr);
+}
+
+
+static void exit_hello12(sos_vaddr_t stack_vaddr)
+{
+ sos_cpu_context_exit_to(ctxt_main,
+ (sos_cpu_kstate_function_arg1_t*) reclaim_stack,
+ stack_vaddr);
+}
+
+
+static void hello1 (char *str)
+{
+ for ( ; *str != '\n' ; str++)
+ {
+ sos_bochs_printf("hello1: %c\n", *str);
+ sos_cpu_context_switch(& ctxt_hello1, ctxt_hello2);
+ }
+
+ /* You can uncomment this in case you explicitly want to exit
+ now. But returning from the function will do the same */
+ /* sos_cpu_context_exit_to(ctxt_main,
+ (sos_cpu_kstate_function_arg1_t*) reclaim_stack,
+ hello1_stack); */
+}
+
+
+static void hello2 (char *str)
+{
+ for ( ; *str != '\n' ; str++)
+ {
+ sos_bochs_printf("hello2: %c\n", *str);
+ sos_cpu_context_switch(& ctxt_hello2, ctxt_hello1);
+ }
+
+ /* You can uncomment this in case you explicitly want to exit
+ now. But returning from the function will do the same */
+ /* sos_cpu_context_exit_to(ctxt_main,
+ (sos_cpu_kstate_function_arg1_t*) reclaim_stack,
+ hello2_stack); */
+}
+
+
+void print_hello_world ()
+{
+#define DEMO_STACK_SIZE 1024
+ /* Allocate the stacks */
+ hello1_stack = sos_kmalloc(DEMO_STACK_SIZE, 0);
+ hello2_stack = sos_kmalloc(DEMO_STACK_SIZE, 0);
+
+ /* Initialize the coroutines' contexts */
+ sos_cpu_kstate_init(&ctxt_hello1,
+ (sos_cpu_kstate_function_arg1_t*) hello1,
+ (sos_ui32_t) "Hlowrd",
+ (sos_vaddr_t) hello1_stack, DEMO_STACK_SIZE,
+ (sos_cpu_kstate_function_arg1_t*) exit_hello12,
+ (sos_ui32_t) hello1_stack);
+ sos_cpu_kstate_init(&ctxt_hello2,
+ (sos_cpu_kstate_function_arg1_t*) hello2,
+ (sos_ui32_t) "el ol\n",
+ (sos_vaddr_t) hello2_stack, DEMO_STACK_SIZE,
+ (sos_cpu_kstate_function_arg1_t*) exit_hello12,
+ (sos_ui32_t) hello2_stack);
+
+ /* Go to first coroutine */
+ sos_bochs_printf("Printing Hello World\\n...\n");
+ sos_cpu_context_switch(& ctxt_main, ctxt_hello1);
+
+ /* The first coroutine to reach the '\n' switched back to us */
+ sos_bochs_printf("Back in main !\n");
+}
+
+
+/* ======================================================================
+ * Generate page faults on an unmapped but allocated kernel virtual
+ * region, which results in a series of physical memory mappings for the
+ * faulted pages.
+ */
+static void test_demand_paging(int nb_alloc_vpages, int nb_alloc_ppages)
+{
+ int i;
+ sos_vaddr_t base_vaddr;
+
+ sos_x86_videomem_printf(10, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_LTGREEN,
+ "Demand paging test (alloc %dMB of VMM, test %dkB RAM)",
+ nb_alloc_vpages >> 8, nb_alloc_ppages << 2);
+
+ /* Allocate virtual memory */
+ base_vaddr = sos_kmem_vmm_alloc(nb_alloc_vpages, 0);
+
+ SOS_ASSERT_FATAL(base_vaddr != (sos_vaddr_t)NULL);
+ sos_x86_videomem_printf(11, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Allocated virtual region [0x%x, 0x%x[",
+ base_vaddr,
+ base_vaddr + nb_alloc_vpages*SOS_PAGE_SIZE);
+
+ /* Now use part of it in physical memory */
+ for (i = 0 ; (i < nb_alloc_ppages) && (i < nb_alloc_vpages) ; i++)
+ {
+ /* Compute an address inside the range */
+ sos_ui32_t *value, j;
+ sos_vaddr_t vaddr = base_vaddr;
+ vaddr += (nb_alloc_vpages - (i + 1))*SOS_PAGE_SIZE;
+ vaddr += 2345;
+
+ sos_x86_videomem_printf(12, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Writing %d at virtual address 0x%x...",
+ i, vaddr);
+
+ /* Write at this address */
+ value = (sos_ui32_t*)vaddr;
+ *value = i;
+
+ /* Yep ! A new page should normally have been allocated for us */
+ sos_x86_videomem_printf(13, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Value read at address 0x%x = %d",
+ vaddr, (unsigned)*value);
+ }
+
+ SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(base_vaddr));
+  /* The area and its demand-allocated pages have now been released */
+ sos_x86_videomem_printf(14, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Done (area un-allocated)");
+}
+
+
+
+/* ======================================================================
+ * Shows how the backtrace stuff works
+ */
+
+/* Recursive function. Print the backtrace from the innermost function */
+static void test_backtrace(int i, int magic, sos_vaddr_t stack_bottom,
+ sos_size_t stack_size)
+{
+ if (i <= 0)
+ {
+ /* The page fault exception handler will print the backtrace of
+ this function, because address 0x42 is not mapped */
+ *((char*)0x42) = 12;
+
+ /* More direct variant: */
+ /* dump_backtrace(NULL, stack_bottom, stack_size, TRUE, TRUE); */
+ }
+ else
+ test_backtrace(i-1, magic, stack_bottom, stack_size);
+}
+
+
+/* ======================================================================
+ * Parsing of Mathematical expressions
+ *
+ * This is a recursive lexer/parser/evaluator for arithmetical
+ * expressions. Supports both binary +/-* and unary +- operators, as
+ * well as parentheses.
+ *
+ * Terminal tokens (Lexer):
+ * - Number: positive integer number
+ * - Variable: ascii name (regexp: [a-zA-Z]+)
+ * - Operator: +*-/
+ * - Opening/closing parentheses
+ *
+ * Grammar (Parser):
+ *   Expression ::= Term Expr_lr
+ * Expr_lr ::= + Term Expr_lr | - Term Expr_lr | Nothing
+ * Term ::= Factor Term_lr
+ * Term_lr ::= * Factor Term_lr | / Factor Term_lr | Nothing
+ * Factor ::= - Factor | + Factor | Scalar | ( Expression )
+ * Scalar ::= Number | Variable
+ *
+ * Note. This is the left-recursive equivalent of the following basic grammar:
+ * Expression ::= Expression + Term | Expression - Term
+ * Term ::= Term * Factor | Term / Factor
+ *   Factor ::= - Factor | + Factor | Scalar | ( Expression )
+ * Scalar ::= Number | Variable
+ *
+ * The parsing is organized as a 3-stage pipeline:
+ * - The reader: reads a string 1 character at a time, transferring
+ *   control back to the lexer after each char. This function shows the
+ *   benefit of using coroutines, because its state (str) is
+ *   implicitly stored on the stack between iterations.
+ * - The lexer: consumes the characters from the reader and identifies
+ *   the terminal tokens, 1 token at a time, transferring control back
+ *   to the parser after each token. This function shows the benefit
+ *   of using coroutines, because its state (c and got_what_before) is
+ *   implicitly stored on the stack between iterations.
+ * - The parser: consumes the tokens from the lexer and builds the
+ * syntax tree of the expression. There is no real algorithmic
+ * interest in defining a coroutine devoted to do this. HOWEVER, we
+ * do use one for that because this allows us to switch to a much
+ * deeper stack. Actually, the parser is highly recursive, so that
+ * the default 16kB stack of the sos_main() function might not be
+ * enough. Here, we switch to a 64kB stack, which is safer for
+ * recursive functions. The Parser uses intermediary functions: these
+ * are defined and implemented as internal nested functions. This is
+ * just for the sake of clarity, and is absolutely not mandatory for
+ * the algorithm: one can transfer these functions out of the parser
+ * function without restriction.
+ *
+ * The evaluator is another recursive function that reuses the
+ * parser's stack to evaluate the parsed expression with the given
+ * values for the variables present in the expression. As for the
+ * parser function, this function defines and uses a nested function,
+ * which can be extracted from the main evaluation function at will.
+ *
+ * All these functions support a kind of "exception" feature: when
+ * something goes wrong, control is transferred DIRECTLY back to the
+ * sos_main() context, without unrolling the recursions. This shows
+ * how exceptions basically work, but one should not consider this as
+ * a reference exceptions implementation. Real exception mechanisms
+ * (such as that of the C++ language) call the destructors of the
+ * objects allocated on the stack during the "stack unwinding" process
+ * upon exception handling, which complicates the mechanism a lot. We
+ * don't have real Objects here (in the OOP sense, full-featured with
+ * destructors), so we don't have to complicate things.
+ *
+ * After this little coroutine demo, one should forget all about such
+ * a low-level manual direct manipulation of stacks. Doing what we do
+ * here would probably mess up the whole kernel (locked
+ * resources such as mutexes/semaphores won't be correctly unlocked,
+ * ...). Higher level "kernel thread" primitives will soon be
+ * presented, which provide a higher-level set of APIs to manage CPU
+ * contexts. You'll have to use EXCLUSIVELY those APIs. If you still
+ * need a huge stack to do recursion for example, please don't even
+ * think of changing manually the stack for something bigger ! Simply
+ * rethink your algorithm, making it non-recursive.
+ */
+
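+/* Example (illustrative): parsing "1 + 2*x" yields the syntax tree
+   binop('+', num(1), binop('*', num(2), var("x"))), which
+   eval_expression() with x=3 evaluates to 1 + 2*3 = 7. */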
+
+/* The stacks involved */
+static char stack_reader[1024];
+static char stack_lexer[1024];
+static char deep_stack[65536]; /* For the parser and the evaluator */
+
+/* The CPU states for the various coroutines */
+static struct sos_cpu_state *st_reader, *st_lexer, *st_parser,
+ *st_eval, *st_free, *st_main;
+
+
+/*
+ * Default exit/reclaim functions: return control to the "sos_main()"
+ * context
+ */
+static void reclaim(int unused)
+{
+}
+static void func_exit(sos_ui32_t unused)
+{
+ sos_cpu_context_exit_to(st_main, (sos_cpu_kstate_function_arg1_t*)reclaim, 0);
+}
+
+
+/*
+ * The reader coroutine and associated variable. This coroutine could
+ * have been a normal function, except that the current parsed
+ * character would have to be stored somewhere.
+ */
+static char data_reader_to_lexer;
+
+static void func_reader(const char *str)
+{
+ for ( ; str && (*str != '\0') ; str++)
+ {
+ data_reader_to_lexer = *str;
+ sos_cpu_context_switch(& st_reader, st_lexer);
+ }
+
+ data_reader_to_lexer = '\0';
+ sos_cpu_context_switch(& st_reader, st_lexer);
+}
+
+
+/*
+ * The Lexer coroutine and associated types/variables. This coroutine
+ * could have been a normal function, except that the current parsed
+ * character, token and previous token would have to be stored
+ * somewhere.
+ */
+#define STR_VAR_MAXLEN 16
+static struct lex_elem
+{
+ enum { LEX_IS_NUMBER, LEX_IS_OPER, LEX_IS_VAR,
+ LEX_IS_OPENPAR, LEX_IS_CLOSEPAR, LEX_END } type;
+ union {
+ int number;
+ char operator;
+ char var[STR_VAR_MAXLEN];
+ };
+} data_lexer_to_parser;
+
+static void func_lexer(sos_ui32_t unused)
+{
+ char c;
+ enum { GOT_SPACE, GOT_NUM, GOT_OP, GOT_STR,
+ GOT_OPENPAR, GOT_CLOSEPAR } got_what, got_what_before;
+
+ data_lexer_to_parser.number = 0;
+ got_what_before = GOT_SPACE;
+ do
+ {
+ /* Consume one character from the reader */
+ sos_cpu_context_switch(& st_lexer, st_reader);
+ c = data_reader_to_lexer;
+
+ /* Classify the consumed character */
+ if ( (c >= '0') && (c <= '9') )
+ got_what = GOT_NUM;
+ else if ( (c == '+') || (c == '-') || (c == '*') || (c == '/') )
+ got_what = GOT_OP;
+ else if ( ( (c >= 'a') && (c <= 'z') )
+ || ( (c >= 'A') && (c <= 'Z') ) )
+ got_what = GOT_STR;
+ else if (c == '(')
+ got_what = GOT_OPENPAR;
+ else if (c == ')')
+ got_what = GOT_CLOSEPAR;
+ else
+ got_what = GOT_SPACE;
+
+ /* Determine whether the current token is ended */
+ if ( (got_what != got_what_before)
+ || (got_what_before == GOT_OP)
+ || (got_what_before == GOT_OPENPAR)
+ || (got_what_before == GOT_CLOSEPAR) )
+ {
+ /* return control back to the parser if the previous token
+ has been recognized */
+ if ( (got_what_before != GOT_SPACE) )
+ sos_cpu_context_switch(& st_lexer, st_parser);
+
+ data_lexer_to_parser.number = 0;
+ }
+
+ /* Update the token being currently recognized */
+ if (got_what == GOT_OP)
+ {
+ data_lexer_to_parser.type = LEX_IS_OPER;
+ data_lexer_to_parser.operator = c;
+ }
+ else if (got_what == GOT_NUM)
+ {
+ data_lexer_to_parser.type = LEX_IS_NUMBER;
+ data_lexer_to_parser.number *= 10;
+ data_lexer_to_parser.number += (c - '0');
+ }
+ else if (got_what == GOT_STR)
+ {
+ char to_cat[] = { c, '\0' };
+ data_lexer_to_parser.type = LEX_IS_VAR;
+ strzcat(data_lexer_to_parser.var, to_cat, STR_VAR_MAXLEN);
+ }
+ else if (got_what == GOT_OPENPAR)
+ data_lexer_to_parser.type = LEX_IS_OPENPAR;
+ else if (got_what == GOT_CLOSEPAR)
+ data_lexer_to_parser.type = LEX_IS_CLOSEPAR;
+
+ got_what_before = got_what;
+ }
+ while (c != '\0');
+
+ /* Transfer last recognized token to the parser */
+ if ( (got_what_before != GOT_SPACE) )
+ sos_cpu_context_switch(& st_lexer, st_parser);
+
+ /* Signal that no more token are available */
+ data_lexer_to_parser.type = LEX_END;
+ sos_cpu_context_switch(& st_lexer, st_parser);
+
+ /* Exception: parser asks for a token AFTER having received the last
+ one */
+ sos_bochs_printf("Error: end of string already reached !\n");
+ sos_cpu_context_switch(& st_lexer, st_main);
+}
+
+
+/*
+ * The Parser coroutine and associated types/variables
+ */
+struct syntax_node
+{
+ enum { YY_IS_BINOP, YY_IS_UNAROP, YY_IS_NUM, YY_IS_VAR } type;
+ union
+ {
+ int number;
+ char var[STR_VAR_MAXLEN];
+ struct
+ {
+ char op;
+ struct syntax_node *parm_left, *parm_right;
+ } binop;
+ struct
+ {
+ char op;
+ struct syntax_node *parm;
+ } unarop;
+ };
+};
+
+static void func_parser(struct syntax_node ** syntax_tree)
+{
+ static struct syntax_node *alloc_node_num(int val);
+ static struct syntax_node *alloc_node_var(const char * name);
+ static struct syntax_node *alloc_node_binop(char op,
+ struct syntax_node *parm_left,
+ struct syntax_node *parm_right);
+ static struct syntax_node *alloc_node_unarop(char op,
+ struct syntax_node *parm);
+ static struct syntax_node * get_expr();
+ static struct syntax_node * get_expr_lr(struct syntax_node *n);
+ static struct syntax_node * get_term();
+ static struct syntax_node * get_term_lr(struct syntax_node *n);
+ static struct syntax_node * get_factor();
+ static struct syntax_node * get_scalar();
+
+ /* Create a new node to store a number */
+ static struct syntax_node *alloc_node_num(int val)
+ {
+ struct syntax_node *n
+ = (struct syntax_node*) sos_kmalloc(sizeof(struct syntax_node), 0);
+ n->type = YY_IS_NUM;
+ n->number = val;
+ return n;
+ }
+ /* Create a new node to store a variable */
+ static struct syntax_node *alloc_node_var(const char * name)
+ {
+ struct syntax_node *n
+ = (struct syntax_node*) sos_kmalloc(sizeof(struct syntax_node), 0);
+ n->type = YY_IS_VAR;
+ strzcpy(n->var, name, STR_VAR_MAXLEN);
+ return n;
+ }
+ /* Create a new node to store a binary operator */
+ static struct syntax_node *alloc_node_binop(char op,
+ struct syntax_node *parm_left,
+ struct syntax_node *parm_right)
+ {
+ struct syntax_node *n
+ = (struct syntax_node*) sos_kmalloc(sizeof(struct syntax_node), 0);
+ n->type = YY_IS_BINOP;
+ n->binop.op = op;
+ n->binop.parm_left = parm_left;
+ n->binop.parm_right = parm_right;
+ return n;
+ }
+ /* Create a new node to store a unary operator */
+ static struct syntax_node *alloc_node_unarop(char op,
+ struct syntax_node *parm)
+ {
+ struct syntax_node *n
+ = (struct syntax_node*) sos_kmalloc(sizeof(struct syntax_node), 0);
+ n->type = YY_IS_UNAROP;
+ n->unarop.op = op;
+ n->unarop.parm = parm;
+ return n;
+ }
+
+ /* Raise an exception: transfer control back to main context,
+ without unrolling the whole recursion */
+ static void parser_exception(const char *str)
+ {
+ sos_bochs_printf("Parser exception: %s\n", str);
+ sos_cpu_context_switch(& st_parser, st_main);
+ }
+
+ /* Consume the current terminal "number" token and ask for a new
+ token */
+ static int get_number()
+ {
+ int v;
+ if (data_lexer_to_parser.type != LEX_IS_NUMBER)
+ parser_exception("Expected number");
+ v = data_lexer_to_parser.number;
+ sos_cpu_context_switch(& st_parser, st_lexer);
+ return v;
+ }
+ /* Consume the current terminal "variable" token and ask for a new
+ token */
+ static void get_str(char name[STR_VAR_MAXLEN])
+ {
+ if (data_lexer_to_parser.type != LEX_IS_VAR)
+ parser_exception("Expected variable");
+ strzcpy(name, data_lexer_to_parser.var, STR_VAR_MAXLEN);
+ sos_cpu_context_switch(& st_parser, st_lexer);
+ }
+ /* Consume the current terminal "operator" token and ask for a new
+ token */
+ static char get_op()
+ {
+ char op;
+ if (data_lexer_to_parser.type != LEX_IS_OPER)
+ parser_exception("Expected operator");
+ op = data_lexer_to_parser.operator;
+ sos_cpu_context_switch(& st_parser, st_lexer);
+ return op;
+ }
+  /* Consume the current terminal "parenthesis" token and ask for a new
+ token */
+ static void get_par()
+ {
+ if ( (data_lexer_to_parser.type != LEX_IS_OPENPAR)
+ && (data_lexer_to_parser.type != LEX_IS_CLOSEPAR) )
+      parser_exception("Expected parenthesis");
+ sos_cpu_context_switch(& st_parser, st_lexer);
+ }
+
+ /* Parse an Expression */
+ static struct syntax_node * get_expr()
+ {
+ struct syntax_node *t = get_term();
+ return get_expr_lr(t);
+ }
+ /* Parse an Expr_lr */
+ static struct syntax_node * get_expr_lr(struct syntax_node *n)
+ {
+ if ( (data_lexer_to_parser.type == LEX_IS_OPER)
+ && ( (data_lexer_to_parser.operator == '+')
+ || (data_lexer_to_parser.operator == '-') ) )
+ {
+ char op = get_op();
+ struct syntax_node *term = get_term();
+ struct syntax_node *node_op = alloc_node_binop(op, n, term);
+ return get_expr_lr(node_op);
+ }
+ return n;
+ }
+ /* Parse a Term */
+ static struct syntax_node * get_term()
+ {
+ struct syntax_node *f1 = get_factor();
+ return get_term_lr(f1);
+ }
+ /* Parse a Term_lr */
+ static struct syntax_node * get_term_lr(struct syntax_node *n)
+ {
+ if ( (data_lexer_to_parser.type == LEX_IS_OPER)
+ && ( (data_lexer_to_parser.operator == '*')
+ || (data_lexer_to_parser.operator == '/') ) )
+ {
+ char op = get_op();
+ struct syntax_node *factor = get_factor();
+ struct syntax_node *node_op = alloc_node_binop(op, n, factor);
+ return get_term_lr(node_op);
+ }
+ return n;
+ }
+ /* Parse a Factor */
+ static struct syntax_node * get_factor()
+ {
+ if ( (data_lexer_to_parser.type == LEX_IS_OPER)
+ && ( (data_lexer_to_parser.operator == '-')
+ || (data_lexer_to_parser.operator == '+') ) )
+ { char op = data_lexer_to_parser.operator;
+ get_op(); return alloc_node_unarop(op, get_factor()); }
+ else if (data_lexer_to_parser.type == LEX_IS_OPENPAR)
+ {
+ struct syntax_node *expr;
+ get_par();
+ expr = get_expr();
+ if (data_lexer_to_parser.type != LEX_IS_CLOSEPAR)
+ parser_exception("Mismatched parentheses");
+ get_par();
+ return expr;
+ }
+
+ return get_scalar();
+ }
+ /* Parse a Scalar */
+ static struct syntax_node * get_scalar()
+ {
+ if (data_lexer_to_parser.type != LEX_IS_NUMBER)
+ {
+ char var[STR_VAR_MAXLEN];
+ get_str(var);
+ return alloc_node_var(var);
+ }
+ return alloc_node_num(get_number());
+ }
+
+
+ /*
+ * Body of the function
+ */
+
+ /* Get the first token */
+ sos_cpu_context_switch(& st_parser, st_lexer);
+
+ /* Begin the parsing ! */
+ *syntax_tree = get_expr();
+ /* The result is returned in the syntax_tree parameter */
+}
+
+
+/*
+ * Setup the parser's pipeline
+ */
+static struct syntax_node * parse_expression(const char *expr)
+{
+ struct syntax_node *retval = NULL;
+
+ /* Build the context of the functions in the pipeline */
+ sos_cpu_kstate_init(& st_reader,
+ (sos_cpu_kstate_function_arg1_t*)func_reader,
+ (sos_ui32_t)expr,
+ (sos_vaddr_t)stack_reader, sizeof(stack_reader),
+ (sos_cpu_kstate_function_arg1_t*)func_exit, 0);
+ sos_cpu_kstate_init(& st_lexer,
+ (sos_cpu_kstate_function_arg1_t*)func_lexer,
+ 0,
+ (sos_vaddr_t)stack_lexer, sizeof(stack_lexer),
+ (sos_cpu_kstate_function_arg1_t*)func_exit, 0);
+ sos_cpu_kstate_init(& st_parser,
+ (sos_cpu_kstate_function_arg1_t*)func_parser,
+ (sos_ui32_t) /* syntax tree ! */&retval,
+ (sos_vaddr_t)deep_stack, sizeof(deep_stack),
+ (sos_cpu_kstate_function_arg1_t*)func_exit, 0);
+
+ /* Parse the expression */
+ sos_cpu_context_switch(& st_main, st_parser);
+ return retval;
+}
+
+
+/*
+ * The Evaluator coroutine and associated types/variables
+ */
+struct func_eval_params
+{
+ const struct syntax_node *e;
+ const char **var_name;
+ int *var_val;
+ int nb_vars;
+
+ int result;
+};
+
+static void func_eval(struct func_eval_params *parms)
+{
+ /* The internal (recursive) nested function to evaluate each node of
+ the syntax tree */
+ static int rec_eval(const struct syntax_node *n,
+ const char* var_name[], int var_val[], int nb_vars)
+ {
+ switch (n->type)
+ {
+ case YY_IS_NUM:
+ return n->number;
+
+ case YY_IS_VAR:
+ {
+ int i;
+ for (i = 0 ; i < nb_vars ; i++)
+ if (0 == strcmp(var_name[i], n->var))
+ return var_val[i];
+
+ /* Exception: no variable with that name ! */
+ sos_bochs_printf("ERROR: unknown variable %s\n", n->var);
+ sos_cpu_context_switch(& st_eval, st_main);
+ }
+
+ case YY_IS_BINOP:
+ {
+ int left = rec_eval(n->binop.parm_left,
+ var_name, var_val, nb_vars);
+ int right = rec_eval(n->binop.parm_right,
+ var_name, var_val, nb_vars);
+ switch (n->binop.op)
+ {
+ case '+': return left + right;
+ case '-': return left - right;
+ case '*': return left * right;
+ case '/': return left / right;
+ default:
+ /* Exception: no such operator (INTERNAL error) ! */
+ sos_bochs_printf("ERROR: unknown binop %c\n", n->binop.op);
+ sos_cpu_context_switch(& st_eval, st_main);
+ }
+ }
+
+ case YY_IS_UNAROP:
+ {
+ int arg = rec_eval(n->unarop.parm, var_name, var_val, nb_vars);
+ switch (n->unarop.op)
+ {
+ case '-': return -arg;
+ case '+': return arg;
+ default:
+ /* Exception: no such operator (INTERNAL error) ! */
+ sos_bochs_printf("ERROR: unknown unarop %c\n", n->unarop.op);
+ sos_cpu_context_switch(& st_eval, st_main);
+ }
+ }
+ }
+
+ /* Exception: no such syntax node (INTERNAL error) ! */
+ sos_bochs_printf("ERROR: invalid node type\n");
+ sos_cpu_context_switch(& st_eval, st_main);
+ return -1; /* let's make gcc happy */
+ }
+
+
+ /*
+ * Function BODY
+ */
+  /* Update parms->result, which is returned back to the calling function */
+ parms->result
+ = rec_eval(parms->e, parms->var_name, parms->var_val, parms->nb_vars);
+}
+
+/*
+ * Change the stack for something larger in order to call the
+ * recursive function above in a safe way
+ */
+static int eval_expression(const struct syntax_node *e,
+ const char* var_name[], int var_val[], int nb_vars)
+{
+ struct func_eval_params p
+ = (struct func_eval_params){ .e=e,
+ .var_name=var_name,
+ .var_val=var_val,
+ .nb_vars=nb_vars,
+ .result = 0 };
+
+ sos_cpu_kstate_init(& st_eval,
+ (sos_cpu_kstate_function_arg1_t*)func_eval,
+ (sos_ui32_t)/* p.result is modified upon success */&p,
+ (sos_vaddr_t)deep_stack, sizeof(deep_stack),
+ (sos_cpu_kstate_function_arg1_t*)func_exit, 0);
+
+ /* Go ! */
+ sos_cpu_context_switch(& st_main, st_eval);
+ return p.result;
+}
+
+
+/*
+ * Function to free the syntax tree
+ */
+static void func_free(struct syntax_node *n)
+{
+ switch (n->type)
+ {
+ case YY_IS_NUM:
+ case YY_IS_VAR:
+ break;
+
+ case YY_IS_BINOP:
+ func_free(n->binop.parm_left);
+ func_free(n->binop.parm_right);
+ break;
+
+ case YY_IS_UNAROP:
+ func_free(n->unarop.parm);
+ break;
+ }
+
+ sos_kfree((sos_vaddr_t)n);
+}
+
+/*
+ * Change the stack for something larger in order to call the
+ * recursive function above in a safe way
+ */
+static void free_syntax_tree(struct syntax_node *tree)
+{
+ sos_cpu_kstate_init(& st_free,
+ (sos_cpu_kstate_function_arg1_t*)func_free,
+ (sos_ui32_t)tree,
+ (sos_vaddr_t)deep_stack, sizeof(deep_stack),
+ (sos_cpu_kstate_function_arg1_t*)func_exit, 0);
+
+ /* Go ! */
+ sos_cpu_context_switch(& st_main, st_free);
+}
+
+
+/* ======================================================================
+ * The C entry point of our operating system
+ */
+void sos_main(unsigned long magic, unsigned long addr)
+{
+ unsigned i;
+ sos_paddr_t sos_kernel_core_base_paddr, sos_kernel_core_top_paddr;
+ struct syntax_node *syntax_tree;
+
+  /* Grub sends us a structure called multiboot_info_t with a lot of
+     precious information about the system; see the multiboot
+     documentation for details. */
+ multiboot_info_t *mbi;
+ mbi = (multiboot_info_t *) addr;
+
+ /* Setup bochs and console, and clear the console */
+ sos_bochs_setup();
+
+ sos_x86_videomem_setup();
+ sos_x86_videomem_cls(SOS_X86_VIDEO_BG_BLUE);
+
+ /* Greetings from SOS */
+ if (magic == MULTIBOOT_BOOTLOADER_MAGIC)
+ /* Loaded with Grub */
+ sos_x86_videomem_printf(1, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)",
+ "SOS article 6", ',',
+ (unsigned)(mbi->mem_upper >> 10) + 1,
+ (unsigned)mbi->mem_upper);
+ else
+ /* Not loaded with grub */
+ sos_x86_videomem_printf(1, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Welcome to SOS article 6");
+
+ sos_bochs_putstring("Message in a bochs: This is SOS article 6.\n");
+
+ /* Setup CPU segmentation and IRQ subsystem */
+ sos_gdt_subsystem_setup();
+ sos_idt_subsystem_setup();
+
+ /* Setup SOS IRQs and exceptions subsystem */
+ sos_exception_subsystem_setup();
+ sos_irq_subsystem_setup();
+
+ /* Configure the timer so as to raise the IRQ0 at a 100Hz rate */
+ sos_i8254_set_frequency(100);
+
+ /* We need a multiboot-compliant boot loader to get the size of the RAM */
+ if (magic != MULTIBOOT_BOOTLOADER_MAGIC)
+ {
+ sos_x86_videomem_putstring(20, 0,
+ SOS_X86_VIDEO_FG_LTRED
+ | SOS_X86_VIDEO_BG_BLUE
+ | SOS_X86_VIDEO_FG_BLINKING,
+ "I'm not loaded with Grub !");
+ /* STOP ! */
+ for (;;)
+ continue;
+ }
+
+ /*
+ * Some interrupt handlers
+ */
+
+ /* Binding some HW interrupts and exceptions to software routines */
+ sos_irq_set_routine(SOS_IRQ_TIMER,
+ clk_it);
+
+ /*
+ * Setup physical memory management
+ */
+
+ /* Multiboot says: "The value returned for upper memory is maximally
+ the address of the first upper memory hole minus 1 megabyte.". It
+ also adds: "It is not guaranteed to be this value." aka "YMMV" ;) */
+ sos_physmem_subsystem_setup((mbi->mem_upper<<10) + (1<<20),
+ & sos_kernel_core_base_paddr,
+ & sos_kernel_core_top_paddr);
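+
+  /* Worked example: with 32MB of RAM, Grub typically reports
+     mem_upper = 31744 kB (everything above the first megabyte), so
+     the expression above yields (31744 << 10) + (1 << 20)
+     = 31MB + 1MB = 32MB. */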
+
+ /*
+ * Switch to paged-memory mode
+ */
+
+  /* Disabling interrupts might seem more correct, but it is not really
+     necessary at this stage */
+ SOS_ASSERT_FATAL(SOS_OK ==
+ sos_paging_subsystem_setup(sos_kernel_core_base_paddr,
+ sos_kernel_core_top_paddr));
+
+ /* Bind the page fault exception */
+ sos_exception_set_routine(SOS_EXCEPT_PAGE_FAULT,
+ pgflt_ex);
+
+ /*
+ * Setup kernel virtual memory allocator
+ */
+
+ if (sos_kmem_vmm_subsystem_setup(sos_kernel_core_base_paddr,
+ sos_kernel_core_top_paddr,
+ bootstrap_stack_bottom,
+ bootstrap_stack_bottom
+ + bootstrap_stack_size))
+ sos_bochs_printf("Could not setup the Kernel virtual space allocator\n");
+
+ if (sos_kmalloc_subsystem_setup())
+ sos_bochs_printf("Could not setup the Kmalloc subsystem\n");
+
+ /*
+ * Enabling the HW interrupts here, this will make the timer HW
+ * interrupt call our clk_it handler
+ */
+ asm volatile ("sti\n");
+
+ /*
+ * Print hello world using coroutines
+ */
+ print_hello_world();
+
+
+ /*
+ * Run coroutine tests
+ */
+ sos_x86_videomem_printf(4, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_LTGREEN,
+ "Coroutine test");
+ sos_x86_videomem_printf(5, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Parsing...");
+ syntax_tree = parse_expression(" - ( (69/ toto)+ ( (( - +-- 1))) + --toto*((toto+ - - y - +2*(y-toto))*y) +2*(y-toto) )/- (( y - toto)*2)");
+
+ if (syntax_tree != NULL)
+ {
+ sos_x86_videomem_printf(6, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Evaluating...");
+ sos_x86_videomem_printf(7, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Result=%d (if 0: check bochs output)",
+ eval_expression(syntax_tree,
+ (const char*[]){"toto", "y"},
+ (int[]){3, 4},
+ 2));
+ free_syntax_tree(syntax_tree);
+ sos_x86_videomem_printf(8, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+                              "Done (syntax tree freed)");
+ }
+ else
+ {
+ sos_x86_videomem_printf(6, 0,
+ SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_YELLOW,
+ "Error in parsing (see bochs output)");
+ }
+
+ /*
+ * Run some demand-paging tests
+ */
+ test_demand_paging(234567, 500);
+
+
+  /*
+   * Trigger an unresolved page fault, which will make the page fault
+   * handler print the backtrace.
+   */
+ test_backtrace(6, 0xdeadbeef, bootstrap_stack_bottom, bootstrap_stack_size);
+
+ /*
+ * System should be halted BEFORE here !
+ */
+
+
+  /* An operating system never ends */
+ for (;;)
+ {
+ /* Remove this instruction if you get an "Invalid opcode" CPU
+ exception (old 80386 CPU) */
+ asm("hlt\n");
+
+ continue;
+ }
+}
diff --git a/sos-code-article6/sos/physmem.c b/sos-code-article6/sos/physmem.c
new file mode 100644
index 0000000..daf730f
--- /dev/null
+++ b/sos-code-article6/sos/physmem.c
@@ -0,0 +1,319 @@
+/* Copyright (C) 2004 David Decotigny
+ Copyright (C) 2000 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include <sos/list.h>
+#include <sos/macros.h>
+#include <sos/assert.h>
+#include <sos/klibc.h>
+
+#include "physmem.h"
+
+/** A descriptor for a physical page in SOS */
+struct physical_page_descr
+{
+ /** The physical base address for the page */
+ sos_paddr_t paddr;
+
+ /** The reference count for this physical page. > 0 means that the
+ page is in the used list. */
+ sos_count_t ref_cnt;
+
+ /** Some data associated with the page when it is mapped in kernel space */
+ struct sos_kmem_range *kernel_range;
+
+ /** The other pages on the list (used, free) */
+ struct physical_page_descr *prev, *next;
+};
+
+/** These are some markers present in the executable file (see sos.lds) */
+extern char __b_kernel, __e_kernel;
+
+/** The array of ppage descriptors will be located at this address */
+#define PAGE_DESCR_ARRAY_ADDR \
+ SOS_PAGE_ALIGN_SUP((sos_paddr_t) (& __e_kernel))
+static struct physical_page_descr * physical_page_descr_array;
+
+/** The list of physical pages currently available */
+static struct physical_page_descr *free_ppage;
+
+/** The list of physical pages currently in use */
+static struct physical_page_descr *used_ppage;
+
+/** We will store here the interval of valid physical addresses */
+static sos_paddr_t physmem_base, physmem_top;
+
+/** We store the number of pages used/free */
+static sos_count_t physmem_total_pages, physmem_used_pages;
+
+sos_ret_t sos_physmem_subsystem_setup(sos_size_t ram_size,
+ /* out */sos_paddr_t *kernel_core_base,
+ /* out */sos_paddr_t *kernel_core_top)
+{
+ /* The iterator over the page descriptors */
+ struct physical_page_descr *ppage_descr;
+
+ /* The iterator over the physical addresses */
+ sos_paddr_t ppage_addr;
+
+ /* Make sure ram size is aligned on a page boundary */
+ ram_size = SOS_PAGE_ALIGN_INF(ram_size);/* Yes, we may lose at most a page */
+
+ /* Reset the used/free page lists before building them */
+ free_ppage = used_ppage = NULL;
+ physmem_total_pages = physmem_used_pages = 0;
+
+ /* Make sure that there is enough memory to store the array of page
+ descriptors */
+ *kernel_core_base = SOS_PAGE_ALIGN_INF((sos_paddr_t)(& __b_kernel));
+ *kernel_core_top
+ = PAGE_DESCR_ARRAY_ADDR
+ + SOS_PAGE_ALIGN_SUP( (ram_size >> SOS_PAGE_SHIFT)
+ * sizeof(struct physical_page_descr));
+ if (*kernel_core_top > ram_size)
+ return -SOS_ENOMEM;
+
+  /* Page 0-4kB is kept unavailable so that address 0 can be returned
+     as a means to signal "no page available" */
+ physmem_base = SOS_PAGE_SIZE;
+ physmem_top = ram_size;
+
+  /* Setup the page descriptor array */
+ physical_page_descr_array
+ = (struct physical_page_descr*)PAGE_DESCR_ARRAY_ADDR;
+
+ /* Scan the list of physical pages */
+ for (ppage_addr = 0,
+ ppage_descr = physical_page_descr_array ;
+ ppage_addr < physmem_top ;
+ ppage_addr += SOS_PAGE_SIZE,
+ ppage_descr ++)
+ {
+ enum { PPAGE_MARK_RESERVED, PPAGE_MARK_FREE,
+ PPAGE_MARK_KERNEL, PPAGE_MARK_HWMAP } todo;
+
+ memset(ppage_descr, 0x0, sizeof(struct physical_page_descr));
+
+ /* Init the page descriptor for this page */
+ ppage_descr->paddr = ppage_addr;
+
+ /* Reserved : 0 ... base */
+ if (ppage_addr < physmem_base)
+ todo = PPAGE_MARK_RESERVED;
+
+ /* Free : base ... BIOS */
+ else if ((ppage_addr >= physmem_base)
+ && (ppage_addr < BIOS_N_VIDEO_START))
+ todo = PPAGE_MARK_FREE;
+
+ /* Used : BIOS */
+ else if ((ppage_addr >= BIOS_N_VIDEO_START)
+ && (ppage_addr < BIOS_N_VIDEO_END))
+ todo = PPAGE_MARK_HWMAP;
+
+ /* Free : BIOS ... kernel */
+ else if ((ppage_addr >= BIOS_N_VIDEO_END)
+ && (ppage_addr < (sos_paddr_t) (& __b_kernel)))
+ todo = PPAGE_MARK_FREE;
+
+      /* Used : Kernel code/data/bss + physical page descriptor array */
+ else if ((ppage_addr >= *kernel_core_base)
+ && (ppage_addr < *kernel_core_top))
+ todo = PPAGE_MARK_KERNEL;
+
+      /* Free : end of the page descriptor array ... end of RAM */
+ else
+ todo = PPAGE_MARK_FREE;
+
+ /* Actually does the insertion in the used/free page lists */
+ physmem_total_pages ++;
+ switch (todo)
+ {
+ case PPAGE_MARK_FREE:
+ ppage_descr->ref_cnt = 0;
+ list_add_head(free_ppage, ppage_descr);
+ break;
+
+ case PPAGE_MARK_KERNEL:
+ case PPAGE_MARK_HWMAP:
+ ppage_descr->ref_cnt = 1;
+ list_add_head(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+ break;
+
+ default:
+ /* Reserved page: nop */
+ break;
+ }
+ }
+
+ return SOS_OK;
+}
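+
+/*
+ * Worked example of the layout computed above, assuming 32MB of RAM
+ * and the usual i386 sizes (4-byte pointers and integers, hence
+ * sizeof(struct physical_page_descr) == 20): 32MB / 4kB = 8192 page
+ * descriptors, ie 8192 * 20 = 160kB for the array.  It lives right
+ * after the kernel image (__e_kernel, page-aligned up), and
+ * [*kernel_core_base, *kernel_core_top) covers both the kernel and
+ * the array, so the loop above marks all of it PPAGE_MARK_KERNEL.
+ */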
+
+
+sos_paddr_t sos_physmem_ref_physpage_new(sos_bool_t can_block)
+{
+ struct physical_page_descr *ppage_descr;
+
+ if (! free_ppage)
+ return (sos_paddr_t)NULL;
+
+ /* Retrieve a page in the free list */
+ ppage_descr = list_pop_head(free_ppage);
+
+ /* The page is assumed not to be already used */
+ SOS_ASSERT_FATAL(ppage_descr->ref_cnt == 0);
+
+ /* Mark the page as used (this of course sets the ref count to 1) */
+ ppage_descr->ref_cnt ++;
+
+ /* No associated kernel range by default */
+ ppage_descr->kernel_range = NULL;
+
+ /* Put the page in the used list */
+ list_add_tail(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+
+ return ppage_descr->paddr;
+}
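+
+/*
+ * Typical caller pairing (sketch only; the hypothetical
+ * demo_page_alloc() below is not part of SOS): allocate a page, use
+ * it, then drop the reference taken by the allocation.
+ */
+#if 0
+static sos_ret_t demo_page_alloc(void)
+{
+  sos_paddr_t ppage = sos_physmem_ref_physpage_new(FALSE);
+  if (ppage == (sos_paddr_t)NULL)
+    return -SOS_ENOMEM;               /* no physical page available */
+
+  /* ... map and use the page ... */
+
+  /* ref_cnt goes 1 -> 0: the page returns to the free list */
+  sos_physmem_unref_physpage(ppage);
+  return SOS_OK;
+}
+#endif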
+
+
+/**
+ * Helper function to get the physical page descriptor for the given
+ * physical page address.
+ *
+ * @return NULL when out-of-bounds or non-page-aligned
+ */
+inline static struct physical_page_descr *
+get_page_descr_at_paddr(sos_paddr_t ppage_paddr)
+{
+ /* Don't handle non-page-aligned addresses */
+ if (ppage_paddr & SOS_PAGE_MASK)
+ return NULL;
+
+ /* Don't support out-of-bounds requests */
+ if ((ppage_paddr < physmem_base) || (ppage_paddr >= physmem_top))
+ return NULL;
+
+ return physical_page_descr_array + (ppage_paddr >> SOS_PAGE_SHIFT);
+}
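+
+/* For example, the descriptor for the page at physical address 0x5000
+   is physical_page_descr_array[0x5000 >> SOS_PAGE_SHIFT], ie entry 5:
+   the array is indexed from physical page 0 onwards, even though
+   requests below physmem_base are rejected above. */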
+
+
+sos_ret_t sos_physmem_ref_physpage_at(sos_paddr_t ppage_paddr)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ /* Increment the reference count for the page */
+ ppage_descr->ref_cnt ++;
+
+ /* If the page is newly referenced (ie we are the only owners of the
+ page => ref cnt == 1), transfer it in the used pages list */
+ if (ppage_descr->ref_cnt == 1)
+ {
+ list_delete(free_ppage, ppage_descr);
+
+ /* No associated kernel range by default */
+ ppage_descr->kernel_range = NULL;
+
+ list_add_tail(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+
+ /* The page is newly referenced */
+ return FALSE;
+ }
+
+ /* The page was already referenced by someone */
+ return TRUE;
+}
+
+
+sos_ret_t
+sos_physmem_unref_physpage(sos_paddr_t ppage_paddr)
+{
+ /* By default the return value indicates that the page is still
+ used */
+ sos_ret_t retval = FALSE;
+
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ /* Don't do anything if the page is not in the used list */
+ if (ppage_descr->ref_cnt <= 0)
+ return -SOS_EINVAL;
+
+ /* Unreference the page, and, when no mapping is active anymore, put
+ the page in the free list */
+ ppage_descr->ref_cnt--;
+ if (ppage_descr->ref_cnt <= 0)
+ {
+ /* Reset associated kernel range */
+ ppage_descr->kernel_range = NULL;
+
+ /* Transfer the page, considered USED, to the free list */
+ list_delete(used_ppage, ppage_descr);
+ physmem_used_pages --;
+ list_add_head(free_ppage, ppage_descr);
+
+ /* Indicate that the page is now unreferenced */
+ retval = TRUE;
+ }
+
+ return retval;
+}
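+
+/*
+ * Sketch of how page sharing builds on the two functions above (the
+ * hypothetical demo_page_sharing() is illustration only, not part of
+ * SOS): each additional user takes a reference, and only the last
+ * sos_physmem_unref_physpage() actually frees the page.
+ */
+#if 0
+static void demo_page_sharing(sos_paddr_t ppage /* page-aligned, free */)
+{
+  sos_physmem_ref_physpage_at(ppage); /* FALSE: was free, now in use */
+  sos_physmem_ref_physpage_at(ppage); /* TRUE: already referenced */
+  sos_physmem_unref_physpage(ppage);  /* FALSE: still referenced */
+  sos_physmem_unref_physpage(ppage);  /* TRUE: back on the free list */
+}
+#endif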
+
+
+struct sos_kmem_range* sos_physmem_get_kmem_range(sos_paddr_t ppage_paddr)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return NULL;
+
+ return ppage_descr->kernel_range;
+}
+
+
+sos_ret_t sos_physmem_set_kmem_range(sos_paddr_t ppage_paddr,
+ struct sos_kmem_range *range)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ ppage_descr->kernel_range = range;
+ return SOS_OK;
+}
+
+sos_ret_t sos_physmem_get_state(/* out */sos_count_t *total_ppages,
+ /* out */sos_count_t *used_ppages)
+{
+ if (total_ppages)
+ *total_ppages = physmem_total_pages;
+ if (used_ppages)
+ *used_ppages = physmem_used_pages;
+ return SOS_OK;
+}
diff --git a/sos-code-article6/sos/physmem.h b/sos-code-article6/sos/physmem.h
new file mode 100644
index 0000000..7b4cd2b
--- /dev/null
+++ b/sos-code-article6/sos/physmem.h
@@ -0,0 +1,147 @@
+/* Copyright (C) 2004 David Decotigny
+ Copyright (C) 2000 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_PHYSMEM_H_
+#define _SOS_PHYSMEM_H_
+
+/**
+ * @file physmem.h
+ *
+ * Physical pages of memory
+ */
+
+#include <sos/errno.h>
+#include <sos/types.h>
+#include <sos/macros.h>
+
+/** The size of a physical page (arch-dependent) */
+#define SOS_PAGE_SIZE (4*1024)
+
+/** The corresponding shift */
+#define SOS_PAGE_SHIFT 12 /* 4 kB = 2^12 B */
+
+/** The corresponding mask */
+#define SOS_PAGE_MASK  ((1<<SOS_PAGE_SHIFT) - 1)
+
+#define SOS_PAGE_ALIGN_INF(val) \
+ SOS_ALIGN_INF((val), SOS_PAGE_SIZE)
+#define SOS_PAGE_ALIGN_SUP(val) \
+ SOS_ALIGN_SUP((val), SOS_PAGE_SIZE)
+#define SOS_IS_PAGE_ALIGNED(val) \
+ SOS_IS_ALIGNED((val), SOS_PAGE_SIZE)
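+
+/* For instance, assuming the usual rounding semantics of the
+   SOS_ALIGN_* macros from macros.h:
+   SOS_PAGE_ALIGN_INF(0x1234) == 0x1000,
+   SOS_PAGE_ALIGN_SUP(0x1234) == 0x2000,
+   and SOS_IS_PAGE_ALIGNED(0x2000) holds. */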
+
+/**
+ * This is the reserved physical interval for the x86 video memory and
+ * BIOS area. In physmem.c, we have to mark this area as "used" in
+ * order to prevent it from being allocated. And in paging.c, we had
+ * better map it in virtual space if we really want to be able to
+ * print to the screen (for debugging purposes, at least): the
+ * simplest way is to identity-map this area in virtual space (note
+ * however that this mapping could also be non-identity).
+ */
+#define BIOS_N_VIDEO_START 0xa0000
+#define BIOS_N_VIDEO_END 0x100000
+
+
+/**
+ * Initialize the physical memory subsystem for the physical area
+ * [0, ram_size). This routine takes the BIOS and video areas into
+ * account, excluding them from future allocations.
+ *
+ * @param ram_size The size of the RAM that will be managed by this subsystem
+ *
+ * @param kernel_core_base Will be set to the lowest address for which
+ * the kernel assumes identity mapping (ie virtual address == physical
+ * address)
+ *
+ * @param kernel_core_top Will be set to the top address for which the
+ * kernel assumes identity mapping (ie virtual address == physical
+ * address)
+ */
+sos_ret_t sos_physmem_subsystem_setup(sos_size_t ram_size,
+ /* out */sos_paddr_t *kernel_core_base,
+ /* out */sos_paddr_t *kernel_core_top);
+
+/**
+ * Retrieve the total number of pages, and the number of used pages
+ */
+sos_ret_t sos_physmem_get_state(/* out */sos_count_t *total_ppages,
+ /* out */sos_count_t *used_ppages);
+
+
+/**
+ * Get a free page.
+ *
+ * @return The (physical) address of the (physical) page allocated, or
+ * NULL when none currently available.
+ *
+ * @param can_block TRUE if the function is allowed to block
+ * @note The page returned has a reference count equal to 1.
+ */
+sos_paddr_t sos_physmem_ref_physpage_new(sos_bool_t can_block);
+
+
+/**
+ * Increment the reference count of a given physical page. Useful for
+ * VM code which tries to map a precise physical address.
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return TRUE when the page was previously in use, FALSE when the
+ * page was previously in the free list, <0 when the page address is
+ * invalid.
+ */
+sos_ret_t sos_physmem_ref_physpage_at(sos_paddr_t ppage_paddr);
+
+
+/**
+ * Decrement the reference count of the given physical page. When this
+ * reference count reaches 0, the page is marked free, ie becomes
+ * available again through sos_physmem_ref_physpage_new()
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return FALSE when the page is still in use, TRUE when the page is now
+ * unreferenced, <0 when the page address is invalid
+ */
+sos_ret_t sos_physmem_unref_physpage(sos_paddr_t ppage_paddr);
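+
+/*
+ * Example of handling the overloaded return value above (sketch):
+ * since sos_ret_t carries both a boolean and an error code here,
+ * callers must test for errors explicitly:
+ *
+ *   sos_ret_t ret = sos_physmem_unref_physpage(paddr);
+ *   if (ret < 0)          { ... invalid page address ...            }
+ *   else if (ret == TRUE) { ... page was just freed ...             }
+ *   else                  { ... page still referenced elsewhere ... }
+ */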
+
+
+#include <sos/kmem_vmm.h>
+
+/**
+ * Return the kernel memory allocation range associated with the given
+ * physical page, or NULL when the page has no associated range
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ */
+struct sos_kmem_range* sos_physmem_get_kmem_range(sos_paddr_t ppage_paddr);
+
+
+/**
+ * Set the kernel memory allocation range associated with the given
+ * physical page.
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return error if page is invalid
+ */
+sos_ret_t sos_physmem_set_kmem_range(sos_paddr_t ppage_paddr,
+ struct sos_kmem_range *range);
+
+#endif /* _SOS_PHYSMEM_H_ */
diff --git a/sos-code-article6/sos/types.h b/sos-code-article6/sos/types.h
new file mode 100644
index 0000000..bf04314
--- /dev/null
+++ b/sos-code-article6/sos/types.h
@@ -0,0 +1,52 @@
+/* Copyright (C) 2004 The SOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_TYPES_H_
+#define _SOS_TYPES_H_
+
+/**
+ * @file types.h
+ *
+ * SOS basic types definition
+ */
+
+/** Physical address */
+typedef unsigned int sos_paddr_t;
+
+/** Kernel virtual address */
+typedef unsigned int sos_vaddr_t;
+
+/** Memory size of an object (positive) */
+typedef unsigned int sos_size_t;
+/** Generic count of objects */
+typedef unsigned int sos_count_t;
+
+/** Low-level sizes */
+typedef unsigned long int sos_ui32_t; /* 32b unsigned */
+typedef unsigned short int sos_ui16_t; /* 16b unsigned */
+typedef unsigned char sos_ui8_t; /* 8b unsigned */
+typedef signed long int sos_si32_t; /* 32b signed */
+typedef signed short int sos_si16_t; /* 16b signed */
+typedef signed char sos_si8_t; /* 8b signed */
+
+typedef enum { FALSE=0, TRUE } sos_bool_t;
+
+/** Not a proper type, but highly useful for basic pointer and type
+    manipulations */
+#define NULL ((void*)0)
+
+#endif /* _SOS_TYPES_H_ */