Diffstat (limited to 'sos-code-article6.5/sos')
-rw-r--r--  sos-code-article6.5/sos/assert.c      44
-rw-r--r--  sos-code-article6.5/sos/assert.h      45
-rw-r--r--  sos-code-article6.5/sos/errno.h       42
-rw-r--r--  sos-code-article6.5/sos/klibc.c      308
-rw-r--r--  sos-code-article6.5/sos/klibc.h      103
-rw-r--r--  sos-code-article6.5/sos/kmalloc.c    113
-rw-r--r--  sos-code-article6.5/sos/kmalloc.h     63
-rw-r--r--  sos-code-article6.5/sos/kmem_slab.c  812
-rw-r--r--  sos-code-article6.5/sos/kmem_slab.h  206
-rw-r--r--  sos-code-article6.5/sos/kmem_vmm.c   606
-rw-r--r--  sos-code-article6.5/sos/kmem_vmm.h   113
-rw-r--r--  sos-code-article6.5/sos/ksynch.c     233
-rw-r--r--  sos-code-article6.5/sos/ksynch.h     170
-rw-r--r--  sos-code-article6.5/sos/kwaitq.c     249
-rw-r--r--  sos-code-article6.5/sos/kwaitq.h     180
-rw-r--r--  sos-code-article6.5/sos/list.h       186
-rw-r--r--  sos-code-article6.5/sos/macros.h      41
-rw-r--r--  sos-code-article6.5/sos/main.c       461
-rw-r--r--  sos-code-article6.5/sos/mouse_sim.c  803
-rw-r--r--  sos-code-article6.5/sos/physmem.c    319
-rw-r--r--  sos-code-article6.5/sos/physmem.h    147
-rw-r--r--  sos-code-article6.5/sos/sched.c      133
-rw-r--r--  sos-code-article6.5/sos/sched.h       75
-rw-r--r--  sos-code-article6.5/sos/thread.c     441
-rw-r--r--  sos-code-article6.5/sos/thread.h     207
-rw-r--r--  sos-code-article6.5/sos/time.c       355
-rw-r--r--  sos-code-article6.5/sos/time.h       222
-rw-r--r--  sos-code-article6.5/sos/types.h       52
28 files changed, 6729 insertions, 0 deletions
diff --git a/sos-code-article6.5/sos/assert.c b/sos-code-article6.5/sos/assert.c
new file mode 100644
index 0000000..2bc0d41
--- /dev/null
+++ b/sos-code-article6.5/sos/assert.c
@@ -0,0 +1,44 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/klibc.h>
+#include <drivers/bochs.h>
+#include <drivers/x86_videomem.h>
+
+#include "assert.h"
+
+void sos_display_fatal_error(const char *format, /* args */...)
+{
+ char buff[256];
+ va_list ap;
+
+ asm("cli\n"); /* disable interrupts -- x86 only */ \
+
+ va_start(ap, format);
+ vsnprintf(buff, sizeof(buff), format, ap);
+ va_end(ap);
+
+ sos_bochs_putstring(buff); sos_bochs_putstring("\n");
+ sos_x86_videomem_putstring(23, 0,
+ SOS_X86_VIDEO_BG_BLACK
+ | SOS_X86_VIDEO_FG_LTRED , buff);
+
+ /* Infinite loop: processor halted */
+ for ( ; ; )
+ asm("hlt\n");
+}
diff --git a/sos-code-article6.5/sos/assert.h b/sos-code-article6.5/sos/assert.h
new file mode 100644
index 0000000..9fcfec0
--- /dev/null
+++ b/sos-code-article6.5/sos/assert.h
@@ -0,0 +1,45 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_ASSERT_H_
+#define _SOS_ASSERT_H_
+
+
+void sos_display_fatal_error(const char *format, /* args */...)
+ __attribute__ ((format (printf, 1, 2), noreturn));
+
+
+/**
+ * If the expr is FALSE, print a message and halt the machine
+ */
+#define SOS_ASSERT_FATAL(expr) \
+ ({ \
+ int __res=(int)(expr); \
+ if (! __res) \
+ sos_display_fatal_error("%s@%s:%d Assertion " # expr " failed", \
+ __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+ })
+
+
+#define SOS_FATAL_ERROR(fmt,args...) \
+ ({ \
+ sos_display_fatal_error("%s@%s:%d FATAL: " fmt, \
+ __PRETTY_FUNCTION__, __FILE__, __LINE__, \
+ ##args); \
+ })
+
+#endif /* _SOS_ASSERT_H_ */
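
As a quick illustration of the two macros above, here is a hypothetical sketch (not part of this patch; check_page_size() is invented for the example):

/* Hypothetical usage of the assert.h macros */
#include <sos/assert.h>

static void check_page_size(int page_size)
{
  /* Halt with a printf-style message for an obviously bogus value */
  if (page_size <= 0)
    SOS_FATAL_ERROR("bogus page size %d", page_size);

  /* Halts the machine with "function@file:line Assertion ... failed"
     if the expression evaluates to 0 */
  SOS_ASSERT_FATAL(page_size == 4096);
}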
diff --git a/sos-code-article6.5/sos/errno.h b/sos-code-article6.5/sos/errno.h
new file mode 100644
index 0000000..fda5ed8
--- /dev/null
+++ b/sos-code-article6.5/sos/errno.h
@@ -0,0 +1,42 @@
+/* Copyright (C) 2004 The SOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_ERRNO_H_
+#define _SOS_ERRNO_H_
+
+/**
+ * @file errno.h
+ *
+ * SOS return value codes and errors.
+ */
+
+/* Positive values of the error codes */
+#define SOS_OK 0 /* No error */
+#define SOS_EINVAL 1 /* Invalid argument */
+#define SOS_ENOSUP 2 /* Operation not supported */
+#define SOS_ENOMEM 3 /* No available memory */
+#define SOS_EBUSY 4 /* Object or device still in use */
+#define SOS_EINTR 5 /* Wait/Sleep has been interrupted */
+#define SOS_EPERM 6 /* Mutex/files ownership error */
+#define SOS_EFATAL 255 /* Internal fatal error */
+
+/* A negative value means that an error occurred. For
+ * example -SOS_EINVAL means that the error was "invalid
+ * argument" */
+typedef int sos_ret_t;
+
+#endif /* _SOS_ERRNO_H_ */
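
To make the sign convention concrete, here is a hypothetical sketch (not part of this patch; divide() is an invented helper): success is reported as SOS_OK (0) and failures as the negated SOS_E* code, so callers test for a negative return value.

/* Hypothetical illustration of the sos_ret_t convention */
#include <sos/errno.h>

static sos_ret_t divide(int num, int div, int *result)
{
  if (div == 0)
    return -SOS_EINVAL;  /* error => negated error code */

  *result = num / div;
  return SOS_OK;         /* success => 0 */
}

/* Caller side: a return value < 0 signals the error, and -retval
   recovers the SOS_E* code (here SOS_EINVAL) */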
diff --git a/sos-code-article6.5/sos/klibc.c b/sos-code-article6.5/sos/klibc.c
new file mode 100644
index 0000000..4442842
--- /dev/null
+++ b/sos-code-article6.5/sos/klibc.c
@@ -0,0 +1,308 @@
+/* Copyright (C) 2004 David Decotigny (with INSA Rennes for vsnprintf)
+ Copyright (C) 2003 The KOS Team
+ Copyright (C) 1999 Free Software Foundation
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include "klibc.h"
+
+/* For an optimized version, see BSD sources ;) */
+void *memcpy(void *dst0, const void *src0, register unsigned int size)
+{
+ char *dst;
+ const char *src;
+ for (dst = (char*)dst0, src = (const char*)src0 ;
+ size > 0 ;
+ dst++, src++, size--)
+ *dst = *src;
+ return dst0;
+}
+
+/* ditto */
+void *memset(void *dst0, register int c, register unsigned int length)
+{
+ char *dst;
+ for (dst = (char*) dst0 ;
+ length > 0 ;
+ dst++, length --)
+ *dst = (char)c;
+ return dst0;
+}
+
+int memcmp(const void *s1, const void *s2, sos_size_t len)
+{
+ const unsigned char *c1, *c2;
+ unsigned int i;
+
+ for (i = 0, c1 = s1, c2 = s2; i < len; i++, c1++, c2++)
+ {
+ if(*c1 != *c2)
+ return *c1 - *c2;
+ }
+
+ return 0;
+}
+
+
+unsigned int strlen(register const char *str)
+{
+ unsigned int retval = 0;
+
+ while (*str++)
+ retval++;
+
+ return retval;
+}
+
+
+unsigned int strnlen(const char * s, sos_size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */continue;
+
+ return sc - s;
+}
+
+
+char *strzcpy(register char *dst, register const char *src, register int len)
+{
+ int i;
+
+ if (len <= 0)
+ return dst;
+
+ for (i = 0; i < len; i++)
+ {
+ dst[i] = src[i];
+ if(src[i] == '\0')
+ return dst;
+ }
+
+ dst[len-1] = '\0';
+ return dst;
+}
+
+
+char *strzcat (char *dest, const char *src, sos_size_t n)
+{
+ char *res = dest;
+
+ for ( ; *dest ; dest++);
+
+ for ( ; *src ; src++, dest++) {
+ *dest = *src;
+ n--;
+ if (n <= 0)
+ break;
+ }
+
+ *dest = '\0';
+ return res;
+}
+
+int strcmp(register const char *s1, register const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == 0)
+ return (0);
+
+ return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1));
+}
+
+
+int strncmp(register const char *s1, register const char *s2, register int len)
+{
+ char c1 = '\0', c2 = '\0';
+
+ while (len > 0)
+ {
+ c1 = (unsigned char) *s1++;
+ c2 = (unsigned char) *s2++;
+ if (c1 == '\0' || c1 != c2)
+ return c1 - c2;
+ len--;
+ }
+
+ return c1 - c2;
+}
+
+
+static unsigned long int _random_seed = 93186752;
+
+/**
+ * The following code is borrowed from Glenn Rhoads.
+ * http://remus.rutgers.edu/~rhoads/Code/code.html
+ * License to be defined...
+ */
+unsigned long int random (void)
+{
+/* The following parameters are recommended settings based on research;
+   uncomment the one you want. */
+
+/* For RAND_MAX == 4294967291 */
+ static unsigned int a = 1588635695, q = 2, r = 1117695901;
+/* static unsigned int a = 1223106847, m = 4294967291U, q = 3, r = 625646750;*/
+/* static unsigned int a = 279470273, m = 4294967291U, q = 15, r = 102913196;*/
+
+/* For RAND_MAX == 2147483647 */
+/* static unsigned int a = 1583458089, m = 2147483647, q = 1, r = 564025558; */
+/* static unsigned int a = 784588716, m = 2147483647, q = 2, r = 578306215; */
+/* static unsigned int a = 16807, m = 2147483647, q = 127773, r = 2836; */
+/* static unsigned int a = 950706376, m = 2147483647, q = 2, r = 246070895; */
+
+ _random_seed = a*(_random_seed % q) - r*(_random_seed / q);
+ return _random_seed;
+}
+
+
+void srandom (unsigned long int seed)
+{
+ _random_seed = seed;
+}
+
+
+/* I (d2) borrowed and rewrote this for Nachos/INSA Rennes. Thanks to
+ them for having kindly allowed me to do so. */
+int vsnprintf(char *buff, sos_size_t len, const char * format, va_list ap)
+{
+ sos_size_t i, result;
+
+ if (!buff || !format || (len < 0))
+ return -1;
+
+#define PUTCHAR(thechar) \
+ do { \
+ if (result < len-1) \
+ *buff++ = (thechar); \
+ result++; \
+ } while (0)
+
+ result = 0;
+ for(i=0 ; format[i] != '\0' ; i++){
+ switch (format[i])
+ {
+ case '%':
+ i++;
+ switch(format[i])
+ {
+ case '%':
+ {
+ PUTCHAR('%');
+ break;
+ }
+ case 'i':;
+ case 'd':
+ {
+ int integer = va_arg(ap,int);
+ int cpt2 = 0;
+ char buff_int[16];
+
+ if (integer<0)
+ PUTCHAR('-');
+	      /* Do not do integer = -integer here, because INT_MIN
+		 has no positive counterpart (int = [-2^31, 2^31-1]) */
+
+ do {
+ int m10 = integer%10;
+ m10 = (m10 < 0)? -m10:m10;
+ buff_int[cpt2++]=(char)('0'+ m10);
+ integer=integer/10;
+ } while(integer!=0);
+
+ for(cpt2 = cpt2 - 1 ; cpt2 >= 0 ; cpt2--)
+ PUTCHAR(buff_int[cpt2]);
+
+ break;
+ }
+
+ case 'c':
+ {
+ int value = va_arg(ap,int);
+ PUTCHAR((char)value);
+ break;
+ }
+
+ case 's':
+ {
+ char *string = va_arg(ap,char *);
+ if (! string)
+ string = "(null)";
+ for( ; *string != '\0' ; string++)
+ PUTCHAR(*string);
+ break;
+ }
+
+ case 'p':
+ PUTCHAR('0');
+ PUTCHAR('x');
+ case 'x':
+ {
+ unsigned int hexa = va_arg(ap,int);
+ unsigned int nb;
+ int i, had_nonzero = 0;
+ for(i=0 ; i < 8 ; i++)
+ {
+ nb = (unsigned int)(hexa << (i*4));
+ nb = (nb >> 28) & 0xf;
+ // Skip the leading zeros
+ if (nb == 0)
+ {
+ if (had_nonzero)
+ PUTCHAR('0');
+ }
+ else
+ {
+ had_nonzero = 1;
+ if (nb < 10)
+ PUTCHAR('0'+nb);
+ else
+ PUTCHAR('a'+(nb-10));
+ }
+ }
+ if (! had_nonzero)
+ PUTCHAR('0');
+ break;
+ }
+ break;
+
+ default:
+ PUTCHAR('%');
+ PUTCHAR(format[i]);
+ }
+ break;
+
+ default:
+ PUTCHAR(format[i]);
+ }
+ }
+
+ *buff = '\0';
+ return result;
+}
+
+
+int snprintf(char * buff, sos_size_t len, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ len = vsnprintf(buff, len, format, ap);
+ va_end(ap);
+
+ return len;
+}
diff --git a/sos-code-article6.5/sos/klibc.h b/sos-code-article6.5/sos/klibc.h
new file mode 100644
index 0000000..7002778
--- /dev/null
+++ b/sos-code-article6.5/sos/klibc.h
@@ -0,0 +1,103 @@
+/* Copyright (C) 2003 The KOS Team
+ Copyright (C) 1999 Free Software Foundation
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KLIBC_H_
+#define _SOS_KLIBC_H_
+
+/**
+ * @file klibc.h
+ *
+ * Basic libc-style support for common useful functions (string.h,
+ * stdarg.h), some with slight non-standard behavior (see comments).
+ *
+ * Most of the prototypes of these functions are borrowed from
+ * FreeBSD, but their implementation (in klibc.c) comes either from Kos
+ * (GPL v2) or from David Decotigny (SOS).
+ */
+
+#include <sos/types.h>
+
+/* string.h functions */
+
+void *memcpy(void *dst, const void *src, register unsigned int size ) ;
+void *memset(void *dst, register int c, register unsigned int length ) ;
+int memcmp(const void *s1, const void *s2, sos_size_t n);
+
+unsigned int strlen( register const char *str) ;
+unsigned int strnlen(const char * s, sos_size_t maxlen);
+
+/**
+ * @note Same as strncpy(), with slightly different semantics.
+ * strncpy(3C) says "The result will not be null-terminated
+ * if the length of 'from' is n or more." Here, 'dst' is ALWAYS
+ * null-terminated, and its total length (terminating NUL included)
+ * will ALWAYS be <= len.
+ */
+char *strzcpy( register char *dst, register const char *src,
+ register int len ) ;
+
+/**
+ * @note Same as strncat(), with the same semantics: 'dst' is ALWAYS
+ * null-terminated, and its total length (terminating NUL included)
+ * will ALWAYS be <= len.
+ */
+char *strzcat (char *dest, const char *src,
+ const sos_size_t len);
+
+int strcmp(register const char *s1, register const char *s2 );
+int strncmp(register const char *s1, register const char *s2,
+ register int len );
+
+/* Basic stdarg.h macros. Taken from gcc support files */
+#define __GNUC_VA_LIST
+typedef void *__gnuc_va_list;
+typedef __gnuc_va_list va_list;
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG)))
+#define va_end(AP) \
+ ((void)0)
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#define __va_copy(dest, src) \
+ (dest) = (src)
+
+/* stdarg.h functions. There might be a non-standard behavior: there
+ will always be a trailing '\0' in the resulting string */
+int vsnprintf(char *, sos_size_t, const char *, va_list);
+int snprintf(char *, sos_size_t, const char *, /*args*/ ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+
+/*
+ * Pseudo-random generation functions. Useful to do some coverage
+ * tests.
+ */
+
+/* Amplitude of the random number generation */
+#define RAND_MAX 4294967291U
+
+/* Pseudo-random number generation (MT unsafe) */
+unsigned long int random (void);
+
+/* Set random seed (MT unsafe) */
+void srandom (unsigned long int seed);
+
+#endif /* _SOS_KLIBC_H_ */
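
The non-standard termination guarantee of strzcpy()/snprintf() described above can be summarized with a small sketch (hypothetical, not part of this patch):

/* Hypothetical illustration of the klibc termination guarantees */
#include <sos/klibc.h>

static void truncation_demo(void)
{
  char buf[8];

  /* Source longer than the buffer: strzcpy() keeps 7 characters and
     ALWAYS appends the trailing '\0' => buf == "idle_th" */
  strzcpy(buf, "idle_thread", sizeof(buf));

  /* snprintf()/vsnprintf() obey the same rule: the result is always
     '\0'-terminated, even when truncated => buf == "thread " */
  snprintf(buf, sizeof(buf), "thread %d", 42);
}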
diff --git a/sos-code-article6.5/sos/kmalloc.c b/sos-code-article6.5/sos/kmalloc.c
new file mode 100644
index 0000000..62d948d
--- /dev/null
+++ b/sos-code-article6.5/sos/kmalloc.c
@@ -0,0 +1,113 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/assert.h>
+#include <sos/macros.h>
+
+#include "physmem.h"
+#include "kmem_vmm.h"
+#include "kmem_slab.h"
+
+#include "kmalloc.h"
+
+/* The descriptions of the pre-allocated kmalloc caches: their names,
+   object sizes, the number of pages per slab (not necessarily a
+   power of 2), and the cache structures themselves. */
+static struct {
+ const char *name;
+ sos_size_t object_size;
+ sos_count_t pages_per_slab;
+ struct sos_kslab_cache *cache;
+} kmalloc_cache[] =
+ {
+ { "kmalloc 8B objects", 8, 1 },
+ { "kmalloc 16B objects", 16, 1 },
+ { "kmalloc 32B objects", 32, 1 },
+ { "kmalloc 64B objects", 64, 1 },
+ { "kmalloc 128B objects", 128, 1 },
+ { "kmalloc 256B objects", 256, 2 },
+ { "kmalloc 1024B objects", 1024, 2 },
+ { "kmalloc 2048B objects", 2048, 3 },
+ { "kmalloc 4096B objects", 4096, 4 },
+ { "kmalloc 8192B objects", 8192, 8 },
+ { "kmalloc 16384B objects", 16384, 12 },
+ { NULL, 0, 0, NULL }
+ };
+
+
+sos_ret_t sos_kmalloc_subsystem_setup()
+{
+ int i;
+ for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
+ {
+ struct sos_kslab_cache *new_cache;
+ new_cache = sos_kmem_cache_create(kmalloc_cache[i].name,
+ kmalloc_cache[i].object_size,
+ kmalloc_cache[i].pages_per_slab,
+ 0,
+ SOS_KSLAB_CREATE_MAP
+ );
+ SOS_ASSERT_FATAL(new_cache != NULL);
+ kmalloc_cache[i].cache = new_cache;
+ }
+ return SOS_OK;
+}
+
+
+sos_vaddr_t sos_kmalloc(sos_size_t size, sos_ui32_t flags)
+{
+ /* Look for a suitable pre-allocated kmalloc cache */
+ int i;
+ for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
+ {
+ if (kmalloc_cache[i].object_size >= size)
+ return sos_kmem_cache_alloc(kmalloc_cache[i].cache,
+ (flags
+ & SOS_KMALLOC_ATOMIC)?
+ SOS_KSLAB_ALLOC_ATOMIC:0);
+ }
+
+  /* No suitable pre-allocated cache => use the kmem_vmm subsystem
+     directly to allocate whole pages */
+ return sos_kmem_vmm_alloc(SOS_PAGE_ALIGN_SUP(size) / SOS_PAGE_SIZE,
+ ( (flags
+ & SOS_KMALLOC_ATOMIC)?
+ SOS_KMEM_VMM_ATOMIC:0)
+ | SOS_KMEM_VMM_MAP
+ );
+}
+
+
+sos_ret_t sos_kfree(sos_vaddr_t vaddr)
+{
+ /* The trouble here is that we aren't sure whether this object is a
+ slab object in a pre-allocated kmalloc cache, or an object
+ directly allocated as a kmem_vmm region. */
+
+ /* We first pretend this object is allocated in a pre-allocated
+ kmalloc cache */
+ if (! sos_kmem_cache_free(vaddr))
+ return SOS_OK; /* Great ! We guessed right ! */
+
+ /* Here we're wrong: it appears not to be an object in a
+ pre-allocated kmalloc cache. So we try to pretend this is a
+ kmem_vmm area */
+ return sos_kmem_vmm_free(vaddr);
+}
+
+
diff --git a/sos-code-article6.5/sos/kmalloc.h b/sos-code-article6.5/sos/kmalloc.h
new file mode 100644
index 0000000..3f35b9d
--- /dev/null
+++ b/sos-code-article6.5/sos/kmalloc.h
@@ -0,0 +1,63 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMALLOC_H_
+#define _SOS_KMALLOC_H_
+
+/**
+ * @file kmalloc.h
+ *
+ * Simple malloc-style wrapper to kmem_vmm.h and kmem_slab.h for
+ * "anonymous" objects (ie not associated to any precise slab cache).
+ */
+
+#include <sos/types.h>
+#include <sos/errno.h>
+
+
+/**
+ * Initialize the kmalloc subsystem, i.e. pre-allocate a series of caches.
+ */
+sos_ret_t sos_kmalloc_subsystem_setup(void);
+
+/*
+ * sos_kmalloc flags
+ */
+/** sos_kmalloc() should succeed without blocking, or return NULL */
+#define SOS_KMALLOC_ATOMIC 1
+
+/**
+ * Allocate a kernel object of the given size from the best-suited
+ * slab cache if the size can be handled by one of the pre-allocated
+ * caches, or directly from the range allocator otherwise. The object
+ * will always be mapped in physical memory (i.e. this implies
+ * SOS_KSLAB_CREATE_MAP and SOS_KMEM_VMM_MAP).
+ *
+ * @param size The size of the object
+ * @param flags The allocation flags (SOS_KMALLOC_* flags)
+ */
+sos_vaddr_t sos_kmalloc(sos_size_t size, sos_ui32_t flags);
+
+/**
+ * @note you are perfectly allowed to give the address of the
+ * kernel image, or the address of the bios area here, it will work:
+ * the kernel/bios WILL be "deallocated". But if you really want to do
+ * this, well..., do expect some "surprises" ;)
+ */
+sos_ret_t sos_kfree(sos_vaddr_t vaddr);
+
+#endif /* _SOS_KMALLOC_H_ */
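
A hypothetical usage sketch (not part of this patch; kmalloc_demo() is invented): the 100-byte request below is served by the pre-allocated "kmalloc 128B objects" cache, while a request larger than 16384 bytes would fall back to whole pages from kmem_vmm.

/* Hypothetical sos_kmalloc()/sos_kfree() usage */
#include <sos/kmalloc.h>
#include <sos/assert.h>

static void kmalloc_demo(void)
{
  /* May block; pass SOS_KMALLOC_ATOMIC to forbid blocking */
  sos_vaddr_t obj = sos_kmalloc(100, 0);
  SOS_ASSERT_FATAL(obj != (sos_vaddr_t)NULL);

  /* ... use the 100 bytes at address 'obj' ... */

  /* sos_kfree() works out by itself whether 'obj' came from a
     pre-allocated cache or from the range allocator */
  SOS_ASSERT_FATAL(sos_kfree(obj) == SOS_OK);
}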
diff --git a/sos-code-article6.5/sos/kmem_slab.c b/sos-code-article6.5/sos/kmem_slab.c
new file mode 100644
index 0000000..49a1527
--- /dev/null
+++ b/sos-code-article6.5/sos/kmem_slab.c
@@ -0,0 +1,812 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include <sos/macros.h>
+#include <sos/klibc.h>
+#include <sos/list.h>
+#include <sos/assert.h>
+#include <hwcore/paging.h>
+#include <sos/physmem.h>
+#include <sos/kmem_vmm.h>
+
+#include "kmem_slab.h"
+
+/* Dimensioning constants */
+#define NB_PAGES_IN_SLAB_OF_CACHES 1
+#define NB_PAGES_IN_SLAB_OF_RANGES 1
+
+/** The structure of a slab cache */
+struct sos_kslab_cache
+{
+ char *name;
+
+  /* Immutable characteristics of the slabs of this cache */
+ sos_size_t original_obj_size; /* asked object size */
+ sos_size_t alloc_obj_size; /* actual object size, taking the
+ alignment constraints into account */
+ sos_count_t nb_objects_per_slab;
+ sos_count_t nb_pages_per_slab;
+ sos_count_t min_free_objects;
+
+/* slab cache flags */
+// #define SOS_KSLAB_CREATE_MAP (1<<0) /* See kmem_slab.h */
+// #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
+#define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
+ sos_ui32_t flags;
+
+ /* Supervision data (updated at run-time) */
+ sos_count_t nb_free_objects;
+
+ /* The lists of slabs owned by this cache */
+ struct sos_kslab *slab_list; /* head = non full, tail = full */
+
+ /* The caches are linked together on the kslab_cache_list */
+ struct sos_kslab_cache *prev, *next;
+};
+
+
+/** The structure of a slab */
+struct sos_kslab
+{
+ /** Number of free objects on this slab */
+ sos_count_t nb_free;
+
+ /** The list of these free objects */
+ struct sos_kslab_free_object *free;
+
+ /** The address of the associated range structure */
+ struct sos_kmem_range *range;
+
+ /** Virtual start address of this range */
+ sos_vaddr_t first_object;
+
+ /** Slab cache owning this slab */
+ struct sos_kslab_cache *cache;
+
+ /** Links to the other slabs managed by the same cache */
+ struct sos_kslab *prev, *next;
+};
+
+
+/** The structure of the free objects in the slab */
+struct sos_kslab_free_object
+{
+ struct sos_kslab_free_object *prev, *next;
+};
+
+/** The cache of slab caches */
+static struct sos_kslab_cache *cache_of_struct_kslab_cache;
+
+/** The cache of slab structures for non-ON_SLAB caches */
+static struct sos_kslab_cache *cache_of_struct_kslab;
+
+/** The list of slab caches */
+static struct sos_kslab_cache *kslab_cache_list;
+
+/* Helper function to initialize a cache structure */
+static sos_ret_t
+cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
+ const char* name,
+ sos_size_t obj_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objs,
+ sos_ui32_t cache_flags)
+{
+ unsigned int space_left;
+ sos_size_t alloc_obj_size;
+
+ if (obj_size <= 0)
+ return -SOS_EINVAL;
+
+ /* Default allocation size is the requested one */
+ alloc_obj_size = obj_size;
+
+ /* Make sure the requested size is large enough to store a
+ free_object structure */
+ if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
+ alloc_obj_size = sizeof(struct sos_kslab_free_object);
+
+ /* Align obj_size on 4 bytes */
+ alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
+
+ /* Make sure supplied number of pages per slab is consistent with
+ actual allocated object size */
+ if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
+ return -SOS_EINVAL;
+
+ /* Refuse too large slabs */
+ if (pages_per_slab > MAX_PAGES_PER_SLAB)
+ return -SOS_ENOMEM;
+
+ /* Fills in the cache structure */
+ memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
+ the_cache->name = (char*)name;
+ the_cache->flags = cache_flags;
+ the_cache->original_obj_size = obj_size;
+ the_cache->alloc_obj_size = alloc_obj_size;
+ the_cache->min_free_objects = min_free_objs;
+ the_cache->nb_pages_per_slab = pages_per_slab;
+
+  /* Small-size objects => the slab structure is allocated directly
+     inside the slab */
+ if(alloc_obj_size <= sizeof(struct sos_kslab))
+ the_cache->flags |= ON_SLAB;
+
+ /*
+ * Compute the space left once the maximum number of objects
+ * have been allocated in the slab
+ */
+ space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
+ if(the_cache->flags & ON_SLAB)
+ space_left -= sizeof(struct sos_kslab);
+ the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
+ space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
+
+ /* Make sure a single slab is large enough to contain the minimum
+ number of objects requested */
+ if (the_cache->nb_objects_per_slab < min_free_objs)
+ return -SOS_EINVAL;
+
+  /* If the space left over by the objects is large enough to hold
+     the slab structure, then make the slab structure ON_SLAB */
+ if (space_left >= sizeof(struct sos_kslab))
+ the_cache->flags |= ON_SLAB;
+
+ return SOS_OK;
+}
+
+
+/** Helper function to add a new slab for the given cache. */
+static sos_ret_t
+cache_add_slab(struct sos_kslab_cache *kslab_cache,
+ sos_vaddr_t vaddr_slab,
+ struct sos_kslab *slab)
+{
+ int i;
+
+ /* Setup the slab structure */
+ memset(slab, 0x0, sizeof(struct sos_kslab));
+ slab->cache = kslab_cache;
+
+ /* Establish the address of the first free object */
+ slab->first_object = vaddr_slab;
+
+ /* Account for this new slab in the cache */
+ slab->nb_free = kslab_cache->nb_objects_per_slab;
+ kslab_cache->nb_free_objects += slab->nb_free;
+
+ /* Build the list of free objects */
+ for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
+ {
+ sos_vaddr_t obj_vaddr;
+
+ /* Set object's address */
+ obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
+
+ /* Add it to the list of free objects */
+ list_add_tail(slab->free,
+ (struct sos_kslab_free_object *)obj_vaddr);
+ }
+
+  /* Add the slab to the cache's slab list: add it at the head of the
+     list, since this slab is not full */
+ list_add_head(kslab_cache->slab_list, slab);
+
+ return SOS_OK;
+}
+
+
+/** Helper function to allocate a new slab for the given kslab_cache */
+static sos_ret_t
+cache_grow(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags)
+{
+ sos_ui32_t range_alloc_flags;
+
+ struct sos_kmem_range *new_range;
+ sos_vaddr_t new_range_start;
+
+ struct sos_kslab *new_slab;
+
+ /*
+ * Setup the flags for the range allocation
+ */
+ range_alloc_flags = 0;
+
+ /* Atomic ? */
+ if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
+ range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;
+
+ /* Need physical mapping NOW ? */
+ if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
+ | SOS_KSLAB_CREATE_ZERO))
+ range_alloc_flags |= SOS_KMEM_VMM_MAP;
+
+ /* Allocate the range */
+ new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
+ range_alloc_flags,
+ & new_range_start);
+ if (! new_range)
+ return -SOS_ENOMEM;
+
+ /* Allocate the slab structure */
+ if (kslab_cache->flags & ON_SLAB)
+ {
+ /* Slab structure is ON the slab: simply set its address to the
+ end of the range */
+ sos_vaddr_t slab_vaddr
+ = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab);
+ new_slab = (struct sos_kslab*)slab_vaddr;
+ }
+ else
+ {
+ /* Slab structure is OFF the slab: allocate it from the cache of
+ slab structures */
+ sos_vaddr_t slab_vaddr
+ = sos_kmem_cache_alloc(cache_of_struct_kslab,
+ alloc_flags);
+ if (! slab_vaddr)
+ {
+ sos_kmem_vmm_del_range(new_range);
+ return -SOS_ENOMEM;
+ }
+ new_slab = (struct sos_kslab*)slab_vaddr;
+ }
+
+ cache_add_slab(kslab_cache, new_range_start, new_slab);
+ new_slab->range = new_range;
+
+ /* Set the backlink from range to this slab */
+ sos_kmem_vmm_set_slab(new_range, new_slab);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to release a slab
+ *
+ * The corresponding range is always deleted, except when the
+ * must_del_range_now parameter is FALSE. This happens only when the
+ * function gets called from sos_kmem_cache_release_struct_range(),
+ * to avoid large recursions.
+ */
+static sos_ret_t
+cache_release_slab(struct sos_kslab *slab,
+ sos_bool_t must_del_range_now)
+{
+ struct sos_kslab_cache *kslab_cache = slab->cache;
+ struct sos_kmem_range *range = slab->range;
+
+ SOS_ASSERT_FATAL(kslab_cache != NULL);
+ SOS_ASSERT_FATAL(range != NULL);
+ SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
+
+ /* First, remove the slab from the slabs' list of the cache */
+ list_delete(kslab_cache->slab_list, slab);
+ slab->cache->nb_free_objects -= slab->nb_free;
+
+ /* Release the slab structure if it is OFF slab */
+ if (! (slab->cache->flags & ON_SLAB))
+ sos_kmem_cache_free((sos_vaddr_t)slab);
+
+ /* Ok, the range is not bound to any slab anymore */
+ sos_kmem_vmm_set_slab(range, NULL);
+
+ /* Always delete the range now, unless we are told not to do so (see
+ sos_kmem_cache_release_struct_range() below) */
+ if (must_del_range_now)
+ return sos_kmem_vmm_del_range(range);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to create the initial cache of caches, with a very
+ * first slab in it, so that new cache structures can be simply allocated.
+ * @return the cache structure for the cache of caches
+ */
+static struct sos_kslab_cache *
+create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
+ int nb_pages)
+{
+ /* The preliminary cache structure we need in order to allocate the
+ first slab in the cache of caches (allocated on the stack !) */
+ struct sos_kslab_cache fake_cache_of_caches;
+
+ /* The real cache structure for the cache of caches */
+ struct sos_kslab_cache *real_cache_of_caches;
+
+ /* The kslab structure for this very first slab */
+ struct sos_kslab *slab_of_caches;
+
+ /* Init the cache structure for the cache of caches */
+ if (cache_initialize(& fake_cache_of_caches,
+ "Caches", sizeof(struct sos_kslab_cache),
+ nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+ /* Something wrong with the parameters */
+ return NULL;
+
+ memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
+
+ /* Add the pages for the 1st slab of caches */
+ slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
+ + nb_pages*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab));
+
+ /* Add the abovementioned 1st slab to the cache of caches */
+ cache_add_slab(& fake_cache_of_caches,
+ vaddr_first_slab_of_caches,
+ slab_of_caches);
+
+  /* Now we allocate a cache structure, which will be the real cache
+     of caches, i.e. a cache structure allocated INSIDE the cache of
+     caches, not on the stack */
+ real_cache_of_caches
+ = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
+ 0);
+ /* We initialize it */
+ memcpy(real_cache_of_caches, & fake_cache_of_caches,
+ sizeof(struct sos_kslab_cache));
+ /* We need to update the slab's 'cache' field */
+ slab_of_caches->cache = real_cache_of_caches;
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, real_cache_of_caches);
+
+ return real_cache_of_caches;
+}
+
+
+/**
+ * Helper function to create the initial cache of ranges, with a very
+ * first slab in it, so that new kmem_range structures can be simply
+ * allocated.
+ * @return the cache of kmem_range
+ */
+static struct sos_kslab_cache *
+create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
+ sos_size_t sizeof_struct_range,
+ int nb_pages)
+{
+ /* The cache structure for the cache of kmem_range */
+ struct sos_kslab_cache *cache_of_ranges;
+
+ /* The kslab structure for the very first slab of ranges */
+ struct sos_kslab *slab_of_ranges;
+
+ cache_of_ranges = (struct sos_kslab_cache*)
+ sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
+ 0);
+ if (! cache_of_ranges)
+ return NULL;
+
+ /* Init the cache structure for the cache of ranges with min objects
+ per slab = 2 !!! */
+ if (cache_initialize(cache_of_ranges,
+ "struct kmem_range",
+ sizeof_struct_range,
+ nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+ /* Something wrong with the parameters */
+ return NULL;
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, cache_of_ranges);
+
+ /*
+ * Add the first slab for this cache
+ */
+ memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
+
+ /* Add the pages for the 1st slab of ranges */
+ slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
+ + nb_pages*SOS_PAGE_SIZE
+ - sizeof(struct sos_kslab));
+
+ cache_add_slab(cache_of_ranges,
+ vaddr_first_slab_of_ranges,
+ slab_of_ranges);
+
+ return cache_of_ranges;
+}
+
+
+struct sos_kslab_cache *
+sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_size_t sizeof_struct_range,
+ /* results */
+ struct sos_kslab **first_struct_slab_of_caches,
+ sos_vaddr_t *first_slab_of_caches_base,
+ sos_count_t *first_slab_of_caches_nb_pages,
+ struct sos_kslab **first_struct_slab_of_ranges,
+ sos_vaddr_t *first_slab_of_ranges_base,
+ sos_count_t *first_slab_of_ranges_nb_pages)
+{
+ int i;
+ sos_ret_t retval;
+ sos_vaddr_t vaddr;
+
+ /* The cache of ranges we are about to allocate */
+ struct sos_kslab_cache *cache_of_ranges;
+
+  /* In the beginning, there is no cache at all */
+ kslab_cache_list = NULL;
+ cache_of_struct_kslab = NULL;
+ cache_of_struct_kslab_cache = NULL;
+
+ /*
+ * Create the cache of caches, initialised with 1 allocated slab
+ */
+
+ /* Allocate the pages needed for the 1st slab of caches, and map them
+ in kernel space, right after the kernel */
+ *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
+ for (i = 0, vaddr = *first_slab_of_caches_base ;
+ i < NB_PAGES_IN_SLAB_OF_CACHES ;
+ i++, vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr;
+
+ ppage_paddr
+ = sos_physmem_ref_physpage_new(FALSE);
+ SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
+
+ retval = sos_paging_map(ppage_paddr, vaddr,
+ FALSE,
+ SOS_VM_MAP_ATOMIC
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE);
+ SOS_ASSERT_FATAL(retval == SOS_OK);
+
+ retval = sos_physmem_unref_physpage(ppage_paddr);
+ SOS_ASSERT_FATAL(retval == FALSE);
+ }
+
+ /* Create the cache of caches */
+ *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
+ cache_of_struct_kslab_cache
+ = create_cache_of_caches(*first_slab_of_caches_base,
+ NB_PAGES_IN_SLAB_OF_CACHES);
+ SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);
+
+ /* Retrieve the slab that should have been allocated */
+ *first_struct_slab_of_caches
+ = list_get_head(cache_of_struct_kslab_cache->slab_list);
+
+
+ /*
+ * Create the cache of ranges, initialised with 1 allocated slab
+ */
+ *first_slab_of_ranges_base = vaddr;
+ /* Allocate the 1st slab */
+ for (i = 0, vaddr = *first_slab_of_ranges_base ;
+ i < NB_PAGES_IN_SLAB_OF_RANGES ;
+ i++, vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr;
+
+ ppage_paddr
+ = sos_physmem_ref_physpage_new(FALSE);
+ SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
+
+ retval = sos_paging_map(ppage_paddr, vaddr,
+ FALSE,
+ SOS_VM_MAP_ATOMIC
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE);
+ SOS_ASSERT_FATAL(retval == SOS_OK);
+
+ retval = sos_physmem_unref_physpage(ppage_paddr);
+ SOS_ASSERT_FATAL(retval == FALSE);
+ }
+
+ /* Create the cache of ranges */
+ *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
+ cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
+ sizeof_struct_range,
+ NB_PAGES_IN_SLAB_OF_RANGES);
+ SOS_ASSERT_FATAL(cache_of_ranges != NULL);
+
+ /* Retrieve the slab that should have been allocated */
+ *first_struct_slab_of_ranges
+ = list_get_head(cache_of_ranges->slab_list);
+
+ /*
+ * Create the cache of slabs, without any allocated slab yet
+ */
+ cache_of_struct_kslab
+ = sos_kmem_cache_create("off-slab slab structures",
+ sizeof(struct sos_kslab),
+ 1,
+ 0,
+ SOS_KSLAB_CREATE_MAP);
+ SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);
+
+ return cache_of_ranges;
+}
+
+
+sos_ret_t
+sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
+ struct sos_kmem_range *first_range_of_caches,
+ struct sos_kslab *first_struct_slab_of_ranges,
+ struct sos_kmem_range *first_range_of_ranges)
+{
+ first_struct_slab_of_caches->range = first_range_of_caches;
+ first_struct_slab_of_ranges->range = first_range_of_ranges;
+ return SOS_OK;
+}
+
+
+struct sos_kslab_cache *
+sos_kmem_cache_create(const char* name,
+ sos_size_t obj_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objs,
+ sos_ui32_t cache_flags)
+{
+ struct sos_kslab_cache *new_cache;
+
+ /* Allocate the new cache */
+ new_cache = (struct sos_kslab_cache*)
+ sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
+ 0/* NOT ATOMIC */);
+ if (! new_cache)
+ return NULL;
+
+ if (cache_initialize(new_cache, name, obj_size,
+ pages_per_slab, min_free_objs,
+ cache_flags))
+ {
+ /* Something was wrong */
+ sos_kmem_cache_free((sos_vaddr_t)new_cache);
+ return NULL;
+ }
+
+ /* Add the cache to the list of slab caches */
+ list_add_tail(kslab_cache_list, new_cache);
+
+ /* if the min_free_objs is set, pre-allocate a slab */
+ if (min_free_objs)
+ {
+ if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
+ {
+ sos_kmem_cache_destroy(new_cache);
+ return NULL; /* Not enough memory */
+ }
+ }
+
+ return new_cache;
+}
+
+
+sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
+{
+ int nb_slabs;
+ struct sos_kslab *slab;
+
+ if (! kslab_cache)
+ return -SOS_EINVAL;
+
+ /* Refuse to destroy the cache if there are any objects still
+ allocated */
+ list_foreach(kslab_cache->slab_list, slab, nb_slabs)
+ {
+ if (slab->nb_free != kslab_cache->nb_objects_per_slab)
+ return -SOS_EBUSY;
+ }
+
+ /* Remove all the slabs */
+ while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
+ {
+ cache_release_slab(slab, TRUE);
+ }
+
+ /* Remove the cache */
+ return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
+}
+
+
+sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags)
+{
+ sos_vaddr_t obj_vaddr;
+ struct sos_kslab * slab_head;
+#define ALLOC_RET return
+
+ /* If the slab at the head of the slabs' list has no free object,
+ then the other slabs don't either => need to allocate a new
+ slab */
+ if ((! kslab_cache->slab_list)
+ || (! list_get_head(kslab_cache->slab_list)->free))
+ {
+ if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
+ /* Not enough memory or blocking alloc */
+ ALLOC_RET( (sos_vaddr_t)NULL);
+ }
+
+ /* Here: we are sure that list_get_head(kslab_cache->slab_list)
+ exists *AND* that list_get_head(kslab_cache->slab_list)->free is
+ NOT NULL */
+ slab_head = list_get_head(kslab_cache->slab_list);
+ SOS_ASSERT_FATAL(slab_head != NULL);
+
+ /* Allocate the object at the head of the slab at the head of the
+ slabs' list */
+ obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
+ slab_head->nb_free --;
+ kslab_cache->nb_free_objects --;
+
+ /* If needed, reset object's contents */
+ if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
+ memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
+
+ /* Slab is now full ? */
+ if (slab_head->free == NULL)
+ {
+ /* Transfer it at the tail of the slabs' list */
+ struct sos_kslab *slab;
+ slab = list_pop_head(kslab_cache->slab_list);
+ list_add_tail(kslab_cache->slab_list, slab);
+ }
+
+ /*
+ * For caches that require a minimum amount of free objects left,
+ * allocate a slab if needed.
+ *
+ * Notice the "== min_objects - 1": we did not write " <
+ * min_objects" because for the cache of kmem structure, this would
+ * lead to an chicken-and-egg problem, since cache_grow below would
+ * call cache_alloc again for the kmem_vmm cache, so we return here
+ * with the same cache. If the test were " < min_objects", then we
+ * would call cache_grow again for the kmem_vmm cache again and
+ * again... until we reach the bottom of our stack (infinite
+ * recursion). By telling precisely "==", then the cache_grow would
+ * only be called the first time.
+ */
+ if ((kslab_cache->min_free_objects > 0)
+ && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
+ {
+ /* No: allocate a new slab now */
+ if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
+ {
+ /* Not enough free memory or blocking alloc => undo the
+ allocation */
+ sos_kmem_cache_free(obj_vaddr);
+ ALLOC_RET( (sos_vaddr_t)NULL);
+ }
+ }
+
+ ALLOC_RET(obj_vaddr);
+}
+
+
+/**
+ * Helper function to free the object located at the given address.
+ *
+ * @param empty_slab is the address of the slab to release, if removing
+ * the object causes the slab to become empty.
+ */
+inline static
+sos_ret_t
+free_object(sos_vaddr_t vaddr,
+ struct sos_kslab ** empty_slab)
+{
+ struct sos_kslab_cache *kslab_cache;
+
+ /* Lookup the slab containing the object in the slabs' list */
+ struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
+
+ /* By default, consider that the slab will not become empty */
+ *empty_slab = NULL;
+
+ /* Did not find the slab */
+ if (! slab)
+ return -SOS_EINVAL;
+
+ SOS_ASSERT_FATAL(slab->cache);
+ kslab_cache = slab->cache;
+
+ /*
+ * Check whether the address really could mark the start of an actual
+ * allocated object
+ */
+ /* Address multiple of an object's size ? */
+ if (( (vaddr - slab->first_object)
+ % kslab_cache->alloc_obj_size) != 0)
+ return -SOS_EINVAL;
+ /* Address not too large ? */
+ if (( (vaddr - slab->first_object)
+ / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
+ return -SOS_EINVAL;
+
+ /*
+ * Ok: we now release the object
+ */
+
+  /* We found a full slab => it will not be full any more => move it
+     to the head of the slabs' list */
+ if (! slab->free)
+ {
+ list_delete(kslab_cache->slab_list, slab);
+ list_add_head(kslab_cache->slab_list, slab);
+ }
+
+ /* Release the object */
+ list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
+ slab->nb_free++;
+ kslab_cache->nb_free_objects++;
+ SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
+
+ /* Cause the slab to be released if it becomes empty, and if we are
+ allowed to do it */
+ if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
+ && (kslab_cache->nb_free_objects - slab->nb_free
+ >= kslab_cache->min_free_objects))
+ {
+ *empty_slab = slab;
+ }
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
+{
+ sos_ret_t retval;
+ struct sos_kslab *empty_slab;
+
+ /* Remove the object from the slab */
+ retval = free_object(vaddr, & empty_slab);
+ if (retval != SOS_OK)
+ return retval;
+
+ /* Remove the slab and the underlying range if needed */
+ if (empty_slab != NULL)
+ return cache_release_slab(empty_slab, TRUE);
+
+ return SOS_OK;
+}
+
+
+struct sos_kmem_range *
+sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
+{
+ sos_ret_t retval;
+ struct sos_kslab *empty_slab;
+
+ /* Remove the object from the slab */
+ retval = free_object((sos_vaddr_t)the_range, & empty_slab);
+ if (retval != SOS_OK)
+ return NULL;
+
+ /* Remove the slab BUT NOT the underlying range if needed */
+ if (empty_slab != NULL)
+ {
+ struct sos_kmem_range *empty_range = empty_slab->range;
+ SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
+ SOS_ASSERT_FATAL(empty_range != NULL);
+ return empty_range;
+ }
+
+ return NULL;
+}
+
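To make the sizing arithmetic of cache_initialize() concrete, here is a worked example (the figures are illustrative: they assume SOS_PAGE_SIZE == 4096 and a 32-bit build where sizeof(struct sos_kslab) is 28 bytes):

/* Worked example of cache_initialize(), with illustrative figures:
 *   requested obj_size   = 16, 1 page per slab
 *   alloc_obj_size       = 16 (>= sizeof(free_object), 4-byte aligned)
 *   16 <= sizeof(struct sos_kslab)  => the ON_SLAB flag is set
 *   space_left           = 1*4096 - 28      = 4068
 *   nb_objects_per_slab  = 4068 / 16        = 254
 *   leftover             = 4068 - 254*16    = 4 bytes
 */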
diff --git a/sos-code-article6.5/sos/kmem_slab.h b/sos-code-article6.5/sos/kmem_slab.h
new file mode 100644
index 0000000..1f28ff9
--- /dev/null
+++ b/sos-code-article6.5/sos/kmem_slab.h
@@ -0,0 +1,206 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMEM_SLAB_H_
+#define _SOS_KMEM_SLAB_H_
+
+/**
+ * @file kmem_slab.h
+ *
+ * Kernel Memory Allocator based on Bonwick's slab allocator (Solaris
+ * 2.4, Linux 2.4). This allocator achieves a good memory utilization
+ * ratio (memory effectively used / memory requested), i.e. limited
+ * fragmentation, while elegantly handling cache-effect considerations
+ * (TLB locality through the notion of "cache" of slabs, and dcache
+ * utilization through the notion of cache colouring, which decreases
+ * the conflicts in the dcache for accesses to different data in the
+ * same cache).
+ *
+ * This allocator relies on the range allocator (kmem_vmm.h) to
+ * allocate the slabs, which itself relies on the slab allocator to
+ * allocate its "range" data structures, thus leading to a
+ * chicken-and-egg problem. We solve this problem by introducing the
+ * notion of "min_free_objs" for the slab caches, in order for the cache
+ * of ranges to always have enough ranges in reserve to complete the
+ * range allocation before being urged to allocate a new slab of
+ * ranges, which would require the allocation of a new range.
+ *
+ * Compared to Bonwick's recommendations, we don't handle ctor/dtor
+ * routines on the objects, so that we can alter the objects once they
+ * are freed. Thus, the list of free objects is stored in the free
+ * objects themselves, not alongside the objects (this also implies
+ * that the SOS_KSLAB_CREATE_MAP flag below is meaningless). We also
+ * don't implement cache colouring (trivial to add, but we omit it for
+ * readability reasons), and the only alignment constraint we respect
+ * is that allocated objects are aligned on a 4B boundary: other
+ * alignment constraints must be integrated by the user into the
+ * "object_size" parameter of "sos_kmem_cache_create()".
+ *
+ * References :
+ * - J. Bonwick's paper, "The slab allocator: An object-caching kernel
+ * memory allocator", In USENIX Summer 1994 Technical Conference
+ * - The bible, aka "Unix internals : the new frontiers" (section
+ * 12.10), Uresh Vahalia, Prentice Hall 1996, ISBN 0131019082
+ * - "The Linux slab allocator", B. Fitzgibbons,
+ * http://www.cc.gatech.edu/people/home/bradf/cs7001/proj2/
+ * - The Kos, http://kos.enix.org/
+ */
+#include <sos/types.h>
+#include <sos/errno.h>
+
+/** Opaque data structure that defines a Cache of slabs */
+struct sos_kslab_cache;
+
+/** Opaque data structure that defines a slab. Exported only to
+ kmem_vmm.h */
+struct sos_kslab;
+
+#include "kmem_vmm.h"
+
+
+/** The maximum allowed pages for each slab */
+#define MAX_PAGES_PER_SLAB 32 /* 128 kB */
+
+
+/**
+ * Initialize the slab cache of slab caches, and prepare the cache of
+ * kmem_range for kmem_vmm.
+ *
+ * @param kernel_core_base The virtual address of the first byte used
+ * by the kernel code/data
+ *
+ * @param kernel_core_top The virtual address of the first byte after
+ * the kernel code/data.
+ *
+ * @param sizeof_struct_range the size of the objects (aka "struct
+ * sos_kmem_vmm_ranges") to be allocated in the cache of ranges
+ *
+ * @param first_struct_slab_of_caches (output value) the virtual
+ * address of the first slab structure that gets allocated for the
+ * cache of caches. The function actually allocates the first slab of
+ * the cache of caches by hand, because of a chicken-and-egg problem. The
+ * address of the slab is used by the kmem_vmm_setup routine to
+ * finalize the allocation of the slab, in order for it to behave like
+ * a real slab afterwards.
+ *
+ * @param first_slab_of_caches_base (output value) the virtual address
+ * of the slab associated to the slab structure.
+ *
+ * @param first_slab_of_caches_nb_pages (output value) the number of
+ * (virtual) pages used by the first slab of the cache of caches.
+ *
+ * @param first_struct_slab_of_ranges (output value) the virtual address
+ * of the first slab that gets allocated for the cache of ranges. Same
+ * explanation as above.
+ *
+ * @param first_slab_of_ranges_base (output value) the virtual address
+ * of the slab associated to the slab structure.
+ *
+ * @param first_slab_of_ranges_nb_pages (output value) the number of
+ * (virtual) pages used by the first slab of the cache of ranges.
+ *
+ * @return the cache of kmem_range, immediately usable
+ */
+struct sos_kslab_cache *
+sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_size_t sizeof_struct_range,
+ /* results */
+ struct sos_kslab **first_struct_slab_of_caches,
+ sos_vaddr_t *first_slab_of_caches_base,
+ sos_count_t *first_slab_of_caches_nb_pages,
+ struct sos_kslab **first_struct_slab_of_ranges,
+ sos_vaddr_t *first_slab_of_ranges_base,
+ sos_count_t *first_slab_of_ranges_nb_pages);
+
+/**
+ * Update the configuration of the cache subsystem once the vmm
+ * subsystem has been fully initialized
+ */
+sos_ret_t
+sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
+ struct sos_kmem_range *first_range_of_caches,
+ struct sos_kslab *first_struct_slab_of_ranges,
+ struct sos_kmem_range *first_range_of_ranges);
+
+
+/*
+ * Flags for sos_kmem_cache_create()
+ */
+/** The slabs should be initially mapped in physical memory */
+#define SOS_KSLAB_CREATE_MAP (1<<0)
+/** The object should always be set to zero at allocation (implies
+ SOS_KSLAB_CREATE_MAP) */
+#define SOS_KSLAB_CREATE_ZERO (1<<1)
+
+/**
+ * @note this function MAY block (involved allocations are not atomic)
+ * @param name must remain valid during the whole cache's life
+ * (shallow copy) !
+ * @param cache_flags An or-ed combination of the SOS_KSLAB_CREATE_* flags
+ */
+struct sos_kslab_cache *
+sos_kmem_cache_create(const char* name,
+ sos_size_t object_size,
+ sos_count_t pages_per_slab,
+ sos_count_t min_free_objects,
+ sos_ui32_t cache_flags);
+
+sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache);
+
+
+/*
+ * Flags for sos_kmem_cache_alloc()
+ */
+/** Allocation should either succeed or fail, without blocking */
+#define SOS_KSLAB_ALLOC_ATOMIC (1<<0)
+
+/**
+ * Allocate an object from the given cache.
+ *
+ * @param alloc_flags An or-ed combination of the SOS_KSLAB_ALLOC_* flags
+ */
+sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
+ sos_ui32_t alloc_flags);
+
+
+/**
+ * Free an object (assumed to be already allocated and not already
+ * free) at the given virtual address.
+ */
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr);
+
+
+/*
+ * Function reserved for kmem_vmm.c. Does almost everything
+ * sos_kmem_cache_free() does, except that it never calls
+ * sos_kmem_vmm_del_range(), even when it would need to. This is aimed
+ * at avoiding large recursions when a range is freed with
+ * sos_kmem_vmm_del_range().
+ *
+ * @param the_range The range structure to free
+ *
+ * @return NULL when the range containing 'the_range' still contains
+ * other ranges, or the address of the range which owned 'the_range'
+ * if it becomes empty.
+ */
+struct sos_kmem_range *
+sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range);
+
+
+#endif /* _SOS_KMEM_SLAB_H_ */
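
Finally, a hypothetical sketch of the public API above (not part of this patch; "struct foo" and foo_cache_demo() are invented): a dedicated cache for fixed-size objects.

/* Hypothetical dedicated slab cache for fixed-size objects */
#include <sos/kmem_slab.h>
#include <sos/assert.h>

struct foo { int x, y; };

static struct sos_kslab_cache *foo_cache;

static void foo_cache_demo(void)
{
  struct foo *f;

  /* 1 page per slab, no minimum reserve; objects mapped and zeroed */
  foo_cache = sos_kmem_cache_create("struct foo", sizeof(struct foo),
                                    1, 0,
                                    SOS_KSLAB_CREATE_MAP
                                    | SOS_KSLAB_CREATE_ZERO);
  SOS_ASSERT_FATAL(foo_cache != NULL);

  f = (struct foo*) sos_kmem_cache_alloc(foo_cache, 0 /* may block */);
  SOS_ASSERT_FATAL(f != NULL);

  sos_kmem_cache_free((sos_vaddr_t)f);
  SOS_ASSERT_FATAL(sos_kmem_cache_destroy(foo_cache) == SOS_OK);
}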
diff --git a/sos-code-article6.5/sos/kmem_vmm.c b/sos-code-article6.5/sos/kmem_vmm.c
new file mode 100644
index 0000000..ea2fdf1
--- /dev/null
+++ b/sos-code-article6.5/sos/kmem_vmm.c
@@ -0,0 +1,606 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/list.h>
+#include <sos/physmem.h>
+#include <hwcore/paging.h>
+#include <sos/assert.h>
+
+#include "kmem_vmm.h"
+
+/** The structure of a range of kernel-space virtual addresses */
+struct sos_kmem_range
+{
+ sos_vaddr_t base_vaddr;
+ sos_count_t nb_pages;
+
+ /* The slab owning this range, or NULL */
+ struct sos_kslab *slab;
+
+ struct sos_kmem_range *prev, *next;
+};
+const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
+
+/** The ranges are SORTED in (strictly) ascending base addresses */
+static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;
+
+/** The slab cache for the kmem ranges */
+static struct sos_kslab_cache *kmem_range_cache;
+
+
+
+/** Helper function to get the closest preceding or containing
+ range for the given virtual address */
+static struct sos_kmem_range *
+get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
+ sos_vaddr_t vaddr)
+{
+ int nb_elements;
+ struct sos_kmem_range *a_range, *ret_range;
+
+  /* The kmem_range list is kept SORTED, so we can stop at the first
+     range whose base address is beyond vaddr and return the range
+     just before it */
+ ret_range = NULL;
+ list_foreach(the_list, a_range, nb_elements)
+ {
+ if (vaddr < a_range->base_vaddr)
+ return ret_range;
+ ret_range = a_range;
+ }
+
+ /* This will always be the LAST range in the kmem area */
+ return ret_range;
+}
+
+
+/**
+ * Helper function to lookup a free range large enough to hold nb_pages
+ * pages (first fit)
+ */
+static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
+{
+ int nb_elements;
+ struct sos_kmem_range *r;
+
+ list_foreach(kmem_free_range_list, r, nb_elements)
+ {
+ if (r->nb_pages >= nb_pages)
+ return r;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Helper function to add a_range in the_list, in strictly ascending order.
+ *
+ * @return The (possibly) new head of the_list
+ */
+static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
+ struct sos_kmem_range *a_range)
+{
+ struct sos_kmem_range *prec_used;
+
+ /** Look for any preceding range */
+ prec_used = get_closest_preceding_kmem_range(the_list,
+ a_range->base_vaddr);
+ /** insert a_range /after/ this prec_used */
+ if (prec_used != NULL)
+ list_insert_after(the_list, prec_used, a_range);
+ else /* Insert at the beginning of the list */
+ list_add_head(the_list, a_range);
+
+ return the_list;
+}
+
+
+/**
+ * Helper function to retrieve the range owning the given vaddr, by
+ * scanning the physical memory first if vaddr is mapped in RAM
+ */
+static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range;
+
+ /* First: try to retrieve the physical page mapped at this address */
+ sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
+
+ if (ppage_paddr)
+ {
+ range = sos_physmem_get_kmem_range(ppage_paddr);
+
+ /* If a page is mapped at this address, it is EXPECTED that it
+ is really associated with a range */
+ SOS_ASSERT_FATAL(range != NULL);
+ }
+
+ /* Otherwise scan the list of used ranges, looking for the range
+ owning the address */
+ else
+ {
+ range = get_closest_preceding_kmem_range(kmem_used_range_list,
+ vaddr);
+ /* Not found */
+ if (! range)
+ return NULL;
+
+ /* vaddr not covered by this range */
+ if ( (vaddr < range->base_vaddr)
+ || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
+ return NULL;
+ }
+
+ return range;
+}
+
+
+/**
+ * Helper function for sos_kmem_vmm_setup() to initialize a new range
+ * that maps a given area as free or as already used.
+ * This function either succeeds or halts the whole system.
+ */
+static struct sos_kmem_range *
+create_range(sos_bool_t is_free,
+ sos_vaddr_t base_vaddr,
+ sos_vaddr_t top_vaddr,
+ struct sos_kslab *associated_slab)
+{
+ struct sos_kmem_range *range;
+
+ SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
+ SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
+
+ if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
+ return NULL;
+
+ range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
+ SOS_KSLAB_ALLOC_ATOMIC);
+ SOS_ASSERT_FATAL(range != NULL);
+
+ range->base_vaddr = base_vaddr;
+ range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;
+
+ if (is_free)
+ {
+ list_add_tail(kmem_free_range_list,
+ range);
+ }
+ else
+ {
+ sos_vaddr_t vaddr;
+ range->slab = associated_slab;
+ list_add_tail(kmem_used_range_list,
+ range);
+
+      /* Ok, set the range owner for the pages in this range */
+ for (vaddr = base_vaddr ;
+ vaddr < top_vaddr ;
+ vaddr += SOS_PAGE_SIZE)
+ {
+ sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
+ SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
+ sos_physmem_set_kmem_range(ppage_paddr, range);
+ }
+ }
+
+ return range;
+}
+
+
+sos_ret_t
+sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
+ sos_vaddr_t kernel_core_top,
+ sos_vaddr_t bootstrap_stack_bottom_vaddr,
+ sos_vaddr_t bootstrap_stack_top_vaddr)
+{
+ struct sos_kslab *first_struct_slab_of_caches,
+ *first_struct_slab_of_ranges;
+ sos_vaddr_t first_slab_of_caches_base,
+ first_slab_of_caches_nb_pages,
+ first_slab_of_ranges_base,
+ first_slab_of_ranges_nb_pages;
+ struct sos_kmem_range *first_range_of_caches,
+ *first_range_of_ranges;
+
+ list_init(kmem_free_range_list);
+ list_init(kmem_used_range_list);
+
+ kmem_range_cache
+ = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
+ kernel_core_top,
+ sizeof(struct sos_kmem_range),
+ & first_struct_slab_of_caches,
+ & first_slab_of_caches_base,
+ & first_slab_of_caches_nb_pages,
+ & first_struct_slab_of_ranges,
+ & first_slab_of_ranges_base,
+ & first_slab_of_ranges_nb_pages);
+ SOS_ASSERT_FATAL(kmem_range_cache != NULL);
+
+ /* Mark virtual addresses 16kB - Video as FREE */
+ create_range(TRUE,
+ SOS_KMEM_VMM_BASE,
+ SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
+ NULL);
+
+ /* Mark virtual addresses in Video hardware mapping as NOT FREE */
+ create_range(FALSE,
+ SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
+ SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
+ NULL);
+
+ /* Mark virtual addresses Video - Kernel as FREE */
+ create_range(TRUE,
+ SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
+ SOS_PAGE_ALIGN_INF(kernel_core_base),
+ NULL);
+
+ /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
+ as NOT FREE */
+ create_range(FALSE,
+ SOS_PAGE_ALIGN_INF(kernel_core_base),
+ bootstrap_stack_bottom_vaddr,
+ NULL);
+
+  /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
+     but in a separate range so that it can be unallocated later */
+ create_range(FALSE,
+ bootstrap_stack_bottom_vaddr,
+ bootstrap_stack_top_vaddr,
+ NULL);
+
+ /* Mark the remaining virtual addresses in Kernel code/data after
+ the bootstrap stack as NOT FREE */
+ create_range(FALSE,
+ bootstrap_stack_top_vaddr,
+ SOS_PAGE_ALIGN_SUP(kernel_core_top),
+ NULL);
+
+ /* Mark virtual addresses in the first slab of the cache of caches
+ as NOT FREE */
+ SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
+ == first_slab_of_caches_base);
+ SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
+ first_range_of_caches
+ = create_range(FALSE,
+ first_slab_of_caches_base,
+ first_slab_of_caches_base
+ + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
+ first_struct_slab_of_caches);
+
+ /* Mark virtual addresses in the first slab of the cache of ranges
+ as NOT FREE */
+ SOS_ASSERT_FATAL((first_slab_of_caches_base
+ + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
+ == first_slab_of_ranges_base);
+ SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
+ first_range_of_ranges
+ = create_range(FALSE,
+ first_slab_of_ranges_base,
+ first_slab_of_ranges_base
+ + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
+ first_struct_slab_of_ranges);
+
+ /* Mark virtual addresses after these slabs as FREE */
+ create_range(TRUE,
+ first_slab_of_ranges_base
+ + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
+ SOS_KMEM_VMM_TOP,
+ NULL);
+
+ /* Update the cache subsystem so that the artificially-created
+ caches of caches and ranges really behave like *normal* caches (ie
+ those allocated by the normal slab API) */
+ sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
+ first_range_of_caches,
+ first_struct_slab_of_ranges,
+ first_range_of_ranges);
+
+ return SOS_OK;
+}
+
+
+/**
+ * Allocate a new kernel area spanning one or multiple pages.
+ *
+ * @return the new range structure, or NULL upon failure
+ */
+struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
+ sos_ui32_t flags,
+ sos_vaddr_t * range_start)
+{
+ struct sos_kmem_range *free_range, *new_range;
+
+ if (nb_pages <= 0)
+ return NULL;
+
+  /* Find a free range large enough to hold the requested nb_pages pages */
+ free_range = find_suitable_free_range(nb_pages);
+ if (free_range == NULL)
+ return NULL;
+
+ /* If range has exactly the requested size, just move it to the
+ "used" list */
+ if(free_range->nb_pages == nb_pages)
+ {
+ list_delete(kmem_free_range_list, free_range);
+ kmem_used_range_list = insert_range(kmem_used_range_list,
+ free_range);
+ /* The new_range is exactly the free_range */
+ new_range = free_range;
+ }
+
+  /* Otherwise the range is bigger than the requested size: split it.
+     This involves shrinking it, and allocating a new range, which
+     is then added to the "used" list */
+ else
+ {
+ /* free_range split in { new_range | free_range } */
+ new_range = (struct sos_kmem_range*)
+ sos_kmem_cache_alloc(kmem_range_cache,
+ (flags & SOS_KMEM_VMM_ATOMIC)?
+ SOS_KSLAB_ALLOC_ATOMIC:0);
+ if (! new_range)
+ return NULL;
+
+ new_range->base_vaddr = free_range->base_vaddr;
+ new_range->nb_pages = nb_pages;
+ free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
+ free_range->nb_pages -= nb_pages;
+
+ /* free_range is still at the same place in the list */
+ /* insert new_range in the used list */
+ kmem_used_range_list = insert_range(kmem_used_range_list,
+ new_range);
+ }
+
+ /* By default, the range is not associated with any slab */
+ new_range->slab = NULL;
+
+ /* If mapping of physical pages is needed, map them now */
+ if (flags & SOS_KMEM_VMM_MAP)
+ {
+ int i;
+ for (i = 0 ; i < nb_pages ; i ++)
+ {
+ /* Get a new physical page */
+ sos_paddr_t ppage_paddr
+ = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));
+
+ /* Map the page in kernel space */
+ if (ppage_paddr)
+ {
+ if (sos_paging_map(ppage_paddr,
+ new_range->base_vaddr
+ + i * SOS_PAGE_SIZE,
+ FALSE /* Not a user page */,
+ ((flags & SOS_KMEM_VMM_ATOMIC)?
+ SOS_VM_MAP_ATOMIC:0)
+ | SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE))
+ {
+ /* Failed => force unallocation, see below */
+ sos_physmem_unref_physpage(ppage_paddr);
+ ppage_paddr = (sos_paddr_t)NULL;
+ }
+ else
+ {
+ /* Success : page can be unreferenced since it is
+ now mapped */
+ sos_physmem_unref_physpage(ppage_paddr);
+ }
+ }
+
+ /* Undo the allocation if failed to allocate or map a new page */
+ if (! ppage_paddr)
+ {
+ sos_kmem_vmm_del_range(new_range);
+ return NULL;
+ }
+
+ /* Ok, set the range owner for this page */
+ sos_physmem_set_kmem_range(ppage_paddr, new_range);
+ }
+ }
+ /* ... Otherwise: Demand Paging will do the job */
+
+ if (range_start)
+ *range_start = new_range->base_vaddr;
+
+ return new_range;
+}
+
+
+sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
+{
+ int i;
+ struct sos_kmem_range *ranges_to_free;
+ list_init(ranges_to_free);
+
+ SOS_ASSERT_FATAL(range != NULL);
+ SOS_ASSERT_FATAL(range->slab == NULL);
+
+ /* Remove the range from the 'USED' list now */
+ list_delete(kmem_used_range_list, range);
+
+  /*
+   * The following do..while() loop is here to avoid an indirect
+   * recursion: if we called kmem_cache_free() directly from inside
+   * the current function, we would risk re-entering the current
+   * function (sos_kmem_vmm_del_range()), which may cause problems if
+   * it in turn calls kmem_slab and sos_kmem_vmm_del_range again, and
+   * again and again. This may happen while freeing ranges of struct
+   * sos_kslab...
+   *
+   * To avoid this, we call a special kmem_slab function that does
+   * almost the same as sos_kmem_cache_free(), but which does NOT
+   * call us back (ie sos_kmem_vmm_del_range()): instead WE add the
+   * range that is to be freed to a list, and the do..while() loop is
+   * here to process this list ! The recursion is replaced by
+   * classical iterations.
+   */
+ do
+ {
+ /* Ok, we got the range. Now, insert this range in the free list */
+ kmem_free_range_list = insert_range(kmem_free_range_list, range);
+
+ /* Unmap the physical pages */
+ for (i = 0 ; i < range->nb_pages ; i ++)
+ {
+ /* This will work even if no page is mapped at this address */
+ sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
+ }
+
+      /* Possibly coalesce it with the prev/next free ranges (there is
+ always a valid prev/next link since the list is circular). Note:
+ the tests below will lead to correct behaviour even if the list
+ is limited to the 'range' singleton, at least as long as the
+ range is not zero-sized */
+ /* Merge with preceding one ? */
+ if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
+ == range->base_vaddr)
+ {
+ struct sos_kmem_range *empty_range_of_ranges = NULL;
+ struct sos_kmem_range *prec_free = range->prev;
+
+ /* Merge them */
+ prec_free->nb_pages += range->nb_pages;
+ list_delete(kmem_free_range_list, range);
+
+ /* Mark the range as free. This may cause the slab owning
+ the range to become empty */
+ empty_range_of_ranges =
+ sos_kmem_cache_release_struct_range(range);
+
+ /* If this causes the slab owning the range to become empty,
+ add the range corresponding to the slab at the end of the
+ list of the ranges to be freed: it will be actually freed
+ in one of the next iterations of the do{} loop. */
+ if (empty_range_of_ranges != NULL)
+ {
+ list_delete(kmem_used_range_list, empty_range_of_ranges);
+ list_add_tail(ranges_to_free, empty_range_of_ranges);
+ }
+
+	  /* Set range to the beginning of this coalesced area */
+ range = prec_free;
+ }
+
+ /* Merge with next one ? [NO 'else' since range may be the result of
+ the merge above] */
+ if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
+ == range->next->base_vaddr)
+ {
+ struct sos_kmem_range *empty_range_of_ranges = NULL;
+ struct sos_kmem_range *next_range = range->next;
+
+ /* Merge them */
+ range->nb_pages += next_range->nb_pages;
+ list_delete(kmem_free_range_list, next_range);
+
+ /* Mark the next_range as free. This may cause the slab
+ owning the next_range to become empty */
+ empty_range_of_ranges =
+ sos_kmem_cache_release_struct_range(next_range);
+
+ /* If this causes the slab owning the next_range to become
+ empty, add the range corresponding to the slab at the end
+ of the list of the ranges to be freed: it will be
+ actually freed in one of the next iterations of the
+ do{} loop. */
+ if (empty_range_of_ranges != NULL)
+ {
+ list_delete(kmem_used_range_list, empty_range_of_ranges);
+ list_add_tail(ranges_to_free, empty_range_of_ranges);
+ }
+ }
+
+
+ /* If deleting the range(s) caused one or more range(s) to be
+ freed, get the next one to free */
+ if (list_is_empty(ranges_to_free))
+ range = NULL; /* No range left to free */
+ else
+ range = list_pop_head(ranges_to_free);
+
+ }
+ /* Stop when there is no range left to be freed for now */
+ while (range != NULL);
+
+ return SOS_OK;
+}
+
+
+sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
+ sos_ui32_t flags)
+{
+ struct sos_kmem_range *range
+ = sos_kmem_vmm_new_range(nb_pages,
+ flags,
+ NULL);
+ if (! range)
+ return (sos_vaddr_t)NULL;
+
+ return range->base_vaddr;
+}
+
+
+sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+
+ /* We expect that the given address is the base address of the
+ range */
+ if (!range || (range->base_vaddr != vaddr))
+ return -SOS_EINVAL;
+
+ /* We expect that this range is not held by any cache */
+ if (range->slab != NULL)
+ return -SOS_EBUSY;
+
+ return sos_kmem_vmm_del_range(range);
+}
+
+
+sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
+ struct sos_kslab *slab)
+{
+ if (! range)
+ return -SOS_EINVAL;
+
+ range->slab = slab;
+ return SOS_OK;
+}
+
+struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+ if (! range)
+ return NULL;
+
+ return range->slab;
+}
+
+
+sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
+{
+ struct sos_kmem_range *range = lookup_range(vaddr);
+ return (range != NULL);
+}
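
A minimal usage sketch of the two entry points above (illustrative only; the wrapper function below is hypothetical): it reserves a four-page kernel area with SOS_KMEM_VMM_MAP so the physical frames are mapped immediately, uses it, then hands it back with sos_kmem_vmm_del_range(), where it is coalesced into the free-range list.

/* Illustrative sketch: the function name is made up, the calls rely
   only on the API implemented above. */
#include <sos/klibc.h>
#include <sos/kmem_vmm.h>
#include <sos/assert.h>

static void kmem_vmm_range_sketch(void)
{
  sos_vaddr_t base;
  struct sos_kmem_range *r
    = sos_kmem_vmm_new_range(4 /* pages */,
                             SOS_KMEM_VMM_MAP,
                             & base);
  if (! r)
    return; /* No suitable free range, or out of physical memory */

  /* [base .. base + 4*SOS_PAGE_SIZE) is now mapped and usable */
  memset((void*)base, 0x0, 4*SOS_PAGE_SIZE);

  /* Give the area back; it is merged with neighbouring free ranges */
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_del_range(r));
}
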
diff --git a/sos-code-article6.5/sos/kmem_vmm.h b/sos-code-article6.5/sos/kmem_vmm.h
new file mode 100644
index 0000000..49b262d
--- /dev/null
+++ b/sos-code-article6.5/sos/kmem_vmm.h
@@ -0,0 +1,113 @@
+/* Copyright (C) 2000 Thomas Petazzoni
+ Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KMEM_VMM_H_
+#define _SOS_KMEM_VMM_H_
+
+/**
+ * @file kmem_vmm.h
+ *
+ * Kernel Memory Allocator for multiple-page-sized objects residing in
+ * the kernel (virtual memory) space. Relies on the slab cache
+ * allocator to allocate its (internal) "range" data structure.
+ */
+
+#include <hwcore/paging.h>
+
+/* The base and top virtual addresses covered by the kernel allocator */
+#define SOS_KMEM_VMM_BASE 0x4000 /* 16kB */
+#define SOS_KMEM_VMM_TOP SOS_PAGING_MIRROR_VADDR /* 1GB - 4MB */
+
+/** Opaque structure used internally and declared here for physmem.h */
+struct sos_kmem_range;
+
+#include <sos/kmem_slab.h>
+
+/**
+ * Mark the areas between SOS_KMEM_VMM_BASE and SOS_KMEM_VMM_TOP as
+ * either used or free. Those that are already mapped are marked as
+ * "used", and the 0..SOS_KMEM_VMM_BASE virtual addresses are marked
+ * as "used" too (to detect incorrect pointer dereferences).
+ */
+sos_ret_t
+sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base_vaddr,
+ sos_vaddr_t kernel_core_top_vaddr,
+ sos_vaddr_t bootstrap_stack_bottom_vaddr,
+ sos_vaddr_t bootstrap_stack_top_vaddr);
+
+
+/*
+ * Flags for kmem_vmm_new_range and kmem_vmm_alloc
+ */
+/** Physical pages should be immediately mapped */
+#define SOS_KMEM_VMM_MAP (1<<0)
+/** Allocation should either succeed or fail immediately, without blocking */
+#define SOS_KMEM_VMM_ATOMIC (1<<1)
+
+/**
+ * Allocate a new kernel area spanning one or multiple pages.
+ *
+ * @param range_base_vaddr If not NULL, the start address of the range
+ * is stored in this location
+ * @return the new range structure, or NULL upon failure
+ */
+struct sos_kmem_range *sos_kmem_vmm_new_range(sos_size_t nb_pages,
+ sos_ui32_t flags,
+ sos_vaddr_t *range_base_vaddr);
+sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range);
+
+
+/**
+ * Straightforward variant of sos_kmem_vmm_new_range() returning the
+ * range's start address instead of the range structure
+ */
+sos_vaddr_t sos_kmem_vmm_alloc(sos_size_t nb_pages,
+ sos_ui32_t flags);
+
+/**
+ * @note you are perfectly allowed to give the address of the
+ * kernel image, or the address of the bios area here, it will work:
+ * the kernel/bios WILL be "deallocated". But if you really want to do
+ * this, well..., do expect some "surprises" ;)
+ */
+sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr);
+
+
+/**
+ * @return TRUE when vaddr is covered by any (used) kernel range
+ */
+sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr);
+
+
+/* *****************************
+ * Reserved to kmem_slab.c ONLY.
+ */
+/**
+ * Associate the range with the given slab.
+ */
+sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
+ struct sos_kslab *slab);
+
+/**
+ * Retrieve the (used) slab associated with the range covering vaddr.
+ *
+ * @return NULL when vaddr is not covered by any used range, or when
+ * that range is not associated with any slab
+ */
+struct sos_kslab *sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr);
+
+#endif /* _SOS_KMEM_VMM_H_ */
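
For the flat sos_kmem_vmm_alloc()/sos_kmem_vmm_free() pair, a complementary sketch (the helper name is again made up): without SOS_KMEM_VMM_MAP, physical frames are only provided lazily, by the kernel page-fault handler (see pgflt_ex() in main.c below).

/* Illustrative only: demand-paged kernel allocation. */
#include <sos/kmem_vmm.h>

static void kmem_vmm_alloc_sketch(void)
{
  sos_vaddr_t area = sos_kmem_vmm_alloc(2, 0 /* no flags: demand paging */);
  if (! area)
    return; /* No kernel virtual space left */

  /* The first access faults; the page-fault handler maps a physical
     page on demand */
  *(char*)area = 42;

  sos_kmem_vmm_free(area);
}
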
diff --git a/sos-code-article6.5/sos/ksynch.c b/sos-code-article6.5/sos/ksynch.c
new file mode 100644
index 0000000..799ecc0
--- /dev/null
+++ b/sos-code-article6.5/sos/ksynch.c
@@ -0,0 +1,233 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+
+#include <hwcore/irq.h>
+
+
+#include "ksynch.h"
+
+
+sos_ret_t sos_ksema_init(struct sos_ksema *sema, const char *name,
+ int initial_value)
+{
+ sema->value = initial_value;
+ return sos_kwaitq_init(& sema->kwaitq, name);
+}
+
+
+sos_ret_t sos_ksema_dispose(struct sos_ksema *sema)
+{
+ return sos_kwaitq_dispose(& sema->kwaitq);
+}
+
+
+sos_ret_t sos_ksema_down(struct sos_ksema *sema,
+ struct sos_time *timeout)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = SOS_OK;
+
+ sema->value --;
+ if (sema->value < 0)
+ {
+ /* Wait for somebody to wake us */
+ retval = sos_kwaitq_wait(& sema->kwaitq, timeout);
+
+ /* Something wrong happened (timeout, external wakeup, ...) ? */
+ if (SOS_OK != retval)
+ {
+ /* Yes: pretend we did not ask for the semaphore */
+ sema->value ++;
+ }
+ }
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_ret_t sos_ksema_trydown(struct sos_ksema *sema)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+
+ /* Can we take the semaphore without blocking ? */
+ if (sema->value >= 1)
+ {
+ /* Yes: we take it now */
+ sema->value --;
+ retval = SOS_OK;
+ }
+ else
+ {
+      /* No: report that the semaphore is busy */
+ retval = -SOS_EBUSY;
+ }
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_ret_t sos_ksema_up(struct sos_ksema *sema)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+
+ sema->value ++;
+ retval = sos_kwaitq_wakeup(& sema->kwaitq, 1, SOS_OK);
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_ret_t sos_kmutex_init(struct sos_kmutex *mutex, const char *name)
+{
+ mutex->owner = NULL;
+ return sos_kwaitq_init(& mutex->kwaitq, name);
+}
+
+
+sos_ret_t sos_kmutex_dispose(struct sos_kmutex *mutex)
+{
+ return sos_kwaitq_dispose(& mutex->kwaitq);
+}
+
+
+/*
+ * Implementation based on ownership transfer (ie no while()
+ * loop). The only assumption is that the thread awoken by
+ * kmutex_unlock is not destroyed before it effectively wakes up: in
+ * that case the mutex would remain locked forever AND impossible to
+ * unlock (by anybody other than the owner, which is unnatural since
+ * that owner already issued an unlock()...). The same problem exists
+ * with the semaphores, but in a less obvious manner.
+ */
+sos_ret_t sos_kmutex_lock(struct sos_kmutex *mutex,
+ struct sos_time *timeout)
+{
+ __label__ exit_kmutex_lock;
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = SOS_OK;
+
+ /* Mutex already owned ? */
+ if (NULL != mutex->owner)
+ {
+ /* Owned by us or by someone else ? */
+ if (sos_thread_get_current() == mutex->owner)
+ {
+ /* Owned by us: do nothing */
+ retval = -SOS_EBUSY;
+ goto exit_kmutex_lock;
+ }
+
+ /* Wait for somebody to wake us */
+ retval = sos_kwaitq_wait(& mutex->kwaitq, timeout);
+
+ /* Something wrong happened ? */
+ if (SOS_OK != retval)
+ {
+ goto exit_kmutex_lock;
+ }
+ }
+
+ /* Ok, the mutex is available to us: take it */
+ mutex->owner = sos_thread_get_current();
+
+ exit_kmutex_lock:
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_ret_t sos_kmutex_trylock(struct sos_kmutex *mutex)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+
+ /* Mutex available to us ? */
+ if (NULL == mutex->owner)
+ {
+ /* Great ! Take it now */
+ mutex->owner = sos_thread_get_current();
+
+ retval = SOS_OK;
+ }
+ else
+ {
+      /* No: report that the mutex is busy */
+ retval = -SOS_EBUSY;
+ }
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_ret_t sos_kmutex_unlock(struct sos_kmutex *mutex)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+
+ if (sos_thread_get_current() != mutex->owner)
+ retval = -SOS_EPERM;
+
+ else if (sos_kwaitq_is_empty(& mutex->kwaitq))
+ {
+ /*
+ * There is NOT ANY thread waiting => we really mark the mutex
+ * as FREE
+ */
+ mutex->owner = NULL;
+ retval = SOS_OK;
+ }
+ else
+ {
+ /*
+ * There is at least 1 thread waiting => we DO NOT mark the
+ * mutex as free !
+ * Actually, we should have written:
+ * mutex->owner = thread_that_is_woken_up;
+ * But the real Id of the next thread owning the mutex is not
+ * that important. What is important here is that mutex->owner
+ * IS NOT NULL. Otherwise there will be a possibility for the
+ * thread woken up here to have the mutex stolen by a thread
+ * locking the mutex in the meantime.
+ */
+ retval = sos_kwaitq_wakeup(& mutex->kwaitq, 1, SOS_OK);
+ }
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
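
A short sketch of the intended use of the mutex implemented above (the counter and function names are invented): a NULL timeout means "wait for ever", and relocking by the current owner is reported as -SOS_EBUSY rather than deadlocking.

/* Illustrative sketch: protecting a shared counter with a kmutex. */
#include <sos/ksynch.h>
#include <sos/assert.h>

static struct sos_kmutex counter_mutex;
static int shared_counter = 0;

static void counter_setup(void)
{
  SOS_ASSERT_FATAL(SOS_OK == sos_kmutex_init(& counter_mutex, "counter"));
}

static void counter_increment(void)
{
  if (SOS_OK != sos_kmutex_lock(& counter_mutex, NULL))
    return; /* Woken up without having obtained the mutex */

  shared_counter ++; /* Critical section */

  SOS_ASSERT_FATAL(SOS_OK == sos_kmutex_unlock(& counter_mutex));
}

In contexts where blocking is not allowed, sos_kmutex_trylock() can be used instead of the blocking lock.
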
diff --git a/sos-code-article6.5/sos/ksynch.h b/sos-code-article6.5/sos/ksynch.h
new file mode 100644
index 0000000..597971d
--- /dev/null
+++ b/sos-code-article6.5/sos/ksynch.h
@@ -0,0 +1,170 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KSYNCH_H_
+#define _SOS_KSYNCH_H_
+
+
+/**
+ * @file ksynch.h
+ *
+ * Common kernel synchronisation primitives.
+ */
+
+
+#include <sos/errno.h>
+#include <sos/kwaitq.h>
+
+
+/* ====================================================================
+ * Kernel semaphores, NON-recursive
+ */
+
+
+/**
+ * The structure of a (NON-RECURSIVE) kernel Semaphore
+ */
+struct sos_ksema
+{
+ int value;
+ struct sos_kwaitq kwaitq;
+};
+
+
+/*
+ * Initialize a kernel semaphore structure with the given name
+ *
+ * @param name Name of the semaphore (for debugging purpose only; safe
+ * [deep copied])
+ *
+ * @param initial_value The value of the semaphore before any up/down
+ */
+sos_ret_t sos_ksema_init(struct sos_ksema *sema, const char *name,
+ int initial_value);
+
+
+/*
+ * De-initialize a kernel semaphore
+ *
+ * @return -SOS_EBUSY when semaphore could not be de-initialized
+ * because at least one thread is waiting in the waitq.
+ */
+sos_ret_t sos_ksema_dispose(struct sos_ksema *sema);
+
+
+/*
+ * Enters the semaphore
+ *
+ * @param timeout Maximum time to wait for the semaphore. Or NULL for
+ * "no limit". Updated on return to reflect the time remaining (0 when
+ * timeout has been triggered)
+ *
+ * @return -SOS_EINTR when timeout was triggered or when another waitq
+ * woke us up.
+ *
+ * @note This is a BLOCKING FUNCTION
+ */
+sos_ret_t sos_ksema_down(struct sos_ksema *sema,
+ struct sos_time *timeout);
+
+
+/*
+ * Try to enter the semaphore without blocking.
+ *
+ * @return -SOS_EBUSY when locking the semaphore would block
+ */
+sos_ret_t sos_ksema_trydown(struct sos_ksema *sema);
+
+
+/**
+ * Increments the semaphore's value, possibly waking up a waiting thread
+ */
+sos_ret_t sos_ksema_up(struct sos_ksema *sema);
+
+
+
+/* ====================================================================
+ * Kernel mutex (ie binary semaphore with strong ownership),
+ * NON-recursive !
+ */
+
+
+/**
+ * The structure of a (NON-RECURSIVE) kernel Mutex
+ */
+struct sos_kmutex
+{
+ struct sos_thread *owner;
+ struct sos_kwaitq kwaitq;
+};
+
+
+/*
+ * Initialize a kernel mutex structure with the given name
+ *
+ * @param name Name of the mutex (for debugging purpose only; safe
+ * [deep copied])
+ */
+sos_ret_t sos_kmutex_init(struct sos_kmutex *mutex, const char *name);
+
+
+/*
+ * De-initialize a kernel mutex
+ *
+ * @return -SOS_EBUSY when mutex could not be de-initialized
+ * because at least one thread is waiting in the waitq.
+ */
+sos_ret_t sos_kmutex_dispose(struct sos_kmutex *mutex);
+
+
+/*
+ * Lock the mutex. If the same thread locks the same mutex several
+ * times, it won't hurt (no deadlock, return value = -SOS_EBUSY).
+ *
+ * @param timeout Maximum time to wait for the mutex. Or NULL for "no
+ * limit". Updated on return to reflect the time remaining (0 when
+ * timeout has been triggered)
+ *
+ * @return -SOS_EINTR when timeout was triggered or when another waitq
+ * woke us up, -SOS_EBUSY when the thread already owns the mutex.
+ *
+ * @note This is a BLOCKING FUNCTION
+ */
+sos_ret_t sos_kmutex_lock(struct sos_kmutex *mutex,
+ struct sos_time *timeout);
+
+
+/*
+ * Try to lock the mutex without blocking.
+ *
+ * @return -SOS_EBUSY when locking the mutex would block
+ */
+sos_ret_t sos_kmutex_trylock(struct sos_kmutex *mutex);
+
+
+/**
+ * Unlock the mutex, possibly waking up a waiting thread
+ *
+ * @return -SOS_EPERM when the calling thread is NOT the owner of the
+ * mutex
+ */
+sos_ret_t sos_kmutex_unlock(struct sos_kmutex *mutex);
+
+
+#endif /* _SOS_KSYNCH_H_ */
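
As an illustration of the semaphore API above, a classical producer/consumer pair (the functions are hypothetical and would be started with sos_create_kernel_thread(), as test_thread() does in main.c below): the consumer blocks in sos_ksema_down() until the producer has signalled at least one item with sos_ksema_up().

/* Illustrative sketch: names are made up, only the API above is used. */
#include <sos/ksynch.h>
#include <sos/assert.h>

static struct sos_ksema items_available;

static void pc_setup(void)
{
  /* Nothing has been produced yet */
  SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& items_available, "items", 0));
}

static void producer(void *unused)
{
  (void)unused;
  for ( ; ; )
    {
      /* ... produce one item ... */
      sos_ksema_up(& items_available); /* Wake up one consumer */
    }
}

static void consumer(void *unused)
{
  (void)unused;
  for ( ; ; )
    {
      /* Wait, without time limit, until an item has been produced */
      if (SOS_OK != sos_ksema_down(& items_available, NULL))
        continue; /* Woken up by something other than ksema_up() */
      /* ... consume one item ... */
    }
}
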
diff --git a/sos-code-article6.5/sos/kwaitq.c b/sos-code-article6.5/sos/kwaitq.c
new file mode 100644
index 0000000..b4a451e
--- /dev/null
+++ b/sos-code-article6.5/sos/kwaitq.c
@@ -0,0 +1,249 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/klibc.h>
+#include <sos/list.h>
+#include <sos/assert.h>
+#include <hwcore/irq.h>
+
+#include "kwaitq.h"
+
+
+sos_ret_t sos_kwaitq_init(struct sos_kwaitq *kwq,
+ const char *name)
+{
+ memset(kwq, 0x0, sizeof(struct sos_kwaitq));
+
+#ifdef SOS_KWQ_DEBUG
+ if (! name)
+ name = "<unknown>";
+ strzcpy(kwq->name, name, SOS_KWQ_DEBUG_MAX_NAMELEN);
+#endif
+ list_init_named(kwq->waiting_list,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq);
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_kwaitq_dispose(struct sos_kwaitq *kwq)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ if (list_is_empty_named(kwq->waiting_list,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq))
+ retval = SOS_OK;
+ else
+ retval = -SOS_EBUSY;
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_bool_t sos_kwaitq_is_empty(const struct sos_kwaitq *kwq)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = list_is_empty_named(kwq->waiting_list,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq);
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+sos_ret_t sos_kwaitq_init_entry(struct sos_kwaitq_entry *kwq_entry)
+{
+ memset(kwq_entry, 0x0, sizeof(struct sos_kwaitq_entry));
+ kwq_entry->thread = sos_thread_get_current();
+ return SOS_OK;
+}
+
+
+/** Internal helper function equivalent to sos_kwaitq_add_entry(), but
+ without interrupt protection scheme, and explicit priority
+ ordering */
+inline static sos_ret_t _kwaitq_add_entry(struct sos_kwaitq *kwq,
+ struct sos_kwaitq_entry *kwq_entry)
+{
+ /* This entry is already added in the kwaitq ! */
+ SOS_ASSERT_FATAL(NULL == kwq_entry->kwaitq);
+
+ /* sos_kwaitq_init_entry() has not been called ?! */
+ SOS_ASSERT_FATAL(NULL != kwq_entry->thread);
+
+ /* (Re-)Initialize wakeup status of the entry */
+ kwq_entry->wakeup_triggered = FALSE;
+ kwq_entry->wakeup_status = SOS_OK;
+
+ /* Add the thread in the list */
+ list_add_tail_named(kwq->waiting_list, kwq_entry,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq);
+
+ /* Update the list of waitqueues for the thread */
+ list_add_tail_named(kwq_entry->thread->kwaitq_list, kwq_entry,
+ prev_entry_for_thread, next_entry_for_thread);
+
+ kwq_entry->kwaitq = kwq;
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_kwaitq_add_entry(struct sos_kwaitq *kwq,
+ struct sos_kwaitq_entry *kwq_entry)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = _kwaitq_add_entry(kwq, kwq_entry);
+ sos_restore_IRQs(flags);
+
+ return retval;
+}
+
+
+/** Internal helper function equivalent to sos_kwaitq_remove_entry(),
+ but without interrupt protection scheme */
+inline static sos_ret_t
+_kwaitq_remove_entry(struct sos_kwaitq *kwq,
+ struct sos_kwaitq_entry *kwq_entry)
+{
+ SOS_ASSERT_FATAL(kwq_entry->kwaitq == kwq);
+
+ list_delete_named(kwq->waiting_list, kwq_entry,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq);
+
+ list_delete_named(kwq_entry->thread->kwaitq_list, kwq_entry,
+ prev_entry_for_thread, next_entry_for_thread);
+
+ kwq_entry->kwaitq = NULL;
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_kwaitq_remove_entry(struct sos_kwaitq *kwq,
+ struct sos_kwaitq_entry *kwq_entry)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = _kwaitq_remove_entry(kwq, kwq_entry);
+ sos_restore_IRQs(flags);
+
+ return retval;
+}
+
+
+sos_ret_t sos_kwaitq_wait(struct sos_kwaitq *kwq,
+ struct sos_time *timeout)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+ struct sos_kwaitq_entry kwq_entry;
+
+ sos_kwaitq_init_entry(& kwq_entry);
+
+ sos_disable_IRQs(flags);
+
+ retval = _kwaitq_add_entry(kwq, & kwq_entry);
+
+ /* Wait for wakeup or timeout */
+ sos_thread_sleep(timeout);
+ /* Woken up ! */
+
+ /* Sleep delay elapsed ? */
+ if (! kwq_entry.wakeup_triggered)
+ {
+      /* Yes (timeout occurred, or wakeup on another waitqueue): remove
+ the waitq entry by ourselves */
+ _kwaitq_remove_entry(kwq, & kwq_entry);
+ retval = -SOS_EINTR;
+ }
+ else
+ {
+ retval = kwq_entry.wakeup_status;
+ }
+
+ sos_restore_IRQs(flags);
+
+ /* We were correctly awoken: position return status */
+ return retval;
+}
+
+
+sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq,
+ unsigned int nb_threads,
+ sos_ret_t wakeup_status)
+{
+ sos_ui32_t flags;
+
+ sos_disable_IRQs(flags);
+
+  /* Wake up as many threads waiting in the waitqueue as possible
+     (up to nb_threads), scanning the list in FIFO order */
+ while (! list_is_empty_named(kwq->waiting_list,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq))
+ {
+ struct sos_kwaitq_entry *kwq_entry
+ = list_get_head_named(kwq->waiting_list,
+ prev_entry_in_kwaitq, next_entry_in_kwaitq);
+
+ /* Enough threads woken up ? */
+ if (nb_threads <= 0)
+ break;
+
+ /*
+ * Ok: wake up the thread for this entry
+ */
+
+ /* Thread already woken up ? */
+ if (SOS_THR_RUNNING == sos_thread_get_state(kwq_entry->thread))
+ {
+ /* Yes => Do nothing because WE are that woken-up thread. In
+ particular: don't call set_ready() here because this
+ would result in an inconsistent configuration (currently
+ running thread marked as "waiting for CPU"...). */
+ continue;
+ }
+ else
+ {
+ /* No => wake it up now. */
+ sos_sched_set_ready(kwq_entry->thread);
+ }
+
+ /* Remove this waitq entry */
+ _kwaitq_remove_entry(kwq, kwq_entry);
+ kwq_entry->wakeup_triggered = TRUE;
+ kwq_entry->wakeup_status = wakeup_status;
+
+ /* Next iteration... */
+ nb_threads --;
+ }
+
+ sos_restore_IRQs(flags);
+
+ return SOS_OK;
+}
diff --git a/sos-code-article6.5/sos/kwaitq.h b/sos-code-article6.5/sos/kwaitq.h
new file mode 100644
index 0000000..4f879aa
--- /dev/null
+++ b/sos-code-article6.5/sos/kwaitq.h
@@ -0,0 +1,180 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_KWAITQ_H_
+#define _SOS_KWAITQ_H_
+
+#include <sos/errno.h>
+#include <sos/thread.h>
+#include <sos/time.h>
+#include <sos/sched.h>
+
+
+/**
+ * @file kwaitq.h
+ *
+ * Low-level functions to manage queues of threads waiting for a
+ * resource. These functions are public. For higher-level
+ * synchronization primitives such as mutex, semaphores, conditions,
+ * ... prefer looking at the corresponding libraries.
+ */
+
+
+/**
+ * Define this if you want to know the names of the kwaitq
+ */
+// #define SOS_KWQ_DEBUG
+
+
+/* Forward declaration */
+struct sos_kwaitq_entry;
+
+
+/**
+ * Definition of a waitqueue. In a kwaitq, the threads are queued in
+ * FIFO order.
+ */
+struct sos_kwaitq
+{
+#ifdef SOS_KWQ_DEBUG
+# define SOS_KWQ_DEBUG_MAX_NAMELEN 32
+ char name[SOS_KWQ_DEBUG_MAX_NAMELEN];
+#endif
+ struct sos_kwaitq_entry *waiting_list;
+};
+
+
+/**
+ * Definition of an entry for a thread waiting in the waitqueue
+ */
+struct sos_kwaitq_entry
+{
+  /** The thread associated with this entry */
+ struct sos_thread *thread;
+
+ /** The kwaitqueue this entry belongs to */
+ struct sos_kwaitq *kwaitq;
+
+ /** TRUE when somebody woke up this entry */
+ sos_bool_t wakeup_triggered;
+
+ /** The status of wakeup for this entry. @see wakeup_status argument
+ of sos_kwaitq_wakeup() */
+ sos_ret_t wakeup_status;
+
+ /** Other entries in this kwaitqueue */
+ struct sos_kwaitq_entry *prev_entry_in_kwaitq, *next_entry_in_kwaitq;
+
+ /** Other entries for the thread */
+ struct sos_kwaitq_entry *prev_entry_for_thread, *next_entry_for_thread;
+};
+
+
+/**
+ * Initialize an empty waitqueue.
+ *
+ * @param name Used only if SOS_KWQ_DEBUG is defined (safe [deep
+ * copied])
+ */
+sos_ret_t sos_kwaitq_init(struct sos_kwaitq *kwq,
+ const char *name);
+
+
+/**
+ * Release a waitqueue, making sure that no thread is in it.
+ *
+ * @return -SOS_EBUSY in case a thread is still in the waitqueue.
+ */
+sos_ret_t sos_kwaitq_dispose(struct sos_kwaitq *kwq);
+
+
+/**
+ * Return whether there are no threads in the waitq
+ */
+sos_bool_t sos_kwaitq_is_empty(const struct sos_kwaitq *kwq);
+
+
+/**
+ * Initialize a waitqueue entry. Mainly consists in setting the
+ * "thread" field of the entry to the current running thread, and in
+ * initializing the rest of the entry to indicate that it does not
+ * belong to any waitq.
+ */
+sos_ret_t sos_kwaitq_init_entry(struct sos_kwaitq_entry *kwq_entry);
+
+
+/**
+ * Add an entry (previously initialized with sos_kwaitq_init_entry())
+ * in the given waitqueue.
+ *
+ * @note: No state change/context switch can occur here ! Among other
+ * things: the currently executing thread is not preempted.
+ */
+sos_ret_t sos_kwaitq_add_entry(struct sos_kwaitq *kwq,
+ struct sos_kwaitq_entry *kwq_entry);
+
+
+/**
+ * Remove the given kwaitq_entry from the kwaitq.
+ *
+ * @note: No state change/context switch can occur here ! Among other
+ * things: the thread associated with the entry is not necessarily
+ * the same as the one currently running, and does not preempt the
+ * currently running thread if they are different.
+ */
+sos_ret_t sos_kwaitq_remove_entry(struct sos_kwaitq *kwq,
+ struct sos_kwaitq_entry *kwq_entry);
+
+
+/**
+ * Helper function to make the currently running thread block in the
+ * given kwaitq, waiting to be woken up by somebody else or by the
+ * given timeout. It relies on sos_kwaitq_add_entry() and
+ * sos_kwaitq_remove_entry().
+ *
+ * @param timeout The desired timeout (can be NULL => wait
+ * forever). It is updated by the function to reflect the remaining
+ * timeout in case the thread has been woken up prior to its
+ * expiration.
+ *
+ * @return -SOS_EINTR when the thread is resumed while it has not been
+ * explicitly woken up by someone calling sos_kwaitq_wakeup() upon
+ * the same waitqueue... This can only happen 1/ if the timeout
+ * expired, or 2/ if the current thread is also in another kwaitq
+ * different from "kwq". Otherwise return the value set by
+ * sos_kwaitq_wakeup(). The remaining timeout is updated in timeout.
+ *
+ * @note This is a BLOCKING FUNCTION
+ */
+sos_ret_t sos_kwaitq_wait(struct sos_kwaitq *kwq,
+ struct sos_time *timeout);
+
+
+/**
+ * Wake up at most nb_threads threads (SOS_KWQ_WAKEUP_ALL to wake
+ * up all threads) in the kwaitq kwq, in FIFO order.
+ *
+ * @param wakeup_status The value returned by sos_kwaitq_wait() when
+ * the thread is effectively woken up by this wakeup.
+ */
+sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq,
+ unsigned int nb_threads,
+ sos_ret_t wakeup_status);
+#define SOS_KWQ_WAKEUP_ALL (~((unsigned int)0))
+
+
+#endif /* _SOS_KWAITQ_H_ */
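
A sketch of a small primitive built directly on this API, in the same style as ksynch.c (the "kevent" structure and its functions are invented for this illustration): the waiter tests a flag and blocks in sos_kwaitq_wait() with IRQs disabled, and the signaller wakes every waiter at once with SOS_KWQ_WAKEUP_ALL.

/* Illustrative one-shot event built on the kwaitq API above. */
#include <hwcore/irq.h>
#include <sos/kwaitq.h>

struct kevent
{
  sos_bool_t        signaled;
  struct sos_kwaitq kwaitq;
};

static sos_ret_t kevent_init(struct kevent *evt, const char *name)
{
  evt->signaled = FALSE;
  return sos_kwaitq_init(& evt->kwaitq, name);
}

static sos_ret_t kevent_wait(struct kevent *evt, struct sos_time *timeout)
{
  sos_ui32_t flags;
  sos_ret_t  retval = SOS_OK;

  sos_disable_IRQs(flags);
  if (! evt->signaled)
    /* Blocks until kevent_signal(), a timeout, or -SOS_EINTR */
    retval = sos_kwaitq_wait(& evt->kwaitq, timeout);
  sos_restore_IRQs(flags);

  return retval;
}

static sos_ret_t kevent_signal(struct kevent *evt)
{
  sos_ui32_t flags;
  sos_ret_t  retval;

  sos_disable_IRQs(flags);
  evt->signaled = TRUE;
  /* Wake up every thread currently blocked in kevent_wait() */
  retval = sos_kwaitq_wakeup(& evt->kwaitq, SOS_KWQ_WAKEUP_ALL, SOS_OK);
  sos_restore_IRQs(flags);

  return retval;
}
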
diff --git a/sos-code-article6.5/sos/list.h b/sos-code-article6.5/sos/list.h
new file mode 100644
index 0000000..67e72f3
--- /dev/null
+++ b/sos-code-article6.5/sos/list.h
@@ -0,0 +1,186 @@
+/* Copyright (C) 2001 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_LIST_H_
+#define _SOS_LIST_H_
+
+/**
+ * @file list.h
+ *
+ * Circular doubly-linked lists implementation entirely based on C
+ * macros
+ */
+
+
+/* *_named are used when next and prev links are not exactly next
+ and prev. For instance when we have next_in_team, prev_in_team,
+ prev_global and next_global */
+
+#define list_init_named(list,prev,next) \
+ ((list) = NULL)
+
+#define list_singleton_named(list,item,prev,next) ({ \
+ (item)->next = (item)->prev = (item); \
+ (list) = (item); \
+})
+
+#define list_is_empty_named(list,prev,next) \
+ ((list) == NULL)
+
+#define list_get_head_named(list,prev,next) \
+ (list)
+
+#define list_get_tail_named(list,prev,next) \
+ ((list)?((list)->prev):NULL)
+
+/* Internal macro : insert before the head == insert at tail */
+#define __list_insert_atleft_named(before_this,item,prev,next) ({ \
+ (before_this)->prev->next = (item); \
+ (item)->prev = (before_this)->prev; \
+ (before_this)->prev = (item); \
+ (item)->next = (before_this); \
+})
+
+/* @note Before_this and item are expected to be valid ! */
+#define list_insert_before_named(list,before_this,item,prev,next) ({ \
+ __list_insert_atleft_named(before_this,item,prev,next); \
+ if ((list) == (before_this)) (list) = (item); \
+})
+
+/** @note After_this and item are expected to be valid ! */
+#define list_insert_after_named(list,after_this,item,prev,next) ({ \
+ (after_this)->next->prev = (item); \
+ (item)->next = (after_this)->next; \
+ (after_this)->next = (item); \
+ (item)->prev = (after_this); \
+})
+
+#define list_add_head_named(list,item,prev,next) ({ \
+ if (list) \
+ list_insert_before_named(list,list,item,prev,next); \
+ else \
+ list_singleton_named(list,item,prev,next); \
+ (list) = (item); \
+})
+
+#define list_add_tail_named(list,item,prev,next) ({ \
+ if (list) \
+ __list_insert_atleft_named(list,item,prev,next); \
+ else \
+ list_singleton_named(list,item,prev,next); \
+})
+
+/** @note NO check whether item really is in list ! */
+#define list_delete_named(list,item,prev,next) ({ \
+ if ( ((item)->next == (item)) && ((item)->prev == (item)) ) \
+ (item)->next = (item)->prev = (list) = NULL; \
+ else { \
+ (item)->prev->next = (item)->next; \
+ (item)->next->prev = (item)->prev; \
+ if ((item) == (list)) (list) = (item)->next; \
+ (item)->prev = (item)->next = NULL; \
+ } \
+})
+
+#define list_pop_head_named(list,prev,next) ({ \
+ typeof(list) __ret_elt = (list); \
+ list_delete_named(list,__ret_elt,prev,next); \
+ __ret_elt; })
+
+/** Loop statement that iterates over all the elements of the list,
+    from head to tail */
+#define list_foreach_forward_named(list,iterator,nb_elements,prev,next) \
+ for (nb_elements=0, (iterator) = (list) ; \
+ (iterator) && (!nb_elements || ((iterator) != (list))) ; \
+ nb_elements++, (iterator) = (iterator)->next )
+
+/** Loop statement that iterates over all the elements of the list,
+    from tail back to head */
+#define list_foreach_backward_named(list,iterator,nb_elements,prev,next) \
+ for (nb_elements=0, (iterator) = list_get_tail_named(list,prev,next) ; \
+ (iterator) && (!nb_elements || \
+ ((iterator) != list_get_tail_named(list,prev,next))) ; \
+ nb_elements++, (iterator) = (iterator)->prev )
+
+#define list_foreach_named list_foreach_forward_named
+
+/** True when we exited early from the foreach loop (ie break) */
+#define list_foreach_early_break(list,iterator,nb_elements) \
+ ((list) && ( \
+ ((list) != (iterator)) || \
+ ( ((list) == (iterator)) && (nb_elements == 0)) ))
+
+/** Loop statement that also removes the item at each iteration */
+#define list_collapse_named(list,iterator,prev,next) \
+ for ( ; ({ ((iterator) = (list)) ; \
+ if (list) list_delete_named(list,iterator,prev,next) ; \
+ (iterator); }) ; )
+
+
+/*
+ * the same macros : assume that the prev and next fields are really
+ * named "prev" and "next"
+ */
+
+#define list_init(list) \
+ list_init_named(list,prev,next)
+
+#define list_singleton(list,item) \
+ list_singleton_named(list,item,prev,next)
+
+#define list_is_empty(list) \
+ list_is_empty_named(list,prev,next)
+
+#define list_get_head(list) \
+  list_get_head_named(list,prev,next)
+
+#define list_get_tail(list) \
+  list_get_tail_named(list,prev,next)
+
+/* @note After_this and item are expected to be valid ! */
+#define list_insert_after(list,after_this,item) \
+ list_insert_after_named(list,after_this,item,prev,next)
+
+/* @note Before_this and item are expected to be valid ! */
+#define list_insert_before(list,before_this,item) \
+ list_insert_before_named(list,before_this,item,prev,next)
+
+#define list_add_head(list,item) \
+ list_add_head_named(list,item,prev,next)
+
+#define list_add_tail(list,item) \
+ list_add_tail_named(list,item,prev,next)
+
+/* @note NO check whether item really is in list ! */
+#define list_delete(list,item) \
+ list_delete_named(list,item,prev,next)
+
+#define list_pop_head(list) \
+ list_pop_head_named(list,prev,next)
+
+#define list_foreach_forward(list,iterator,nb_elements) \
+ list_foreach_forward_named(list,iterator,nb_elements,prev,next)
+
+#define list_foreach_backward(list,iterator,nb_elements) \
+ list_foreach_backward_named(list,iterator,nb_elements,prev,next)
+
+#define list_foreach list_foreach_forward
+
+#define list_collapse(list,iterator) \
+ list_collapse_named(list,iterator,prev,next)
+
+#endif /* _SOS_LIST_H_ */
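
A small usage sketch of the non-_named macros (struct and function names are made up): any structure embedding "prev"/"next" pointers can be chained into such a circular list, filled in FIFO order with list_add_tail(), walked with list_foreach() and drained with list_pop_head().

/* Illustrative only: a trivial job queue based on the macros above. */
#include <sos/list.h>

struct job
{
  int         id;
  struct job *prev, *next; /* Required by the non-_named macros */
};

static void job_queue_sketch(struct job jobs[], int nb_jobs)
{
  struct job *queue; /* Head of the circular list */
  struct job *j;
  int i, nb;

  list_init(queue); /* queue == NULL: the list is empty */

  for (i = 0 ; i < nb_jobs ; i++)
    list_add_tail(queue, & jobs[i]); /* FIFO insertion */

  list_foreach(queue, j, nb) /* Iterate from head to tail */
    j->id = nb; /* nb counts the iterations */

  while (! list_is_empty(queue))
    {
      struct job *head = list_pop_head(queue);
      /* ... process 'head' ... */
      (void)head;
    }
}
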
diff --git a/sos-code-article6.5/sos/macros.h b/sos-code-article6.5/sos/macros.h
new file mode 100644
index 0000000..80a05d3
--- /dev/null
+++ b/sos-code-article6.5/sos/macros.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2004 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_MACROS_H_
+#define _SOS_MACROS_H_
+
+/** Align on a boundary (MUST be a power of 2), so that return value <= val */
+#define SOS_ALIGN_INF(val,boundary) \
+ (((unsigned)(val)) & (~((boundary)-1)))
+
+/** Align on a boundary (MUST be a power of 2), so that return value >= val */
+#define SOS_ALIGN_SUP(val,boundary) \
+ ({ unsigned int __bnd=(boundary); \
+ (((((unsigned)(val))-1) & (~(__bnd - 1))) + __bnd); })
+
+/** Check whether val is aligned on a boundary (MUST be a power of 2) */
+#define SOS_IS_ALIGNED(val,boundary) \
+ ( 0 == (((unsigned)(val)) & ((boundary)-1)) )
+
+/**
+ * @return TRUE if val is a power of 2.
+ * @note val is evaluated multiple times
+ */
+#define SOS_IS_POWER_OF_2(val) \
+ ((((val) - 1) & (val)) == 0)
+
+#endif /* _SOS_MACROS_H_ */
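
A few worked values for these macros, taking a 4 kB (0x1000) boundary as the paging code does; the assertions below merely state the expected results.

/* Illustrative only: expected results of the alignment macros. */
#include <sos/macros.h>
#include <sos/assert.h>

static void macros_sketch(void)
{
  SOS_ASSERT_FATAL(SOS_ALIGN_INF(0x1234, 0x1000) == 0x1000);
  SOS_ASSERT_FATAL(SOS_ALIGN_SUP(0x1234, 0x1000) == 0x2000);
  SOS_ASSERT_FATAL(SOS_ALIGN_SUP(0x2000, 0x1000) == 0x2000); /* Already aligned */
  SOS_ASSERT_FATAL(SOS_IS_ALIGNED(0x2000, 0x1000));
  SOS_ASSERT_FATAL(SOS_IS_POWER_OF_2(0x1000));
  SOS_ASSERT_FATAL(! SOS_IS_POWER_OF_2(0x1001));
}
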
diff --git a/sos-code-article6.5/sos/main.c b/sos-code-article6.5/sos/main.c
new file mode 100644
index 0000000..6201f47
--- /dev/null
+++ b/sos-code-article6.5/sos/main.c
@@ -0,0 +1,461 @@
+/* Copyright (C) 2004 The SOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+/* Include definitions of the multiboot standard */
+#include <bootstrap/multiboot.h>
+#include <hwcore/idt.h>
+#include <hwcore/gdt.h>
+#include <hwcore/irq.h>
+#include <hwcore/exception.h>
+#include <hwcore/i8254.h>
+#include <sos/list.h>
+#include <sos/physmem.h>
+#include <hwcore/paging.h>
+#include <sos/kmem_vmm.h>
+#include <sos/kmalloc.h>
+#include <sos/time.h>
+#include <sos/thread.h>
+#include <sos/klibc.h>
+#include <sos/assert.h>
+#include <drivers/x86_videomem.h>
+#include <drivers/bochs.h>
+
+
+/* Helper function to display each bit of a 32-bit integer on the
+   screen as a dark or light caret */
+void display_bits(unsigned char row, unsigned char col,
+ unsigned char attribute,
+ sos_ui32_t integer)
+{
+ int i;
+ /* Scan each bit of the integer, MSb first */
+ for (i = 31 ; i >= 0 ; i--)
+ {
+ /* Test if bit i of 'integer' is set */
+ int bit_i = (integer & (1 << i));
+      /* Ascii 219 => dark caret, Ascii 177 => light caret */
+ unsigned char ascii_code = bit_i?219:177;
+ sos_x86_videomem_putchar(row, col++,
+ attribute,
+ ascii_code);
+ }
+}
+
+
+/* Clock IRQ handler */
+static void clk_it(int intid)
+{
+ static sos_ui32_t clock_count = 0;
+
+ display_bits(0, 48,
+ SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE,
+ clock_count);
+ clock_count++;
+
+ /* Execute the expired timeout actions (if any) */
+ sos_time_do_tick();
+}
+
+
+/* ======================================================================
+ * Page fault exception handling
+ */
+
+/* Helper function to dump a backtrace on bochs and/or the console */
+static void dump_backtrace(const struct sos_cpu_state *cpu_state,
+ sos_vaddr_t stack_bottom,
+ sos_size_t stack_size,
+ sos_bool_t on_console,
+ sos_bool_t on_bochs)
+{
+ void backtracer(sos_vaddr_t PC,
+ sos_vaddr_t params,
+ sos_ui32_t depth,
+ void *custom_arg)
+ {
+ sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;
+
+    /* Get the addresses of the first 4 arguments from the
+       frame. Among these arguments, 0 to 4 of them might be
+       meaningful (depending on how many arguments the function may
+       take). */
+ arg1 = (sos_ui32_t*)params;
+ arg2 = (sos_ui32_t*)(params+4);
+ arg3 = (sos_ui32_t*)(params+8);
+ arg4 = (sos_ui32_t*)(params+12);
+
+ /* Make sure the addresses of these arguments fit inside the
+ stack boundaries */
+#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
+ && ((sos_vaddr_t)(v) < (u)) )
+ if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
+ arg1 = &invalid;
+ if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
+ arg2 = &invalid;
+ if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
+ arg3 = &invalid;
+ if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
+ arg4 = &invalid;
+
+ /* Print the function context for this frame */
+ if (on_bochs)
+ sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
+ (unsigned)depth, (unsigned)PC,
+ (unsigned)*arg1, (unsigned)*arg2,
+ (unsigned)*arg3);
+
+ if (on_console)
+ sos_x86_videomem_printf(23-depth, 3,
+ SOS_X86_VIDEO_BG_BLUE
+ | SOS_X86_VIDEO_FG_LTGREEN,
+ "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
+ (unsigned)depth, PC,
+ (unsigned)*arg1, (unsigned)*arg2,
+ (unsigned)*arg3, (unsigned)*arg4);
+
+ }
+
+ sos_backtrace(cpu_state, 15, stack_bottom, stack_size, backtracer, NULL);
+}
+
+
+/* Page fault exception handler with demand paging for the kernel */
+static void pgflt_ex(int intid, const struct sos_cpu_state *ctxt)
+{
+ static sos_ui32_t demand_paging_count = 0;
+ sos_vaddr_t faulting_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt);
+ sos_paddr_t ppage_paddr;
+
+ /* Check if address is covered by any VMM range */
+ if (! sos_kmem_vmm_is_valid_vaddr(faulting_vaddr))
+ {
+ /* No: The page fault is out of any kernel virtual region. For
+ the moment, we don't handle this. */
+ dump_backtrace(ctxt,
+ bootstrap_stack_bottom,
+ bootstrap_stack_size,
+ TRUE, TRUE);
+ sos_display_fatal_error("Unresolved page Fault at instruction 0x%x on access to address 0x%x (info=%x)!",
+ sos_cpu_context_get_PC(ctxt),
+ (unsigned)faulting_vaddr,
+ (unsigned)sos_cpu_context_get_EX_info(ctxt));
+ SOS_ASSERT_FATAL(! "Got page fault (note: demand paging is disabled)");
+ }
+
+
+ /*
+ * Demand paging
+ */
+
+ /* Update the number of demand paging requests handled */
+ demand_paging_count ++;
+ display_bits(0, 0,
+ SOS_X86_VIDEO_FG_LTRED | SOS_X86_VIDEO_BG_BLUE,
+ demand_paging_count);
+
+ /* Allocate a new page for the virtual address */
+ ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
+ if (! ppage_paddr)
+ SOS_ASSERT_FATAL(! "TODO: implement swap. (Out of mem in demand paging because no swap for kernel yet !)");
+ SOS_ASSERT_FATAL(SOS_OK == sos_paging_map(ppage_paddr,
+ SOS_PAGE_ALIGN_INF(faulting_vaddr),
+ FALSE,
+ SOS_VM_MAP_PROT_READ
+ | SOS_VM_MAP_PROT_WRITE
+ | SOS_VM_MAP_ATOMIC));
+ sos_physmem_unref_physpage(ppage_paddr);
+
+ /* Ok, we can now return to interrupted context */
+}
+
+
+/* ======================================================================
+ * Demonstrate the use of SOS kernel threads
+ * - Kernel Threads are created with various priorities and their
+ * state is printed on both the console and the bochs' 0xe9 port
+ * - For tests regarding threads' synchronization, see mouse_sim.c
+ */
+
+struct thr_arg
+{
+ char character;
+ int color;
+
+ int col;
+ int row;
+};
+
+
+static void demo_thread(void *arg)
+{
+ struct thr_arg *thr_arg = (struct thr_arg*)arg;
+ int progress = 0;
+
+ sos_bochs_printf("start %c", thr_arg->character);
+ while (1)
+ {
+ progress ++;
+ display_bits(thr_arg->row, thr_arg->col+1, thr_arg->color, progress);
+
+ sos_bochs_putchar(thr_arg->character);
+
+ /* Yield the CPU to another thread sometimes... */
+ if ((random() % 100) == 0)
+ {
+ sos_bochs_printf("yield(%c)\n", thr_arg->character);
+ sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'Y');
+ SOS_ASSERT_FATAL(SOS_OK == sos_thread_yield());
+ sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R');
+ }
+
+ /* Go to sleep some other times... */
+ else if ((random() % 200) == 0)
+ {
+ struct sos_time t = (struct sos_time){ .sec=0, .nanosec=50000000 };
+ sos_bochs_printf("sleep1(%c)\n", thr_arg->character);
+ sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 's');
+ SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t));
+ SOS_ASSERT_FATAL(sos_time_is_zero(& t));
+ sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R');
+ }
+
+ /* Go to sleep for a longer time some other times... */
+ else if ((random() % 300) == 0)
+ {
+ struct sos_time t = (struct sos_time){ .sec=0, .nanosec=300000000 };
+ sos_bochs_printf("sleep2(%c)\n", thr_arg->character);
+ sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'S');
+ SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t));
+ SOS_ASSERT_FATAL(sos_time_is_zero(& t));
+ sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R');
+ }
+
+ /* Infinite loop otherwise */
+ }
+}
+
+
+static void test_thread()
+{
+ /* "static" variables because we want them to remain even when the
+ function returns */
+ static struct thr_arg arg_b, arg_c, arg_d, arg_e, arg_R, arg_S;
+ sos_ui32_t flags;
+
+ sos_disable_IRQs(flags);
+
+ arg_b = (struct thr_arg) { .character='b', .col=0, .row=21, .color=0x14 };
+ sos_create_kernel_thread("YO[b]", demo_thread, (void*)&arg_b);
+
+ arg_c = (struct thr_arg) { .character='c', .col=46, .row=21, .color=0x14 };
+ sos_create_kernel_thread("YO[c]", demo_thread, (void*)&arg_c);
+
+ arg_d = (struct thr_arg) { .character='d', .col=0, .row=20, .color=0x14 };
+ sos_create_kernel_thread("YO[d]", demo_thread, (void*)&arg_d);
+
+ arg_e = (struct thr_arg) { .character='e', .col=0, .row=19, .color=0x14 };
+ sos_create_kernel_thread("YO[e]", demo_thread, (void*)&arg_e);
+
+ arg_R = (struct thr_arg) { .character='R', .col=0, .row=17, .color=0x1c };
+ sos_create_kernel_thread("YO[R]", demo_thread, (void*)&arg_R);
+
+ arg_S = (struct thr_arg) { .character='S', .col=0, .row=16, .color=0x1c };
+ sos_create_kernel_thread("YO[S]", demo_thread, (void*)&arg_S);
+
+ sos_restore_IRQs(flags);
+}
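+
+/* Note on the sequence above: IRQs are kept disabled around the whole
+   series of sos_create_kernel_thread() calls (the nested
+   sos_restore_IRQs() inside that function restores an already-disabled
+   state), so the timer IRQ cannot trigger a reschedule before all six
+   demo threads have been queued. */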
+
+
+/* ======================================================================
+ * An operating system MUST always have a ready thread ! Otherwise:
+ * what would the CPU have to execute ?!
+ */
+static void idle_thread()
+{
+ sos_ui32_t idle_twiddle = 0;
+
+ while (1)
+ {
+ /* Remove this instruction if you get an "Invalid opcode" CPU
+ exception (old 80386 CPU) */
+ asm("hlt\n");
+
+ idle_twiddle ++;
+ display_bits(0, 0, SOS_X86_VIDEO_FG_GREEN | SOS_X86_VIDEO_BG_BLUE,
+ idle_twiddle);
+
+ /* Lend the CPU to some other thread */
+ sos_thread_yield();
+ }
+}
+
+
+/* ======================================================================
+ * The C entry point of our operating system
+ */
+void sos_main(unsigned long magic, unsigned long addr)
+{
+ unsigned i;
+ sos_paddr_t sos_kernel_core_base_paddr, sos_kernel_core_top_paddr;
+ struct sos_time tick_resolution;
+
+  /* Grub sends us a structure, called multiboot_info_t, with a lot of
+     precious information about the system; see the multiboot
+     documentation for more details. */
+ multiboot_info_t *mbi;
+ mbi = (multiboot_info_t *) addr;
+
+ /* Setup bochs and console, and clear the console */
+ sos_bochs_setup();
+
+ sos_x86_videomem_setup();
+ sos_x86_videomem_cls(SOS_X86_VIDEO_BG_BLUE);
+
+ /* Greetings from SOS */
+ if (magic == MULTIBOOT_BOOTLOADER_MAGIC)
+ /* Loaded with Grub */
+ sos_x86_videomem_printf(1, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)",
+ "SOS article 6.5", ',',
+ (unsigned)(mbi->mem_upper >> 10) + 1,
+ (unsigned)mbi->mem_upper);
+ else
+ /* Not loaded with grub */
+ sos_x86_videomem_printf(1, 0,
+ SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Welcome to SOS article 6.5");
+
+ sos_bochs_putstring("Message in a bochs: This is SOS article 6.5.\n");
+
+ /* Setup CPU segmentation and IRQ subsystem */
+ sos_gdt_subsystem_setup();
+ sos_idt_subsystem_setup();
+
+ /* Setup SOS IRQs and exceptions subsystem */
+ sos_exception_subsystem_setup();
+ sos_irq_subsystem_setup();
+
+ /* Configure the timer so as to raise the IRQ0 at a 100Hz rate */
+ sos_i8254_set_frequency(100);
+
+ /* Setup the kernel time subsystem to get prepared to take the timer
+ ticks into account */
+ tick_resolution = (struct sos_time) { .sec=0, .nanosec=10000000UL };
+ sos_time_subsysem_setup(& tick_resolution);
+
+ /* We need a multiboot-compliant boot loader to get the size of the RAM */
+ if (magic != MULTIBOOT_BOOTLOADER_MAGIC)
+ {
+ sos_x86_videomem_putstring(20, 0,
+ SOS_X86_VIDEO_FG_LTRED
+ | SOS_X86_VIDEO_BG_BLUE
+ | SOS_X86_VIDEO_FG_BLINKING,
+ "I'm not loaded with Grub !");
+ /* STOP ! */
+ for (;;)
+ continue;
+ }
+
+ /*
+ * Some interrupt handlers
+ */
+
+ /* Binding some HW interrupts and exceptions to software routines */
+ sos_irq_set_routine(SOS_IRQ_TIMER,
+ clk_it);
+
+ /*
+ * Setup physical memory management
+ */
+
+ /* Multiboot says: "The value returned for upper memory is maximally
+ the address of the first upper memory hole minus 1 megabyte.". It
+ also adds: "It is not guaranteed to be this value." aka "YMMV" ;) */
+ sos_physmem_subsystem_setup((mbi->mem_upper<<10) + (1<<20),
+ & sos_kernel_core_base_paddr,
+ & sos_kernel_core_top_paddr);
+
+ /*
+ * Switch to paged-memory mode
+ */
+
+  /* Disabling interrupts might seem more correct, but it's not really
+     necessary at this stage */
+ SOS_ASSERT_FATAL(SOS_OK ==
+ sos_paging_subsystem_setup(sos_kernel_core_base_paddr,
+ sos_kernel_core_top_paddr));
+
+ /* Bind the page fault exception */
+ sos_exception_set_routine(SOS_EXCEPT_PAGE_FAULT,
+ pgflt_ex);
+
+ /*
+ * Setup kernel virtual memory allocator
+ */
+
+ if (sos_kmem_vmm_subsystem_setup(sos_kernel_core_base_paddr,
+ sos_kernel_core_top_paddr,
+ bootstrap_stack_bottom,
+ bootstrap_stack_bottom
+ + bootstrap_stack_size))
+ sos_bochs_printf("Could not setup the Kernel virtual space allocator\n");
+
+ if (sos_kmalloc_subsystem_setup())
+ sos_bochs_printf("Could not setup the Kmalloc subsystem\n");
+
+ /*
+ * Initialize the Kernel thread and scheduler subsystems
+ */
+
+ /* Initialize kernel thread subsystem */
+ sos_thread_subsystem_setup(bootstrap_stack_bottom,
+ bootstrap_stack_size);
+
+ /* Initialize the scheduler */
+ sos_sched_subsystem_setup();
+
+ /* Declare the IDLE thread */
+ SOS_ASSERT_FATAL(sos_create_kernel_thread("idle", idle_thread, NULL) != NULL);
+
+  /* Enable the HW interrupts here: from now on, the timer HW
+     interrupt will invoke the scheduler */
+ asm volatile ("sti\n");
+
+
+ /* Now run some Kernel threads just for fun ! */
+ extern void MouseSim();
+ MouseSim();
+ test_thread();
+
+ /*
+ * We can safely exit from this function now, for there is already
+ * an idle Kernel thread ready to make the CPU busy working...
+ *
+   * However, we must EXPLICITLY call sos_thread_exit() because a
+   * simple "return" would return nowhere ! Actually this first thread
+   * was initialized by the Grub bootstrap stage, at a time when the
+   * word "thread" did not exist. This means that the stack was not
+   * set up in a way that a return here would call sos_thread_exit()
+   * automagically. Hence we must call it manually. This is the ONLY
+ * kernel thread where we must do this manually.
+ */
+ sos_bochs_printf("Bye from primary thread !\n");
+ sos_thread_exit();
+ SOS_FATAL_ERROR("No trespassing !");
+}
diff --git a/sos-code-article6.5/sos/mouse_sim.c b/sos-code-article6.5/sos/mouse_sim.c
new file mode 100644
index 0000000..c5eb6bf
--- /dev/null
+++ b/sos-code-article6.5/sos/mouse_sim.c
@@ -0,0 +1,803 @@
+/***************************************************************************
+ * Copyright (C) 2004 by cyril dupuit *
+ * cyrildupuit@hotmail.com *
+ * http://perso.wanadoo.fr/koalys/ *
+ * (Adaptation for SOS by d2 -- 2004/12/20) *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+//*****************************************************************************
+// Module name  : MouseSim.c
+// Description  : Creation and destruction of cheese-eating mice
+//*****************************************************************************
+
+#include <sos/assert.h>
+#include <sos/klibc.h>
+#include <sos/thread.h>
+#include <sos/ksynch.h>
+#include <sos/kmalloc.h>
+#include <drivers/x86_videomem.h>
+
+// History :
+// 20/12/04 : Removed DestroyMap and the kbd handler in the LM version (d2)
+// 26/11/04 : Bug found and fixed in the DestroyMap function
+// 21/11/04 : Creation of the module V1.0
+
+//*****************************************************************************
+// Constant definitions :
+//*****************************************************************************
+#define MAP_X 76
+#define MAP_Y 12
+#define MAP_SIZE (MAP_X * MAP_Y)
+
+#define MOUSE 0x01
+#define CHEESE 0x02
+#define OBSTACLE 0x04
+#define INPUT 0x08
+#define OUTPUT 0x10
+
+#define OBSTACLE_COUNT 100
+#define CHEESE_COUNT 650
+
+#define MOUSE_FULL 0x01
+#define MOUSE_EMPTY 0x02
+#define CHEESE_FOUND 0x04
+#define MOUSE_EXITED 0x08
+
+#define MOUSE_SPEED_MAX 1000
+#define MOUSE_SPEED_MIN 4
+
+typedef unsigned int Color_t;
+
+struct Point{
+ int X;
+ int Y;
+ };
+
+typedef struct Point Point_t;
+
+#define Set(Reg, Flag) Reg = (Reg | Flag)
+#define Reset(Reg, Flag) Reg = (Reg &(~Flag))
+#define IsSet(Reg, Flag) (Reg & Flag)
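+
+// These are plain bit-flag helpers: for instance Set(pMouse->Status,
+// MOUSE_FULL) turns the MOUSE_FULL bit on, Reset(...) clears it and
+// IsSet(...) evaluates to non-zero when the bit is set.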
+
+
+//*****************************************************************************
+// Management structure for one map element
+//*****************************************************************************
+struct Element{
+	sos_ui32_t Type;//Type of the element
+ sos_ui32_t Status;
+	Color_t Color;//Color of the element
+	Point_t P;//Coordinates of the element
+	struct sos_thread * ThreadID;//Thread associated with the mouse
+	int Way;//Direction of the mouse
+ };
+
+typedef struct Element Element_t;
+
+//*****************************************************************************
+// Function/procedure prototypes :
+//*****************************************************************************
+static void MouseCommander(void);
+static void DrawMap(void);
+static sos_ret_t CreateMap(void);
+static sos_ret_t InitMapInput(Element_t * * pMap);
+static sos_ret_t InitMapOutput(Element_t * * pMap);
+static sos_ret_t ElementInit(Element_t * * pMap, unsigned int Type);
+static void Mouse(unsigned long Param);
+static void MouseMove(Point_t * P);
+static Point_t ChoosePosition(Element_t * pMouse, int Positions[], int Count);
+static int EvaluatePositions(Point_t Org, int Positions[], Point_t * Cheese);
+static sos_bool_t IsCollision(Point_t Org, Point_t p, Point_t *Cheese);
+static sos_bool_t AffectMovement(Point_t Org, Point_t p);
+static void MouseCreator(void);
+static sos_ret_t CreateMouse(void);
+
+//*****************************************************************************
+// Global variables of this module :
+//*****************************************************************************
+
+static Element_t * * pMap;
+static struct sos_ksema SemMap;
+static struct sos_ksema SemMouse;
+static int MouseCount = 0;
+static int CheeseCount = 0;
+static int ObstacleCount = 0;
+static int MouseSpeed = 100;
+
+//*****************************************************************************
+// Koalys Glue
+//*****************************************************************************
+void DrawPixel(int x, int y, Color_t color)
+{
+ sos_x86_videomem_putchar(y+3, x+2, color, 219);
+}
+
+
+
+//*****************************************************************************
+// Entry point of the 'simulation'
+//*****************************************************************************
+void MouseSim(void)
+{
+	//Create the semaphore protecting the map
+ SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& SemMap, "SemMap", 1));
+
+	//Create the mouse-creation semaphore
+ SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& SemMouse, "SemMouse", 2));
+
+	//Create the map
+ SOS_ASSERT_FATAL(SOS_OK == CreateMap());
+
+	//Create the mouse-creator thread
+ SOS_ASSERT_FATAL(sos_create_kernel_thread("MouseCreator",
+ (sos_kernel_thread_start_routine_t)MouseCreator,
+ 0) != NULL);
+
+}
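+
+// Synchronization design of this module: SemMap (initial value 1) is
+// used as a mutex protecting the shared pMap array, while SemMouse
+// (initial value 2) counts how many mice may still be created; it is
+// up()'ed each time a full mouse drops its cheese at the map entrance
+// (see IsCollision()).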
+
+
+//*****************************************************************************
+// Purpose of the function : Create and initialize the map
+// Input : None
+// Return value : a negative error (-SOS_ENOMEM/-SOS_EFATAL) if memory is
+// insufficient, SOS_OK otherwise
+//*****************************************************************************
+static sos_ret_t CreateMap(void)
+{
+ pMap = (Element_t * *)sos_kmalloc(MAP_SIZE * sizeof(Element_t *), 0);
+ if(pMap == NULL) return -SOS_ENOMEM;
+
+	//Clear the map
+ memset(pMap, 0, MAP_SIZE * sizeof(Element_t *));
+
+	//Initialize the map entrance
+	if(SOS_OK != InitMapInput(pMap))
+		{//Not enough memory
+ return -SOS_EFATAL;
+ }
+
+	//Initialize the map exit
+	if(InitMapOutput(pMap) != SOS_OK)
+		{//Not enough memory
+ return -SOS_EFATAL;
+ }
+
+	//Initialize the cheese
+	if(ElementInit(pMap, CHEESE) != SOS_OK)
+		{//Not enough memory
+ return -SOS_EFATAL;
+ }
+
+	//Initialize the obstacles
+	if(ElementInit(pMap, OBSTACLE) != SOS_OK)
+		{//Not enough memory
+ return -SOS_EFATAL;
+ }
+
+	DrawMap();//Display the newly created map
+
+ return SOS_OK;
+}
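+
+// The map is a flat array of MAP_X * MAP_Y element pointers: the cell at
+// coordinates (X, Y) is stored at index (Y * MAP_X) + X, and a NULL
+// pointer means the cell is empty.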
+
+//*****************************************************************************
+// Purpose of the procedure : Draw the map on the screen
+// Input : None
+// Output : None
+//*****************************************************************************
+static void DrawMap(void)
+{
+ unsigned int I;
+
+ for(I = 0; I < MAP_SIZE; I++)
+ {
+ if(pMap[I] != NULL)
+ {
+ DrawPixel(I % MAP_X, I/MAP_X, pMap[I]->Color);
+ }
+ else DrawPixel(I % MAP_X, I/MAP_X, SOS_X86_VIDEO_FG_BLACK);
+ }
+ sos_x86_videomem_printf(23, 0, SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Souris = %d; Fromages = %d; Obstacles = %d ",
+ MouseCount, CheeseCount, ObstacleCount);
+}
+
+//*****************************************************************************
+// Purpose of the function : Initialize the map entrance
+// Input :
+//	pMap : Pointer to the map
+// Return value : -SOS_ENOMEM if memory is insufficient, SOS_OK otherwise
+//*****************************************************************************
+static sos_ret_t InitMapInput(Element_t * * pMap)
+{
+ Element_t * pElement;
+
+	//Define the entry point
+	pElement = (Element_t *)sos_kmalloc(sizeof(Element_t), 0);
+	if(pElement == NULL) return -SOS_ENOMEM;
+
+	//Initialize the entrance
+ pElement->Type = INPUT;
+ pElement->Status = 0;
+ pElement->Color = SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE;
+ pElement->P.X = 0;
+ pElement->P.Y = MAP_Y / 2;
+ pElement->ThreadID = 0;
+
+ pMap[(pElement->P.Y * MAP_X) + pElement->P.X] = pElement;
+
+ return SOS_OK;
+}
+
+//*****************************************************************************
+// Purpose of the function : Initialize the map exit
+// Input :
+//	pMap : Pointer to the map
+// Return value : -SOS_ENOMEM if memory is insufficient, SOS_OK otherwise
+//*****************************************************************************
+static sos_ret_t InitMapOutput(Element_t * * pMap)
+{
+ Element_t * pElement;
+
+	//Define the exit point
+	pElement = (Element_t *)sos_kmalloc(sizeof(Element_t), 0);
+	if(pElement == NULL) return -SOS_ENOMEM;
+
+	//Initialize the exit
+ pElement->Type = OUTPUT;
+ pElement->Status = 0;
+ pElement->Color = SOS_X86_VIDEO_FG_LTBLUE;
+ pElement->P.X = MAP_X - 1;
+ pElement->P.Y = MAP_Y / 2;
+ pElement->ThreadID = 0;
+
+ pMap[(pElement->P.Y * MAP_X) + pElement->P.X] = pElement;
+
+ return SOS_OK;
+}
+
+//*****************************************************************************
+// Purpose of the function : Initialize one type of object on the map
+// Input :
+//	pMap : Pointer to the map
+//	Type : Type of object to initialize
+// Return value : -SOS_EINVAL for an unknown type, -SOS_ENOMEM if memory
+// is insufficient, SOS_OK otherwise
+//*****************************************************************************
+static sos_ret_t ElementInit(Element_t * * pMap, unsigned int Type)
+{
+ unsigned int I, J;
+ unsigned int Max;
+ Color_t Color;
+
+ if(Type == CHEESE)
+		{//Element type = cheese
+ Max = CHEESE_COUNT;
+ Color = SOS_X86_VIDEO_FG_YELLOW;
+ }
+ else if(Type == OBSTACLE)
+		{//Element type = obstacle
+ Max = OBSTACLE_COUNT;
+ Color = SOS_X86_VIDEO_FG_GREEN;
+ }
+ else
+		{//No other type is recognized
+ return -SOS_EINVAL;
+ }
+
+ for(I = 0; I < Max; I++)
+		{//Draw random locations for the elements
+ J = random();
+ J += random();
+ J %= MAP_SIZE;
+ if(pMap[J] == NULL)
+			{//If the slot is free
+ pMap[J] = (Element_t *)sos_kmalloc(sizeof(Element_t),
+ 0);
+ if(pMap[J] == NULL) return -SOS_ENOMEM;
+
+ pMap[J]->Type = Type;
+			//Initialize the element
+ if(Type == CHEESE)
+				{//Element type = cheese
+ CheeseCount++;
+ }
+ else if(Type == OBSTACLE)
+				{//Element type = obstacle
+ ObstacleCount++;
+ }
+
+ pMap[J]->Color = Color;
+ pMap[J]->Status = 0;
+ pMap[J]->Color = Color;
+ pMap[J]->P.X = J % MAP_X;
+ pMap[J]->P.Y = J / MAP_X;
+ pMap[J]->ThreadID = 0;
+ }
+ }
+
+ return SOS_OK;
+}
+
+
+//*****************************************************************************
+// Purpose of the thread : Move the mouse on the map according to the
+// established rules.
+// Rules :
+//	- The mouse must start in front of the entrance, then begin to
+//	collect cheese.
+//	- As soon as the mouse has picked up a piece of cheese, it must go
+//	back to the map entrance to drop off its harvest.
+//	- Once a mouse has dropped off its harvest, another mouse is created.
+//	- If a mouse takes the exit, it is removed.
+//*****************************************************************************
+static void Mouse(unsigned long Param)
+{
+ Element_t * pMouse = (Element_t *)Param;
+ Point_t P;
+
+ SOS_ASSERT_FATAL(pMouse != NULL);
+
+	//Starting position of the mouse
+	P = pMouse->P;
+
+ while(1)
+ {
+ int delay_ms;
+ struct sos_time delay;
+
+	//The mouse must move
+ sos_ksema_down(& SemMap, NULL);
+
+ MouseMove(&P);
+
+ sos_ksema_up(& SemMap);
+
+	// Has the mouse exited the map ?
+	if (IsSet(pMouse->Status, MOUSE_EXITED))
+	  // Yes => leave the loop
+ break;
+
+	// Delay between MOUSE_SPEED_MIN and MOUSE_SPEED_MIN + MouseSpeed - 1 ms
+ delay_ms = MOUSE_SPEED_MIN + (random() % MouseSpeed);
+ delay.sec = delay_ms / 1000;
+ delay.nanosec = (delay_ms % 1000) * 1000000;
+ sos_thread_sleep(& delay);
+ }
+
+	// Free the associated structure
+ sos_kfree((sos_vaddr_t)pMouse);
+}
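+
+// Lifecycle of a mouse thread: each move is performed while holding
+// SemMap, and the loop is left once AffectMovement() has marked the
+// mouse MOUSE_EXITED (it stepped on the OUTPUT cell). The map slot has
+// already been cleared at that point, so the thread only frees its
+// Element_t and returns, terminating through sos_thread_exit().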
+
+//*****************************************************************************
+// Purpose of the procedure : Move the mouse randomly on the map
+// Inputs :
+//	P : Current position of the mouse
+// Outputs :
+//	P : Next position of the mouse
+//*****************************************************************************
+static void MouseMove(Point_t * P)
+{
+ Point_t Org;
+ Point_t p;
+ Point_t Cheese;
+ int Positions[8];
+ int Count = 0;
+ Element_t * pMouse;
+
+ Org = *P;
+
+ pMouse = pMap[Org.X + (Org.Y * MAP_X)];
+
+ Count = EvaluatePositions(Org, Positions, &Cheese);
+
+ if(Count == 0) return;
+
+ p = Org;
+
+ if(IsSet(pMouse->Status, CHEESE_FOUND))
+		{//Take the cheese
+ Reset(pMouse->Status, CHEESE_FOUND);
+ p = Cheese;
+ }
+ else
+		{//Pick a position at random
+ p = ChoosePosition(pMouse, Positions, Count);
+ }
+ if(AffectMovement(Org, p) == FALSE) return;
+	//Move the mouse
+ pMap[Org.X + (Org.Y * MAP_X)] = NULL;
+ pMap[p.X + (p.Y * MAP_X)] = pMouse;
+ pMouse->P = p;
+	//Update the display
+ DrawPixel(Org.X, Org.Y, SOS_X86_VIDEO_FG_BLACK);
+ DrawPixel(p.X, p.Y, pMouse->Color);
+ sos_x86_videomem_printf( 23,0, SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, "Souris = %d; Fromages = %d; Obstacles = %d ", MouseCount, CheeseCount, ObstacleCount);
+	//Update the coordinates
+ *P = p;
+}
+
+//*****************************************************************************
+// Purpose of the function : Choose a movement
+// Input :
+//	pMouse : Pointer to the mouse
+//	Positions : Array of possible positions
+//	Count : Number of valid positions
+// Output : None
+// Return value : Chosen position
+//*****************************************************************************
+static Point_t ChoosePosition(Element_t * pMouse, int Positions[], int Count)
+{
+ int I, J;
+ Point_t p;
+
+ for(J = 0; J < Count; J++)
+		{//Look in the array whether this position is available
+ I = Positions[J];
+ if(I == pMouse->Way)
+			{//Keep going in the current direction
+ p = pMouse->P;
+ switch(I)
+ {
+ case 0:
+ p.Y++;
+ break;
+ case 1:
+ p.X++;
+ p.Y++;
+ break;
+ case 2:
+ p.X++;
+ break;
+ case 3:
+ p.Y--;
+ p.X++;
+ break;
+ case 4:
+ p.Y--;
+ break;
+ case 5:
+ p.Y--;
+ p.X--;
+ break;
+ case 6:
+ p.X--;
+ break;
+ case 7:
+ p.X--;
+ p.Y++;
+ break;
+ }
+ return p;
+ }
+ }
+
+ J = random() % Count;
+ I = Positions[J];
+ if(((I + 4) % 8) == pMouse->Way)
+		{//Avoid going backwards
+ J = (J + 1) % Count;
+ I = Positions[J];
+ }
+
+ p = pMouse->P;
+ switch(I)
+		{//Translate the direction into a displacement
+ case 0:
+ p.Y++;
+ break;
+ case 1:
+ p.X++;
+ p.Y++;
+ break;
+ case 2:
+ p.X++;
+ break;
+ case 3:
+ p.Y--;
+ p.X++;
+ break;
+ case 4:
+ p.Y--;
+ break;
+ case 5:
+ p.Y--;
+ p.X--;
+ break;
+ case 6:
+ p.X--;
+ break;
+ case 7:
+ p.X--;
+ p.Y++;
+ break;
+ }
+
+	pMouse->Way = I;//Remember the selected direction
+
+ return p;
+}
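+
+// Directions are encoded 0..7: 0 = +Y, 2 = +X, 4 = -Y, 6 = -X, and the
+// odd values are the diagonals in between. (I + 4) % 8 is therefore the
+// opposite direction, which the code above tries to avoid so that a
+// mouse does not immediately backtrack.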
+
+//*****************************************************************************
+// Purpose of the function : Evaluate the possible positions and store
+// them in an array of positions. If some cheese has been detected, it
+// will be selected first; the presence of cheese is indicated by the
+// CHEESE_FOUND flag.
+// Input :
+//	Org : Position of the mouse
+// Outputs :
+//	Positions : Array of valid positions
+//	Cheese : Position of the cheese
+// Return value : Number of valid positions
+//*****************************************************************************
+static int EvaluatePositions(Point_t Org, int Positions[], Point_t * Cheese)
+{
+ int I;
+ int Count = 0;
+ Point_t p;
+ Point_t CheesePos;
+
+ for(I = 0; I < 8; I++)
+		{//Explore all the directions
+ p = Org;
+ switch(I)
+			{//Translate the direction into a displacement
+ case 0:
+ p.Y++;
+ break;
+ case 1:
+ p.X++;
+ p.Y++;
+ break;
+ case 2:
+ p.X++;
+ break;
+ case 3:
+ p.Y--;
+ p.X++;
+ break;
+ case 4:
+ p.Y--;
+ break;
+ case 5:
+ p.Y--;
+ p.X--;
+ break;
+ case 6:
+ p.X--;
+ break;
+ case 7:
+ p.X--;
+ p.Y++;
+ break;
+ }
+		//Test for a collision
+		if(IsCollision(Org, p, &CheesePos) == FALSE)
+			{//The mouse did not hit any obstacle
+ Positions[Count] = I;
+ Count++;
+ }
+ }
+
+ *Cheese = CheesePos;
+
+ return Count;
+}
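+
+// Note: CheesePos is only written by IsCollision() when a piece of
+// cheese is detected (which also sets CHEESE_FOUND); otherwise *Cheese
+// keeps an indeterminate value, so callers must only use it when
+// CHEESE_FOUND is set -- which is what MouseMove() does.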
+
+//*****************************************************************************
+// Purpose of the function : Apply a movement to the mouse
+// Inputs :
+//	Org : Coordinates of the mouse
+//	p : Coordinates the mouse wants to reach
+// Return value : TRUE if the movement took place, FALSE otherwise
+//*****************************************************************************
+static sos_bool_t AffectMovement(Point_t Org, Point_t p)
+{
+ Element_t * pMouse = pMap[Org.X + (Org.Y * MAP_X)];
+ Element_t * pElement;
+
+ pElement = pMap[p.X + (p.Y * MAP_X)];
+
+	//The target square is free
+	if(pElement == NULL) return TRUE;//Allow the movement
+
+ switch(pElement->Type)
+ {
+ case CHEESE:
+			// Free the memory used by the cheese
+ sos_kfree((sos_vaddr_t)pElement);
+ pMap[p.X + (p.Y * MAP_X)] = NULL;
+
+			//Give the cheese to the mouse
+ Set(pMouse->Status, MOUSE_FULL);
+ Reset(pMouse->Status, MOUSE_EMPTY);
+ pMouse->Color = SOS_X86_VIDEO_FG_MAGENTA;
+ CheeseCount--;
+ return TRUE;
+ case OUTPUT:
+			//Remove the mouse from the map
+ pMap[Org.X + (Org.Y * MAP_X)] = NULL;
+ MouseCount--;
+ DrawPixel(Org.X, Org.Y, SOS_X86_VIDEO_FG_BLACK);
+ sos_x86_videomem_printf( 23,0, SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE,
+ "Souris = %d; Fromages = %d; Obstacles = %d ",
+ MouseCount, CheeseCount,
+ ObstacleCount);
+ Set(pMouse->Status, MOUSE_EXITED);
+ return FALSE;
+ default :
+ return FALSE;
+ }
+
+ return FALSE;
+}
+
+//*****************************************************************************
+// Purpose of the function : Test whether a collision with an obstacle occurred
+// Inputs :
+//	Org : Coordinates of the mouse
+//	p : Coordinates the mouse wants to reach
+// Output :
+//	Cheese : Coordinates of the cheese
+// Return value : TRUE if a collision occurred, FALSE otherwise
+//*****************************************************************************
+static sos_bool_t IsCollision(Point_t Org, Point_t p, Point_t *Cheese)
+{
+ Element_t * pMouse = pMap[Org.X + (Org.Y * MAP_X)];
+ Element_t * pElement;
+
+	//Check the map borders
+ if((p.X < 0)||(p.Y < 0)) return TRUE;
+
+ if((p.Y >= MAP_Y)||(p.X >= MAP_X)) return TRUE;
+
+ pElement = pMap[p.X + (p.Y * MAP_X)];
+
+	//The target square is empty
+ if(pElement == NULL) return FALSE;
+
+	//If some cheese has already been found, stop searching
+ if(IsSet(pMouse->Status, CHEESE_FOUND)) return FALSE;
+
+ switch(pElement->Type)
+ {
+ case CHEESE:
+ if(IsSet(pMouse->Status, MOUSE_FULL)) return TRUE;
+			//Indicate that some cheese has been found
+ Set(pMouse->Status, CHEESE_FOUND);
+			//Remember the position of the cheese
+ (*Cheese).X = p.X;
+ (*Cheese).Y = p.Y;
+ break;
+ case INPUT:
+ if(IsSet(pMouse->Status, MOUSE_EMPTY)) return TRUE;
+			//Refill the cheese stock at the entrance
+ Set(pMouse->Status, MOUSE_EMPTY);
+ Reset(pMouse->Status, MOUSE_FULL);
+ pMouse->Color = SOS_X86_VIDEO_FG_LTRED;
+			//Allow the creation of another mouse
+ sos_ksema_up(& SemMouse);
+ return TRUE;
+ case OUTPUT:
+ break;
+ default :
+ return TRUE;
+ }
+
+	return FALSE;//No collision
+}
+
+//*****************************************************************************
+// Purpose of the thread : Create a mouse and place it around the entrance
+//*****************************************************************************
+static void MouseCreator(void)
+{
+ while(1)
+ {
+ sos_ksema_down(& SemMouse, NULL);
+ sos_ksema_down(& SemMap, NULL);
+ CreateMouse();
+ sos_ksema_up(& SemMap);
+ }
+}
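+
+// MouseCreator blocks on SemMouse, whose initial count of 2 allows two
+// mice to be created right away; afterwards a new mouse is only created
+// each time an existing one deposits its cheese at the entrance (the
+// sos_ksema_up(& SemMouse) in IsCollision()). The map itself is only
+// touched while holding SemMap.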
+
+//*****************************************************************************
+// Purpose of the function : Create a mouse and insert it into the map
+// Input : None
+// Return value : -SOS_ENOMEM if memory is insufficient, -SOS_EBUSY if the
+// mouse could not be placed, SOS_OK otherwise
+//*****************************************************************************
+static sos_ret_t CreateMouse(void)
+{
+ Element_t * pElement;
+ unsigned int I;
+
+ Point_t p;
+
+ for(I = 0; I < 8; I++)
+		{//Explore all the slots around the entrance
+ p.X = 0;
+ p.Y = MAP_Y / 2;
+ switch(I)
+			{//Translate the direction into a displacement
+ case 0:
+ p.Y++;
+ break;
+ case 1:
+ p.X++;
+ p.Y++;
+ break;
+ case 2:
+ p.X++;
+ break;
+ case 3:
+ p.Y--;
+ p.X++;
+ break;
+ case 4:
+ p.Y--;
+ break;
+ case 5:
+ p.Y--;
+ p.X--;
+ break;
+ case 6:
+ p.X--;
+ break;
+ case 7:
+ p.X--;
+ p.Y++;
+ break;
+ }
+ if((p.X >= 0)&&(p.Y >= 0)&&(p.X < MAP_X)&&(p.Y < MAP_Y))
+			{//The slot is valid
+ pElement = pMap[p.X + (p.Y * MAP_X)];
+ if(pElement == NULL)
+				{//Create the mouse
+ pElement = (Element_t *)sos_kmalloc(sizeof(Element_t), 0);
+ if(pElement != NULL)
+					{//Initialize the element
+ pElement->Type = MOUSE;
+ Set(pElement->Status, MOUSE_EMPTY);
+ pElement->Color = SOS_X86_VIDEO_FG_LTRED;
+ pElement->P = p;
+ pElement->Way = 0;
+ pElement->ThreadID
+ = sos_create_kernel_thread("Mouse",
+ (sos_kernel_thread_start_routine_t)Mouse,
+ pElement);
+ if(pElement->ThreadID == 0)
+ {
+ sos_kfree((sos_vaddr_t)pElement);
+ pElement = NULL;
+ return -SOS_ENOMEM;
+ }
+ pMap[p.X + (p.Y * MAP_X)] = pElement;
+ MouseCount++;
+
+ DrawPixel(p.X, p.Y, pElement->Color);
+ sos_x86_videomem_printf(23, 0, SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, "Souris = %d; Fromages = %d; Obstacles = %d ", MouseCount, CheeseCount, ObstacleCount);
+
+ return SOS_OK;
+ }
+ }
+ }
+ }
+ return -SOS_EBUSY;
+}
+
+//*****************************************************************************
+// That's all, folks !!!!
+//*****************************************************************************
diff --git a/sos-code-article6.5/sos/physmem.c b/sos-code-article6.5/sos/physmem.c
new file mode 100644
index 0000000..daf730f
--- /dev/null
+++ b/sos-code-article6.5/sos/physmem.c
@@ -0,0 +1,319 @@
+/* Copyright (C) 2004 David Decotigny
+ Copyright (C) 2000 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#include <sos/list.h>
+#include <sos/macros.h>
+#include <sos/assert.h>
+#include <sos/klibc.h>
+
+#include "physmem.h"
+
+/** A descriptor for a physical page in SOS */
+struct physical_page_descr
+{
+ /** The physical base address for the page */
+ sos_paddr_t paddr;
+
+ /** The reference count for this physical page. > 0 means that the
+ page is in the used list. */
+ sos_count_t ref_cnt;
+
+ /** Some data associated with the page when it is mapped in kernel space */
+ struct sos_kmem_range *kernel_range;
+
+ /** The other pages on the list (used, free) */
+ struct physical_page_descr *prev, *next;
+};
+
+/** These are some markers present in the executable file (see sos.lds) */
+extern char __b_kernel, __e_kernel;
+
+/** The array of ppage descriptors will be located at this address */
+#define PAGE_DESCR_ARRAY_ADDR \
+ SOS_PAGE_ALIGN_SUP((sos_paddr_t) (& __e_kernel))
+static struct physical_page_descr * physical_page_descr_array;
+
+/** The list of physical pages currently available */
+static struct physical_page_descr *free_ppage;
+
+/** The list of physical pages currently in use */
+static struct physical_page_descr *used_ppage;
+
+/** We will store here the interval of valid physical addresses */
+static sos_paddr_t physmem_base, physmem_top;
+
+/** We store the number of pages used/free */
+static sos_count_t physmem_total_pages, physmem_used_pages;
+
+sos_ret_t sos_physmem_subsystem_setup(sos_size_t ram_size,
+ /* out */sos_paddr_t *kernel_core_base,
+ /* out */sos_paddr_t *kernel_core_top)
+{
+ /* The iterator over the page descriptors */
+ struct physical_page_descr *ppage_descr;
+
+ /* The iterator over the physical addresses */
+ sos_paddr_t ppage_addr;
+
+ /* Make sure ram size is aligned on a page boundary */
+ ram_size = SOS_PAGE_ALIGN_INF(ram_size);/* Yes, we may lose at most a page */
+
+ /* Reset the used/free page lists before building them */
+ free_ppage = used_ppage = NULL;
+ physmem_total_pages = physmem_used_pages = 0;
+
+ /* Make sure that there is enough memory to store the array of page
+ descriptors */
+ *kernel_core_base = SOS_PAGE_ALIGN_INF((sos_paddr_t)(& __b_kernel));
+ *kernel_core_top
+ = PAGE_DESCR_ARRAY_ADDR
+ + SOS_PAGE_ALIGN_SUP( (ram_size >> SOS_PAGE_SHIFT)
+ * sizeof(struct physical_page_descr));
+ if (*kernel_core_top > ram_size)
+ return -SOS_ENOMEM;
+
+ /* Page 0-4kB is not available in order to return address 0 as a
+ means to signal "no page available" */
+ physmem_base = SOS_PAGE_SIZE;
+ physmem_top = ram_size;
+
+  /* Set up the page descriptor array */
+ physical_page_descr_array
+ = (struct physical_page_descr*)PAGE_DESCR_ARRAY_ADDR;
+
+ /* Scan the list of physical pages */
+ for (ppage_addr = 0,
+ ppage_descr = physical_page_descr_array ;
+ ppage_addr < physmem_top ;
+ ppage_addr += SOS_PAGE_SIZE,
+ ppage_descr ++)
+ {
+ enum { PPAGE_MARK_RESERVED, PPAGE_MARK_FREE,
+ PPAGE_MARK_KERNEL, PPAGE_MARK_HWMAP } todo;
+
+ memset(ppage_descr, 0x0, sizeof(struct physical_page_descr));
+
+ /* Init the page descriptor for this page */
+ ppage_descr->paddr = ppage_addr;
+
+ /* Reserved : 0 ... base */
+ if (ppage_addr < physmem_base)
+ todo = PPAGE_MARK_RESERVED;
+
+ /* Free : base ... BIOS */
+ else if ((ppage_addr >= physmem_base)
+ && (ppage_addr < BIOS_N_VIDEO_START))
+ todo = PPAGE_MARK_FREE;
+
+ /* Used : BIOS */
+ else if ((ppage_addr >= BIOS_N_VIDEO_START)
+ && (ppage_addr < BIOS_N_VIDEO_END))
+ todo = PPAGE_MARK_HWMAP;
+
+ /* Free : BIOS ... kernel */
+ else if ((ppage_addr >= BIOS_N_VIDEO_END)
+ && (ppage_addr < (sos_paddr_t) (& __b_kernel)))
+ todo = PPAGE_MARK_FREE;
+
+      /* Used : Kernel code/data/bss + physical page descr array */
+ else if ((ppage_addr >= *kernel_core_base)
+ && (ppage_addr < *kernel_core_top))
+ todo = PPAGE_MARK_KERNEL;
+
+ /* Free : first page of descr ... end of RAM */
+ else
+ todo = PPAGE_MARK_FREE;
+
+ /* Actually does the insertion in the used/free page lists */
+ physmem_total_pages ++;
+ switch (todo)
+ {
+ case PPAGE_MARK_FREE:
+ ppage_descr->ref_cnt = 0;
+ list_add_head(free_ppage, ppage_descr);
+ break;
+
+ case PPAGE_MARK_KERNEL:
+ case PPAGE_MARK_HWMAP:
+ ppage_descr->ref_cnt = 1;
+ list_add_head(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+ break;
+
+ default:
+ /* Reserved page: nop */
+ break;
+ }
+ }
+
+ return SOS_OK;
+}
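+
+/*
+ * Resulting state of the physical pages after the loop above:
+ *   [0 .. SOS_PAGE_SIZE)                     reserved, so that paddr 0 can
+ *                                            mean "no page available"
+ *   [SOS_PAGE_SIZE .. BIOS_N_VIDEO_START)    free
+ *   [BIOS_N_VIDEO_START .. BIOS_N_VIDEO_END) marked used (BIOS + video)
+ *   [BIOS_N_VIDEO_END .. kernel_core_base)   free
+ *   [kernel_core_base .. kernel_core_top)    marked used (kernel + this array)
+ *   [kernel_core_top .. ram_size)            free
+ */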
+
+
+sos_paddr_t sos_physmem_ref_physpage_new(sos_bool_t can_block)
+{
+ struct physical_page_descr *ppage_descr;
+
+ if (! free_ppage)
+ return (sos_paddr_t)NULL;
+
+ /* Retrieve a page in the free list */
+ ppage_descr = list_pop_head(free_ppage);
+
+ /* The page is assumed not to be already used */
+ SOS_ASSERT_FATAL(ppage_descr->ref_cnt == 0);
+
+ /* Mark the page as used (this of course sets the ref count to 1) */
+ ppage_descr->ref_cnt ++;
+
+ /* No associated kernel range by default */
+ ppage_descr->kernel_range = NULL;
+
+ /* Put the page in the used list */
+ list_add_tail(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+
+ return ppage_descr->paddr;
+}
+
+
+/**
+ * Helper function to get the physical page descriptor for the given
+ * physical page address.
+ *
+ * @return NULL when out-of-bounds or non-page-aligned
+ */
+inline static struct physical_page_descr *
+get_page_descr_at_paddr(sos_paddr_t ppage_paddr)
+{
+ /* Don't handle non-page-aligned addresses */
+ if (ppage_paddr & SOS_PAGE_MASK)
+ return NULL;
+
+ /* Don't support out-of-bounds requests */
+ if ((ppage_paddr < physmem_base) || (ppage_paddr >= physmem_top))
+ return NULL;
+
+ return physical_page_descr_array + (ppage_paddr >> SOS_PAGE_SHIFT);
+}
+
+
+sos_ret_t sos_physmem_ref_physpage_at(sos_paddr_t ppage_paddr)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ /* Increment the reference count for the page */
+ ppage_descr->ref_cnt ++;
+
+ /* If the page is newly referenced (ie we are the only owners of the
+ page => ref cnt == 1), transfer it in the used pages list */
+ if (ppage_descr->ref_cnt == 1)
+ {
+ list_delete(free_ppage, ppage_descr);
+
+ /* No associated kernel range by default */
+ ppage_descr->kernel_range = NULL;
+
+ list_add_tail(used_ppage, ppage_descr);
+ physmem_used_pages ++;
+
+ /* The page is newly referenced */
+ return FALSE;
+ }
+
+ /* The page was already referenced by someone */
+ return TRUE;
+}
+
+
+sos_ret_t
+sos_physmem_unref_physpage(sos_paddr_t ppage_paddr)
+{
+ /* By default the return value indicates that the page is still
+ used */
+ sos_ret_t retval = FALSE;
+
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ /* Don't do anything if the page is not in the used list */
+ if (ppage_descr->ref_cnt <= 0)
+ return -SOS_EINVAL;
+
+ /* Unreference the page, and, when no mapping is active anymore, put
+ the page in the free list */
+ ppage_descr->ref_cnt--;
+ if (ppage_descr->ref_cnt <= 0)
+ {
+ /* Reset associated kernel range */
+ ppage_descr->kernel_range = NULL;
+
+ /* Transfer the page, considered USED, to the free list */
+ list_delete(used_ppage, ppage_descr);
+ physmem_used_pages --;
+ list_add_head(free_ppage, ppage_descr);
+
+ /* Indicate that the page is now unreferenced */
+ retval = TRUE;
+ }
+
+ return retval;
+}
+
+
+struct sos_kmem_range* sos_physmem_get_kmem_range(sos_paddr_t ppage_paddr)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return NULL;
+
+ return ppage_descr->kernel_range;
+}
+
+
+sos_ret_t sos_physmem_set_kmem_range(sos_paddr_t ppage_paddr,
+ struct sos_kmem_range *range)
+{
+ struct physical_page_descr *ppage_descr
+ = get_page_descr_at_paddr(ppage_paddr);
+
+ if (! ppage_descr)
+ return -SOS_EINVAL;
+
+ ppage_descr->kernel_range = range;
+ return SOS_OK;
+}
+
+sos_ret_t sos_physmem_get_state(/* out */sos_count_t *total_ppages,
+ /* out */sos_count_t *used_ppages)
+{
+ if (total_ppages)
+ *total_ppages = physmem_total_pages;
+ if (used_ppages)
+ *used_ppages = physmem_used_pages;
+ return SOS_OK;
+}
diff --git a/sos-code-article6.5/sos/physmem.h b/sos-code-article6.5/sos/physmem.h
new file mode 100644
index 0000000..7b4cd2b
--- /dev/null
+++ b/sos-code-article6.5/sos/physmem.h
@@ -0,0 +1,147 @@
+/* Copyright (C) 2004 David Decotigny
+ Copyright (C) 2000 The KOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_PHYSMEM_H_
+#define _SOS_PHYSMEM_H_
+
+/**
+ * @file physmem.h
+ *
+ * Physical pages of memory
+ */
+
+#include <sos/errno.h>
+#include <sos/types.h>
+#include <sos/macros.h>
+
+/** The size of a physical page (arch-dependent) */
+#define SOS_PAGE_SIZE (4*1024)
+
+/** The corresponding shift */
+#define SOS_PAGE_SHIFT 12 /* 4 kB = 2^12 B */
+
+/** The corresponding mask */
+#define SOS_PAGE_MASK ((1<<12) - 1)
+
+#define SOS_PAGE_ALIGN_INF(val) \
+ SOS_ALIGN_INF((val), SOS_PAGE_SIZE)
+#define SOS_PAGE_ALIGN_SUP(val) \
+ SOS_ALIGN_SUP((val), SOS_PAGE_SIZE)
+#define SOS_IS_PAGE_ALIGNED(val) \
+ SOS_IS_ALIGNED((val), SOS_PAGE_SIZE)
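+
+/* For instance, with SOS_PAGE_SIZE = 4 kB, SOS_PAGE_MASK is 0xfff: an
+   address is page-aligned iff (addr & SOS_PAGE_MASK) == 0 and, assuming
+   the usual round-down/round-up semantics of SOS_ALIGN_INF/SUP from
+   macros.h, SOS_PAGE_ALIGN_INF(0x1234) yields 0x1000 while
+   SOS_PAGE_ALIGN_SUP(0x1234) yields 0x2000. */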
+
+/**
+ * This is the reserved physical interval for the x86 video memory and
+ * BIOS area. In physmem.c, we have to mark this area as "used" in
+ * order to prevent from allocating it. And in paging.c, we'd better
+ * map it in virtual space if we really want to be able to print to
+ * the screen (for debugging purpose, at least): for this, the
+ * simplest is to identity-map this area in virtual space (note
+ * however that this mapping could also be non-identical).
+ */
+#define BIOS_N_VIDEO_START 0xa0000
+#define BIOS_N_VIDEO_END 0x100000
+
+
+/**
+ * Initialize the physical memory subsystem for the physical area [0,
+ * ram_size). This routine takes the BIOS and video areas into account,
+ * to prevent them from ever being allocated.
+ *
+ * @param ram_size The size of the RAM that will be managed by this subsystem
+ *
+ * @param kernel_core_base The lowest address for which the kernel
+ * assumes identity mapping (ie virtual address == physical address)
+ * will be stored here
+ *
+ * @param kernel_core_top The top address for which the kernel
+ * assumes identity mapping (ie virtual address == physical address)
+ * will be stored here
+ */
+sos_ret_t sos_physmem_subsystem_setup(sos_size_t ram_size,
+ /* out */sos_paddr_t *kernel_core_base,
+ /* out */sos_paddr_t *kernel_core_top);
+
+/**
+ * Retrieve the total number of pages, and the number of free pages
+ */
+sos_ret_t sos_physmem_get_state(/* out */sos_count_t *total_ppages,
+ /* out */sos_count_t *used_ppages);
+
+
+/**
+ * Get a free page.
+ *
+ * @return The (physical) address of the (physical) page allocated, or
+ * NULL when none currently available.
+ *
+ * @param can_block TRUE if the function is allowed to block
+ * @note The page returned has a reference count equal to 1.
+ */
+sos_paddr_t sos_physmem_ref_physpage_new(sos_bool_t can_block);
+
+
+/**
+ * Increment the reference count of a given physical page. Useful for
+ * VM code which tries to map a precise physical address.
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return TRUE when the page was previously in use, FALSE when the
+ * page was previously in the free list, <0 when the page address is
+ * invalid.
+ */
+sos_ret_t sos_physmem_ref_physpage_at(sos_paddr_t ppage_paddr);
+
+
+/**
+ * Decrement the reference count of the given physical page. When this
+ * reference count reaches 0, the page is marked free, ie is available
+ * for future calls to sos_physmem_ref_physpage_new()
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return FALSE when the page is still in use, TRUE when the page is now
+ * unreferenced, <0 when the page address is invalid
+ */
+sos_ret_t sos_physmem_unref_physpage(sos_paddr_t ppage_paddr);
+
+
+#include <sos/kmem_vmm.h>
+
+/**
+ * Return the kernel memory allocation range associated with the given
+ * physical page, or NULL when page has no associated range
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ */
+struct sos_kmem_range* sos_physmem_get_kmem_range(sos_paddr_t ppage_paddr);
+
+
+/**
+ * Set the kernel memory allocation range associated to the given
+ * physical page.
+ *
+ * @param ppage_paddr Physical address of the page (MUST be page-aligned)
+ *
+ * @return error if page is invalid
+ */
+sos_ret_t sos_physmem_set_kmem_range(sos_paddr_t ppage_paddr,
+ struct sos_kmem_range *range);
+
+#endif /* _SOS_PHYSMEM_H_ */
diff --git a/sos-code-article6.5/sos/sched.c b/sos-code-article6.5/sos/sched.c
new file mode 100644
index 0000000..b68dcaf
--- /dev/null
+++ b/sos-code-article6.5/sos/sched.c
@@ -0,0 +1,133 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/errno.h>
+#include <sos/klibc.h>
+#include <sos/assert.h>
+#include <sos/list.h>
+
+#include "sched.h"
+
+
+/**
+ * The definition of the scheduler queue. We could have used a normal
+ * kwaitq here, it would have had the same properties. But, in the
+ * definitive version (O(1) scheduler), the structure has to be a bit
+ * more complicated. So, in order to keep the changes as small as
+ * possible between this version and the definitive one, we don't use
+ * kwaitq here.
+ */
+static struct
+{
+ unsigned int nr_threads;
+ struct sos_thread *thread_list;
+} ready_queue;
+
+
+sos_ret_t sos_sched_subsystem_setup()
+{
+ memset(& ready_queue, 0x0, sizeof(ready_queue));
+
+ return SOS_OK;
+}
+
+
+/**
+ * Helper function to add a thread in a ready queue AND to change the
+ * state of the given thread to "READY".
+ *
+ * @param insert_at_tail TRUE to tell to add the thread at the end of
+ * the ready list. Otherwise it is added at the head of it.
+ */
+static sos_ret_t add_in_ready_queue(struct sos_thread *thr,
+ sos_bool_t insert_at_tail)
+{
+
+ SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state)
+ || (SOS_THR_RUNNING == thr->state) /* Yield */
+ || (SOS_THR_BLOCKED == thr->state) );
+
+ /* Add the thread to the CPU queue */
+ if (insert_at_tail)
+ list_add_tail_named(ready_queue.thread_list, thr,
+ ready.rdy_prev, ready.rdy_next);
+ else
+ list_add_head_named(ready_queue.thread_list, thr,
+ ready.rdy_prev, ready.rdy_next);
+ ready_queue.nr_threads ++;
+
+ /* Ok, thread is now really ready to be (re)started */
+ thr->state = SOS_THR_READY;
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_sched_set_ready(struct sos_thread *thr)
+{
+ sos_ret_t retval;
+
+ /* Don't do anything for already ready threads */
+ if (SOS_THR_READY == thr->state)
+ return SOS_OK;
+
+  /* Schedule it for the present turn: append it to the tail of the ready list */
+ retval = add_in_ready_queue(thr, TRUE);
+
+ return retval;
+}
+
+
+struct sos_thread * sos_reschedule(struct sos_thread *current_thread,
+ sos_bool_t do_yield)
+{
+
+ if (SOS_THR_ZOMBIE == current_thread->state)
+ {
+ /* Don't think of returning to this thread since it is
+ terminated */
+ /* Nop */
+ }
+ else if (SOS_THR_BLOCKED != current_thread->state)
+ {
+      /* Take the currently executing thread into account unless it is
+         marked blocked */
+ if (do_yield)
+ /* Ok, reserve it for next turn */
+ add_in_ready_queue(current_thread, TRUE);
+ else
+ /* Put it at the head of the active list */
+ add_in_ready_queue(current_thread, FALSE);
+ }
+
+ /* The next thread is that at the head of the ready list */
+ if (ready_queue.nr_threads > 0)
+ {
+ struct sos_thread *next_thr;
+
+ /* Queue is not empty: take the thread at its head */
+ next_thr = list_pop_head_named(ready_queue.thread_list,
+ ready.rdy_prev, ready.rdy_next);
+ ready_queue.nr_threads --;
+
+ return next_thr;
+ }
+
+ SOS_FATAL_ERROR("No kernel thread ready ?!");
+ return NULL;
+}
diff --git a/sos-code-article6.5/sos/sched.h b/sos-code-article6.5/sos/sched.h
new file mode 100644
index 0000000..123fe7d
--- /dev/null
+++ b/sos-code-article6.5/sos/sched.h
@@ -0,0 +1,75 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_SCHED_H_
+#define _SOS_SCHED_H_
+
+
+/**
+ * @file sched.h
+ *
+ * A basic scheduler with simple FIFO thread ordering.
+ *
+ * The functions below manage CPU queues, and are NEVER responsible
+ * for context switches (see thread.h for that) or synchronizations
+ * (see kwaitq.h or the higher levels primitives [mutex, semaphore,
+ * ...] for that).
+ *
+ * @note IMPORTANT: all the functions below are meant to be called
+ * ONLY by the thread/timer/kwaitq subsystems. DO NOT use them
+ * directly from anywhere else: use ONLY the thread/kwaitq functions!
+ * If you still want to call them directly despite this disclaimer,
+ * simply disable interrupts before calling them.
+ */
+
+#include <sos/errno.h>
+
+
+#include <sos/thread.h>
+
+
+/**
+ * Initialize the scheduler
+ *
+ * @note: The use of this function is RESERVED
+ */
+sos_ret_t sos_sched_subsystem_setup();
+
+
+/**
+ * Mark the given thread as ready
+ *
+ * @note: The use of this function is RESERVED
+ */
+sos_ret_t sos_sched_set_ready(struct sos_thread * thr);
+
+
+/**
+ * Return the identifier of the next thread to run. Also removes it
+ * from the ready list, but does NOT set it as current_thread !
+ *
+ * @param current_thread TCB of the thread calling the function
+ *
+ * @param do_yield When TRUE, put the currently executing thread at the
+ * end of the ready list. Otherwise it is kept at the head of it.
+ *
+ * @note: The use of this function is RESERVED
+ */
+struct sos_thread * sos_reschedule(struct sos_thread * current_thread,
+ sos_bool_t do_yield);
+
+#endif /* _SOS_SCHED_H_ */
diff --git a/sos-code-article6.5/sos/thread.c b/sos-code-article6.5/sos/thread.c
new file mode 100644
index 0000000..766b804
--- /dev/null
+++ b/sos-code-article6.5/sos/thread.c
@@ -0,0 +1,441 @@
+/* Copyright (C) 2004,2005 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/physmem.h>
+#include <sos/kmem_slab.h>
+#include <sos/kmalloc.h>
+#include <sos/klibc.h>
+#include <sos/list.h>
+#include <sos/assert.h>
+
+#include <hwcore/irq.h>
+
+#include "thread.h"
+
+
+/**
+ * The size of the stack of a kernel thread
+ */
+#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
+
+
+/**
+ * The identifier of the thread currently running on CPU.
+ *
+ * We only support a SINGLE processor, ie a SINGLE thread
+ * running at any time in the system. This greatly simplifies the
+ * implementation of the system, since we don't have to complicate
+ * things in order to retrieve the identifier of the threads running
+ * on the CPU. On multiprocessor systems the current_thread below is
+ * an array indexed by the id of the CPU, so that the challenge is to
+ * retrieve the identifier of the CPU. This is usually done based on
+ * the stack address (Linux implementation) or on some form of TLS
+ * ("Thread Local Storage": can be implemented by way of LDTs for the
+ * processes, accessed through the fs or gs registers).
+ */
+static volatile struct sos_thread *current_thread = NULL;
+
+
+/*
+ * The list of threads currently in the system.
+ *
+ * @note We could have used current_thread for that...
+ */
+static struct sos_thread *thread_list = NULL;
+
+
+/**
+ * The Cache of thread structures
+ */
+static struct sos_kslab_cache *cache_thread;
+
+
+struct sos_thread *sos_thread_get_current()
+{
+ SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
+ return (struct sos_thread*)current_thread;
+}
+
+
+inline static sos_ret_t _set_current(struct sos_thread *thr)
+{
+ SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
+ current_thread = thr;
+ current_thread->state = SOS_THR_RUNNING;
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
+ sos_size_t init_thread_stack_size)
+{
+ struct sos_thread *myself;
+
+ /* Allocate the cache of threads */
+ cache_thread = sos_kmem_cache_create("thread",
+ sizeof(struct sos_thread),
+ 2,
+ 0,
+ SOS_KSLAB_CREATE_MAP
+ | SOS_KSLAB_CREATE_ZERO);
+ if (! cache_thread)
+ return -SOS_ENOMEM;
+
+ /* Allocate a new thread structure for the current running thread */
+ myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
+ SOS_KSLAB_ALLOC_ATOMIC);
+ if (! myself)
+ return -SOS_ENOMEM;
+
+ /* Initialize the thread attributes */
+ strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
+ myself->state = SOS_THR_CREATED;
+ myself->kernel_stack_base_addr = init_thread_stack_base_addr;
+ myself->kernel_stack_size = init_thread_stack_size;
+
+ /* Do some stack poisoning on the bottom of the stack, if needed */
+ sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
+ myself->kernel_stack_base_addr,
+ myself->kernel_stack_size);
+
+ /* Add the thread in the global list */
+ list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
+
+ /* Ok, now pretend that the running thread is ourselves */
+ myself->state = SOS_THR_READY;
+ _set_current(myself);
+
+ return SOS_OK;
+}
+
+
+struct sos_thread *
+sos_create_kernel_thread(const char *name,
+ sos_kernel_thread_start_routine_t start_func,
+ void *start_arg)
+{
+ __label__ undo_creation;
+ sos_ui32_t flags;
+ struct sos_thread *new_thread;
+
+ if (! start_func)
+ return NULL;
+
+ /* Allocate a new thread structure for the current running thread */
+ new_thread
+ = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
+ SOS_KSLAB_ALLOC_ATOMIC);
+ if (! new_thread)
+ return NULL;
+
+ /* Initialize the thread attributes */
+ strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
+ new_thread->state = SOS_THR_CREATED;
+
+ /* Allocate the stack for the new thread */
+ new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
+ new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
+ if (! new_thread->kernel_stack_base_addr)
+ goto undo_creation;
+
+ /* Initialize the CPU context of the new thread */
+ if (SOS_OK
+ != sos_cpu_kstate_init(& new_thread->cpu_state,
+ (sos_cpu_kstate_function_arg1_t*) start_func,
+ (sos_ui32_t) start_arg,
+ new_thread->kernel_stack_base_addr,
+ new_thread->kernel_stack_size,
+ (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
+ (sos_ui32_t) NULL))
+ goto undo_creation;
+
+ /* Add the thread in the global list */
+ sos_disable_IRQs(flags);
+ list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
+ sos_restore_IRQs(flags);
+
+ /* Mark the thread ready */
+ if (SOS_OK != sos_sched_set_ready(new_thread))
+ goto undo_creation;
+
+ /* Normal non-erroneous end of function */
+ return new_thread;
+
+ undo_creation:
+ if (new_thread->kernel_stack_base_addr)
+ sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
+ sos_kmem_cache_free((sos_vaddr_t) new_thread);
+ return NULL;
+}
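+
+/* Note: sos_cpu_kstate_init() above installs sos_thread_exit() as the
+   routine to run when start_func returns, so a kernel thread that simply
+   returns from its start function terminates cleanly; only the primary
+   (bootstrap) thread has to call sos_thread_exit() by hand (see main.c). */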
+
+
+/** Function called after thr has terminated. Called from inside the context
+ of another thread, interrupts disabled */
+static void delete_thread(struct sos_thread *thr)
+{
+ sos_ui32_t flags;
+
+ sos_disable_IRQs(flags);
+ list_delete_named(thread_list, thr, gbl_prev, gbl_next);
+ sos_restore_IRQs(flags);
+
+ sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
+ memset(thr, 0x0, sizeof(struct sos_thread));
+ sos_kmem_cache_free((sos_vaddr_t) thr);
+}
+
+
+void sos_thread_exit()
+{
+ sos_ui32_t flags;
+ struct sos_thread *myself, *next_thread;
+
+ /* Interrupt handlers are NOT allowed to exit the current thread ! */
+ SOS_ASSERT_FATAL(! sos_servicing_irq());
+
+ myself = sos_thread_get_current();
+
+  /* Refuse to end the currently executing thread if it still holds a
+     resource ! */
+ SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
+ prev_entry_for_thread,
+ next_entry_for_thread));
+
+ /* Prepare to run the next thread */
+ sos_disable_IRQs(flags);
+ myself->state = SOS_THR_ZOMBIE;
+ next_thread = sos_reschedule(myself, FALSE);
+
+ /* Make sure that the next_thread is valid */
+ sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
+ next_thread->kernel_stack_base_addr,
+ next_thread->kernel_stack_size);
+
+ /* No need for sos_restore_IRQs() here because the IRQ flag will be
+ restored to that of the next thread upon context switch */
+
+ /* Immediate switch to next thread */
+ _set_current(next_thread);
+ sos_cpu_context_exit_to(next_thread->cpu_state,
+ (sos_cpu_kstate_function_arg1_t*) delete_thread,
+ (sos_ui32_t) myself);
+}
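+
+/* Note: sos_cpu_context_exit_to() switches to next_thread and arranges
+   for delete_thread(myself) to be called from next_thread's context, so
+   the stack and TCB of the exiting thread are only freed once nothing
+   can be running on them anymore. */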
+
+
+sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
+{
+ if (! thr)
+ thr = (struct sos_thread*)current_thread;
+
+ return thr->state;
+}
+
+
+typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
+/**
+ * Helper function to initiate a context switch in case the current
+ * thread becomes blocked, waiting for a timeout, or calls yield.
+ */
+static sos_ret_t _switch_to_next_thread(switch_type_t operation)
+{
+ struct sos_thread *myself, *next_thread;
+
+ SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
+
+ /* Interrupt handlers are NOT allowed to block ! */
+ SOS_ASSERT_FATAL(! sos_servicing_irq());
+
+ myself = (struct sos_thread*)current_thread;
+
+  /* Make sure that, if we are to be marked "BLOCKED", we have some
+     reason to actually be blocked */
+ if (BLOCK_MYSELF == operation)
+ {
+ myself->state = SOS_THR_BLOCKED;
+ }
+
+ /* Identify the next thread */
+ next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);
+
+ /* Avoid context switch if the context does not change */
+ if (myself != next_thread)
+ {
+ /* Sanity checks for the next thread */
+ sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
+ next_thread->kernel_stack_base_addr,
+ next_thread->kernel_stack_size);
+
+
+ /*
+ * Actual CPU context switch
+ */
+ _set_current(next_thread);
+ sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
+
+ /* Back here ! */
+ SOS_ASSERT_FATAL(current_thread == myself);
+ SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
+ }
+ else
+ {
+ /* No context switch but still update ID of current thread */
+ _set_current(next_thread);
+ }
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_thread_yield()
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+
+ retval = _switch_to_next_thread(YIELD_MYSELF);
+
+ sos_restore_IRQs(flags);
+ return retval;
+}
+
+
+/**
+ * Internal sleep timeout management
+ */
+struct sleep_timeout_params
+{
+ struct sos_thread *thread_to_wakeup;
+ sos_bool_t timeout_triggered;
+};
+
+
+/**
+ * Callback called when a timeout happened
+ */
+static void sleep_timeout(struct sos_timeout_action *act)
+{
+ struct sleep_timeout_params *sleep_timeout_params
+ = (struct sleep_timeout_params*) act->routine_data;
+
+ /* Signal that we have been woken up by the timeout */
+ sleep_timeout_params->timeout_triggered = TRUE;
+
+ /* Mark the thread ready */
+ SOS_ASSERT_FATAL(SOS_OK ==
+ sos_thread_force_unblock(sleep_timeout_params
+ ->thread_to_wakeup));
+}
+
+
+sos_ret_t sos_thread_sleep(struct sos_time *timeout)
+{
+ sos_ui32_t flags;
+ struct sleep_timeout_params sleep_timeout_params;
+ struct sos_timeout_action timeout_action;
+ sos_ret_t retval;
+
+ /* Block forever if no timeout is given */
+ if (NULL == timeout)
+ {
+ sos_disable_IRQs(flags);
+ retval = _switch_to_next_thread(BLOCK_MYSELF);
+ sos_restore_IRQs(flags);
+
+ return retval;
+ }
+
+ /* Initialize the timeout action */
+ sos_time_init_action(& timeout_action);
+
+ /* Prepare parameters used by the sleep timeout callback */
+ sleep_timeout_params.thread_to_wakeup
+ = (struct sos_thread*)current_thread;
+ sleep_timeout_params.timeout_triggered = FALSE;
+
+ sos_disable_IRQs(flags);
+
+ /* Now program the timeout ! */
+ SOS_ASSERT_FATAL(SOS_OK ==
+ sos_time_register_action_relative(& timeout_action,
+ timeout,
+ sleep_timeout,
+ & sleep_timeout_params));
+
+  /* Prepare to block: wait for sleep_timeout() to wake us up when the
+     timeout expires, or for someone to wake us up earlier from any
+     other waitq */
+ retval = _switch_to_next_thread(BLOCK_MYSELF);
+ /* Unblocked by something ! */
+
+ /* Unblocked by timeout ? */
+ if (sleep_timeout_params.timeout_triggered)
+ {
+ /* Yes */
+ SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
+ retval = SOS_OK;
+ }
+ else
+ {
+ /* No: We have probably been woken up while in some other
+ kwaitq */
+ SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
+ retval = -SOS_EINTR;
+ }
+
+ sos_restore_IRQs(flags);
+
+ /* Update the remaining timeout */
+ memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));
+
+ return retval;
+}
+
+
+sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
+{
+ sos_ret_t retval;
+ sos_ui32_t flags;
+
+ if (! thread)
+ return -SOS_EINVAL;
+
+ sos_disable_IRQs(flags);
+
+ /* Thread already woken up ? */
+ retval = SOS_OK;
+ switch(sos_thread_get_state(thread))
+ {
+ case SOS_THR_RUNNING:
+ case SOS_THR_READY:
+ /* Do nothing */
+ break;
+
+ case SOS_THR_ZOMBIE:
+ retval = -SOS_EFATAL;
+ break;
+
+ default:
+ retval = sos_sched_set_ready(thread);
+ break;
+ }
+
+ sos_restore_IRQs(flags);
+
+ return retval;
+}
diff --git a/sos-code-article6.5/sos/thread.h b/sos-code-article6.5/sos/thread.h
new file mode 100644
index 0000000..02ea833
--- /dev/null
+++ b/sos-code-article6.5/sos/thread.h
@@ -0,0 +1,207 @@
+/* Copyright (C) 2004,2005 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_THREAD_H_
+#define _SOS_THREAD_H_
+
+/**
+ * @file thread.h
+ *
+ * SOS Thread management API
+ */
+
+#include <sos/errno.h>
+
+/* Forward declaration */
+struct sos_thread;
+
+#include <hwcore/cpu_context.h>
+#include <sos/sched.h>
+#include <sos/kwaitq.h>
+#include <sos/time.h>
+
+/**
+ * The possible states of a valid thread
+ */
+typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
+ SOS_THR_READY, /**< Thread fully initialized or
+ waiting for CPU after having been
+ blocked or preempted */
+ SOS_THR_RUNNING, /**< Thread currently running on CPU */
+ SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
+ one kwaitq) and/or sleeping (+ in NO
+ kwaitq) */
+ SOS_THR_ZOMBIE, /**< Thread terminated execution, waiting to
+ be deleted by kernel */
+ } sos_thread_state_t;
+
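+/*
+ * Illustrative summary of the state transitions implemented by
+ * thread.c in this patch (informational only, not an additional
+ * contract):
+ *
+ *   CREATED --(thread fully initialized)----------------> READY
+ *   READY   --(picked by sos_reschedule)----------------> RUNNING
+ *   RUNNING --(sos_thread_yield)------------------------> READY
+ *   RUNNING --(sos_thread_sleep / blocking wait)--------> BLOCKED
+ *   BLOCKED --(wakeup or sos_thread_force_unblock)------> READY
+ *   RUNNING --(sos_thread_exit)-------------------------> ZOMBIE
+ */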
+
+/**
+ * TCB (Thread Control Block): structure describing a thread. Don't
+ * access these fields directly: prefer using the accessor functions
+ * below.
+ */
+struct sos_thread
+{
+#define SOS_THR_MAX_NAMELEN 32
+ char name[SOS_THR_MAX_NAMELEN];
+
+ sos_thread_state_t state;
+
+ /**
+ * The hardware context of the thread.
+ *
+ * It will reflect the CPU state of the thread:
+ * - From an interrupt handler: the state of the thread at the time
+   *   of the OUTERMOST irq. An IRQ is not allowed to make context
+   *   switches, so this context remains valid from the beginning of
+   *   the outermost IRQ handler to its end, even when other IRQ
+   *   handlers nest inside it. You may safely use it from IRQ
+   *   handlers to query the state of the interrupted thread, no
+   *   matter whether other IRQ handlers have executed in the
+   *   meantime.
+ * - From normal kernel code, exceptions and syscall: the state of
+ * the thread the last time there was a context switch from this
+ * thread to another one. Thus this field WON'T reflect the
+   *   current thread's cpu_state in these cases. So, in these cases,
+ * simply DO NOT USE IT outside thread.c ! Note: for syscall and
+ * exception handlers, the VALID state of the interrupted thread is
+ * passed as an argument to the handlers.
+ */
+ struct sos_cpu_state *cpu_state;
+
+ /* Kernel stack parameters */
+ sos_vaddr_t kernel_stack_base_addr;
+ sos_size_t kernel_stack_size;
+
+ /* Data specific to each state */
+ union
+ {
+ struct
+ {
+ struct sos_thread *rdy_prev, *rdy_next;
+ } ready;
+  }; /* Anonymous union (gcc extension) */
+
+
+ /*
+ * Data used by the kwaitq subsystem: list of kwaitqueues the thread
+ * is waiting for.
+ *
+ * @note: a RUNNING or READY thread might be in one or more
+ * waitqueues ! The only property we have is that, among these
+ * waitqueues (if any), _at least_ one has woken the thread.
+ */
+ struct sos_kwaitq_entry *kwaitq_list;
+
+
+ /**
+ * Chaining pointers for global ("gbl") list of threads (debug)
+ */
+ struct sos_thread *gbl_prev, *gbl_next;
+};
+
+
+/**
+ * Definition of the function executed by a kernel thread
+ */
+typedef void (*sos_kernel_thread_start_routine_t)(void *arg);
+
+
+/**
+ * Initialize the subsystem responsible for thread management
+ *
+ * Initialize the primary kernel thread so that it can be handled the
+ * same way as an ordinary thread created by sos_create_kernel_thread().
+ */
+sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
+ sos_size_t init_thread_stack_size);
+
+
+/**
+ * Create a new kernel thread
+ */
+struct sos_thread *
+sos_create_kernel_thread(const char *name,
+ sos_kernel_thread_start_routine_t start_func,
+ void *start_arg);
+
+
+/**
+ * Terminate the execution of the current thread. For kernel threads,
+ * it is called by default when the start routine returns.
+ */
+void sos_thread_exit() __attribute__((noreturn));
+
+
+/**
+ * Get the identifier of the thread currently running on CPU. Trivial
+ * function.
+ */
+struct sos_thread *sos_thread_get_current();
+
+
+/**
+ * If thr == NULL, get the state of the current thread. Trivial
+ * function.
+ *
+ * @note NOT protected against interrupts
+ */
+sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);
+
+
+/**
+ * Yield CPU to another ready thread.
+ *
+ * @note This is a BLOCKING FUNCTION
+ */
+sos_ret_t sos_thread_yield();
+
+
+/**
+ * Release the CPU for (at least) the given delay.
+ *
+ * @param delay The delay to wait for. If delay == NULL, block
+ * indefinitely until some event wakes the thread up.
+ *
+ * @return SOS_OK when delay expired (and delay is reset to zero),
+ * -SOS_EINTR otherwise (and delay contains the amount of time
+ * remaining).
+ *
+ * @note This is a BLOCKING FUNCTION
+ */
+sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);
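+
+/*
+ * Usage sketch (illustrative only, the variable name is made up):
+ *
+ *   struct sos_time half_a_second = { .sec = 0, .nanosec = 500000000 };
+ *   sos_ret_t rc = sos_thread_sleep(& half_a_second);
+ *
+ * On return, either rc == SOS_OK and half_a_second is zero (the full
+ * delay elapsed), or rc == -SOS_EINTR and half_a_second holds the
+ * remaining delay (the thread was woken up earlier, eg by
+ * sos_thread_force_unblock()).
+ */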
+
+
+/**
+ * Mark the given thread as READY (if not already ready) even if it is
+ * blocked in a kwaitq or in a sleep ! As a result, the interrupted
+ * kwaitq/sleep function call of the thread will return with
+ * -SOS_EINTR.
+ *
+ * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if
+ * marked ZOMBIE.
+ *
+ * @note As a result, the return values of the
+ * semaphore/mutex/condition/... functions SHOULD ALWAYS be checked !
+ * If they are != SOS_OK, the caller should consider that the resource
+ * was not acquired, because somebody woke the thread up by some other
+ * means.
+ */
+sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);
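+
+/*
+ * Caller-side sketch (illustrative only, 'target' is a hypothetical
+ * pointer to some blocked thread):
+ *
+ *   sos_thread_force_unblock(target);
+ *
+ * The target thread then sees its interrupted blocking call
+ * (sos_thread_sleep, kwaitq wait, semaphore/mutex acquisition, ...)
+ * return a value != SOS_OK, typically -SOS_EINTR, which it must check
+ * as explained in the note above.
+ */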
+
+
+#endif /* _SOS_THREAD_H_ */
diff --git a/sos-code-article6.5/sos/time.c b/sos-code-article6.5/sos/time.c
new file mode 100644
index 0000000..5181959
--- /dev/null
+++ b/sos-code-article6.5/sos/time.c
@@ -0,0 +1,355 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+
+#include <sos/assert.h>
+#include <sos/klibc.h>
+#include <hwcore/irq.h>
+#include <sos/list.h>
+
+#include "time.h"
+
+
+/**
+ * Number of nanoseconds in 1 second
+ */
+#define NS_IN_SEC 1000000000UL
+
+
+/**
+ * The list of timeout actions waiting for a timeout. The actions are
+ * stored in the list in increasing due-date order. Note that, while
+ * an action is in the list, its "timeout" field does not hold the
+ * initial (relative) timeout: it holds the ABSOLUTE due date of the
+ * action, ie the date relative to system boot at which it must be
+ * fired.
+ */
+static struct sos_timeout_action *timeout_action_list;
+
+
+/**
+ * Current resolution of a time tick
+ */
+static struct sos_time tick_resolution;
+
+
+/**
+ * Time elapsed between boot and last timer tick
+ *
+ * @note No 'volatile' here because the tick value is NEVER modified
+ * while inside any of the functions below: it is only modified by the
+ * timer IRQ handler (through sos_time_do_tick), the functions below
+ * are protected against the timer IRQ, and none of them busy-waits
+ * for the tick value to change.
+ */
+static struct sos_time last_tick_time;
+
+
+sos_ret_t sos_time_inc(struct sos_time *dest,
+ const struct sos_time *to_add)
+{
+ /* nanosec is always < 1e9 so that their sum is always < 2e9, which
+ is smaller than 2^32-1 */
+ sos_ui32_t sigma_ns = dest->nanosec + to_add->nanosec;
+
+ dest->sec += to_add->sec;
+ dest->sec += sigma_ns / NS_IN_SEC;
+ dest->nanosec = sigma_ns % NS_IN_SEC;
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_time_dec(struct sos_time *dest,
+ const struct sos_time *to_dec)
+{
+ /* nanosec is always < 1e9 so that their difference is always in
+ (-1e9, 1e9), which is compatible with the (-2^31, 2^31 - 1)
+     capacity of a signed dword */
+ sos_si32_t diff_ns = ((sos_si32_t)dest->nanosec)
+ - ((sos_si32_t)to_dec->nanosec);
+
+  /* Make sure the subtraction is possible */
+ SOS_ASSERT_FATAL(dest->sec >= to_dec->sec);
+ if (dest->sec == to_dec->sec)
+ SOS_ASSERT_FATAL(dest->nanosec >= to_dec->nanosec);
+
+ dest->sec -= to_dec->sec;
+ if (diff_ns > 0)
+ dest->sec += diff_ns / NS_IN_SEC;
+ else
+ dest->sec -= ((-diff_ns) / NS_IN_SEC);
+ dest->nanosec = (NS_IN_SEC + diff_ns) % NS_IN_SEC;
+ if (diff_ns < 0)
+ dest->sec --;
+ return SOS_OK;
+}
+
+
+int sos_time_cmp(const struct sos_time *t1,
+ const struct sos_time *t2)
+{
+ /* Compare seconds */
+ if (t1->sec < t2->sec)
+ return -1;
+ else if (t1->sec > t2->sec)
+ return 1;
+
+ /* seconds are equal => compare nanoseconds */
+ else if (t1->nanosec < t2->nanosec)
+ return -1;
+ else if (t1->nanosec > t2->nanosec)
+ return 1;
+
+ /* else: sec and nanosecs are equal */
+ return 0;
+}
+
+
+sos_bool_t sos_time_is_zero(const struct sos_time *tm)
+{
+ return ( (0 == tm->sec) && (0 == tm->nanosec) );
+}
+
+
+sos_ret_t sos_time_subsysem_setup(const struct sos_time *initial_resolution)
+{
+ timeout_action_list = NULL;
+ last_tick_time = (struct sos_time) { .sec = 0, .nanosec = 0 };
+ memcpy(& tick_resolution, initial_resolution, sizeof(struct sos_time));
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_time_get_tick_resolution(struct sos_time *resolution)
+{
+ sos_ui32_t flags;
+ sos_disable_IRQs(flags);
+
+ memcpy(resolution, & tick_resolution, sizeof(struct sos_time));
+
+ sos_restore_IRQs(flags);
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_time_set_tick_resolution(const struct sos_time *resolution)
+{
+ sos_ui32_t flags;
+
+ sos_disable_IRQs(flags);
+ memcpy(& tick_resolution, resolution, sizeof(struct sos_time));
+ sos_restore_IRQs(flags);
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_time_get_now(struct sos_time *now)
+{
+ sos_ui32_t flags;
+ sos_disable_IRQs(flags);
+
+ memcpy(now, & last_tick_time, sizeof(struct sos_time));
+
+ sos_restore_IRQs(flags);
+ return SOS_OK;
+}
+
+
+/**
+ * Helper routine to add an action to the list. MUST be called with
+ * interrupts disabled !
+ */
+static sos_ret_t _add_action(struct sos_timeout_action *act,
+ const struct sos_time *due_date,
+ sos_bool_t is_relative_due_date,
+ sos_timeout_routine_t *routine,
+ void *routine_data)
+{
+ struct sos_timeout_action *other, *insert_before;
+ int nb_act;
+
+  /* A due date must be specified */
+ if (due_date == NULL)
+ return -SOS_EINVAL;
+
+ /* Action container MUST be specified */
+ if (act == NULL)
+ return -SOS_EINVAL;
+
+ /* Refuse to add an empty action */
+ if (NULL == routine)
+ return -SOS_EINVAL;
+
+ /* Refuse to add the action if it is already added */
+ if (NULL != act->tmo_next)
+ return -SOS_EBUSY;
+
+ /* Compute action absolute due date */
+ if (is_relative_due_date)
+ {
+ /* The provided due_date is relative to the current time */
+ memcpy(& act->timeout, & last_tick_time, sizeof(struct sos_time));
+ sos_time_inc(& act->timeout, due_date);
+ }
+ else
+ {
+ /* The provided due_date is absolute (ie relative to the system
+ boot instant) */
+
+ if (sos_time_cmp(due_date, & last_tick_time) < 0)
+ /* Refuse to add a past action ! */
+ return -SOS_EINVAL;
+
+ memcpy(& act->timeout, due_date, sizeof(struct sos_time));
+ }
+
+ /* Prepare the action data structure */
+ act->routine = routine;
+ act->routine_data = routine_data;
+
+  /* Find the right place in the timeout action list. */
+ insert_before = NULL;
+ list_foreach_forward_named(timeout_action_list,
+ other, nb_act,
+ tmo_prev, tmo_next)
+ {
+ if (sos_time_cmp(& act->timeout, & other->timeout) < 0)
+ {
+ insert_before = other;
+ break;
+ }
+
+ /* Loop over to next timeout */
+ }
+
+ /* Now insert the action in the list */
+ if (insert_before != NULL)
+ list_insert_before_named(timeout_action_list, insert_before, act,
+ tmo_prev, tmo_next);
+ else
+ list_add_tail_named(timeout_action_list, act,
+ tmo_prev, tmo_next);
+
+ return SOS_OK;
+}
+
+
+sos_ret_t
+sos_time_register_action_relative(struct sos_timeout_action *act,
+ const struct sos_time *delay,
+ sos_timeout_routine_t *routine,
+ void *routine_data)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = _add_action(act, delay, TRUE, routine, routine_data);
+ sos_restore_IRQs(flags);
+
+ return retval;
+}
+
+
+sos_ret_t
+sos_time_register_action_absolute(struct sos_timeout_action *act,
+ const struct sos_time *date,
+ sos_timeout_routine_t *routine,
+ void *routine_data)
+{
+ sos_ui32_t flags;
+ sos_ret_t retval;
+
+ sos_disable_IRQs(flags);
+ retval = _add_action(act, date, FALSE, routine, routine_data);
+ sos_restore_IRQs(flags);
+
+ return retval;
+}
+
+
+/**
+ * Helper routine to remove the action from the list. MUST be called
+ * with interrupts disabled !
+ */
+static sos_ret_t _remove_action(struct sos_timeout_action *act)
+{
+ /* Don't do anything if action is not in timeout list */
+ if (NULL == act->tmo_next)
+ return -SOS_EINVAL;
+
+ /* Update the action's remaining timeout */
+ if (sos_time_cmp(& act->timeout, & last_tick_time) <= 0)
+ act->timeout = (struct sos_time){ .sec=0, .nanosec=0 };
+ else
+ sos_time_dec(& act->timeout, & last_tick_time);
+
+ /* Actually remove the action from the list */
+ list_delete_named(timeout_action_list, act,
+ tmo_prev, tmo_next);
+ act->tmo_prev = act->tmo_next = NULL;
+
+ return SOS_OK;
+}
+
+
+sos_ret_t sos_time_unregister_action(struct sos_timeout_action *act)
+{
+ sos_ret_t retval;
+ sos_ui32_t flags;
+
+ sos_disable_IRQs(flags);
+ retval = _remove_action(act);
+ sos_restore_IRQs(flags);
+
+  return retval;
+}
+
+
+sos_ret_t sos_time_do_tick()
+{
+ sos_ui32_t flags;
+
+ sos_disable_IRQs(flags);
+
+ /* Update kernel time */
+ sos_time_inc(& last_tick_time, & tick_resolution);
+
+ while (! list_is_empty_named(timeout_action_list, tmo_prev, tmo_next))
+ {
+ struct sos_timeout_action *act;
+ act = list_get_head_named(timeout_action_list, tmo_prev, tmo_next);
+
+ /* Did we go too far in the actions' list ? */
+ if (sos_time_cmp(& last_tick_time, & act->timeout) < 0)
+ {
+ /* Yes: No need to look further. */
+ break;
+ }
+
+ /* Remove the action from the list */
+ _remove_action(act);
+
+ /* Call the action's routine */
+ act->routine(act);
+ }
+
+ sos_restore_IRQs(flags);
+ return SOS_OK;
+}
diff --git a/sos-code-article6.5/sos/time.h b/sos-code-article6.5/sos/time.h
new file mode 100644
index 0000000..7d3a90b
--- /dev/null
+++ b/sos-code-article6.5/sos/time.h
@@ -0,0 +1,222 @@
+/* Copyright (C) 2004 David Decotigny
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_TIME_H_
+#define _SOS_TIME_H_
+
+/**
+ * @file time.h
+ *
+ * Primitives and callbacks related to kernel time management (timer
+ * IRQ)
+ */
+
+#include <sos/types.h>
+#include <sos/errno.h>
+#include <sos/klibc.h>
+
+
+/* =======================================================================
+ * Library of time manipulation functions
+ */
+struct sos_time
+{
+ sos_ui32_t sec;
+ sos_ui32_t nanosec;
+};
+
+sos_ret_t sos_time_inc(struct sos_time *dest,
+ const struct sos_time *to_add);
+
+sos_ret_t sos_time_dec(struct sos_time *dest,
+ const struct sos_time *to_dec);
+
+int sos_time_cmp(const struct sos_time *t1,
+ const struct sos_time *t2);
+
+sos_bool_t sos_time_is_zero(const struct sos_time *tm);
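+
+/*
+ * Worked example of the time arithmetic (values chosen for
+ * illustration):
+ *
+ *   t1 = { .sec = 1, .nanosec = 800000000 }            (1.8 s)
+ *   t2 = { .sec = 0, .nanosec = 700000000 }            (0.7 s)
+ *
+ *   sos_time_inc(& t1, & t2)  =>  t1 == { 2, 500000000 }  (2.5 s)
+ *   sos_time_dec(& t1, & t2)  =>  t1 == { 1, 800000000 }  (back to
+ *                                 1.8 s, the nanosecond borrow is
+ *                                 propagated to the seconds field)
+ *   sos_time_cmp(& t1, & t2)  =>  1   (t1 > t2)
+ */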
+
+
+
+/* =======================================================================
+ * Kernel time management. This is not the same as the "system-time",
+ * ie it does not take into account the system-time adjustments
+ * (NTP, daylight saving times, etc...): this is the job of a
+ * system-time subsystem.
+ */
+
+
+/**
+ * Initialize kernel time subsystem.
+ *
+ * @param initial_resolution The initial time resolution. MUST be
+ * consistent with that of the hardware timer
+ */
+sos_ret_t sos_time_subsysem_setup(const struct sos_time *initial_resolution);
+
+
+/**
+ * Value of the interval between 2 time ticks. Should be consistent
+ * with the configuration of the hardware timer.
+ */
+sos_ret_t sos_time_get_tick_resolution(struct sos_time *resolution);
+
+
+/**
+ * Change the value of the interval between 2 time ticks. Must be
+ * called each time the hardware timer is reconfigured.
+ *
+ * @note MUST be consistent with that of the hardware timer
+ */
+sos_ret_t sos_time_set_tick_resolution(const struct sos_time *resolution);
+
+
+/**
+ * Get the time elapsed since system boot. Does not take into account
+ * the system-time adjustment (NTP, daylight saving times, etc...):
+ * this is the job of a system-time subsystem.
+ */
+sos_ret_t sos_time_get_now(struct sos_time *now);
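+
+/*
+ * Illustrative use (variable names are made up): to measure an
+ * elapsed duration, snapshot the kernel time twice and subtract:
+ *
+ *   struct sos_time t_start, t_end;
+ *   sos_time_get_now(& t_start);
+ *   ... do something ...
+ *   sos_time_get_now(& t_end);
+ *   sos_time_dec(& t_end, & t_start);   (t_end now holds the elapsed time)
+ */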
+
+
+
+/* =======================================================================
+ * Routines to schedule future execution of routines: "timeout" actions
+ */
+
+/* Forward declaration */
+struct sos_timeout_action;
+
+/**
+ * Prototype of a timeout routine. Called with IRQ disabled !
+ */
+typedef void (sos_timeout_routine_t)(struct sos_timeout_action *);
+
+
+/**
+ * The structure of a timeout action. This structure should have been
+ * opaque to the other parts of the kernel. We keep it public here so
+ * that struct sos_timeout_action can be allocated on the stack from
+ * other source files in the kernel. However, all the fields should be
+ * considered read-only by modules other than time.{ch}.
+ *
+ * @note After an action has been allocated (on the stack or kmalloc),
+ * it MUST be initialized with sos_time_init_action below !
+ */
+struct sos_timeout_action
+{
+ /** PUBLIC: Address of the timeout routine */
+ sos_timeout_routine_t *routine;
+
+ /** PUBLIC: (Custom) data available for this routine */
+ void *routine_data;
+
+ /** PUBLIC: 2 meanings:
+ * - before and while in the timeout list: absolute due date of the
+ * timeout action
+ * - once removed from timeout list: the time remaining in the
+ * initial timeout (might be 0 if timeout expired) at removal
+ * time
+ */
+ struct sos_time timeout;
+
+ /** PRIVATE: To chain the timeout actions */
+ struct sos_timeout_action *tmo_prev, *tmo_next;
+};
+
+
+/**
+ * Initialize a timeout action. MUST be called immediately after
+ * (stack or kmalloc) allocation of the action.
+ *
+ * @param ptr_act Pointer to the action to initialize.
+ */
+#define sos_time_init_action(ptr_act) \
+ ({ (ptr_act)->tmo_prev = (ptr_act)->tmo_next = NULL; /* return */ SOS_OK; })
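+
+/*
+ * Typical lifecycle sketch (illustrative only; 'my_act', 'my_delay',
+ * 'my_routine' and 'my_routine_data' are hypothetical names).
+ * sos_thread_sleep() in thread.c follows this pattern:
+ *
+ *   struct sos_timeout_action my_act;
+ *   struct sos_time my_delay = { .sec = 1, .nanosec = 0 };
+ *
+ *   sos_time_init_action(& my_act);
+ *   sos_time_register_action_relative(& my_act, & my_delay,
+ *                                     my_routine, my_routine_data);
+ *
+ * Later, either my_routine(& my_act) is called from the timer IRQ, or
+ * the action is cancelled with sos_time_unregister_action(& my_act).
+ * 'my_act' must remain valid (eg on the caller's stack) until one of
+ * these happens.
+ */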
+
+
+/**
+ * Add the given action in the timeout list, so that it will be
+ * triggered after the specified delay RELATIVE to the time when the
+ * function gets called. The action is always inserted in the list.
+ *
+ * @param act The action to be initialized by the function upon
+ * insertion in the timeout list.
+ *
+ * @param delay Delay until the action is fired. If 0, then it is
+ * fired at the next timer IRQ. More precisely, the action is fired at
+ * the first timer tick at which the time elapsed since registration
+ * is >= delay (ie the delay is rounded up to a whole number of ticks).
+ *
+ * @param routine The timeout routine to call when the timeout will be
+ * triggered.
+ *
+ * @param routine_data The data available to the routine when it will
+ * be called.
+ *
+ * @note 'act' MUST remain valid until it is either fired or removed
+ * (with sos_time_unregister_action)
+ */
+sos_ret_t
+sos_time_register_action_relative(struct sos_timeout_action *act,
+ const struct sos_time *delay,
+ sos_timeout_routine_t *routine,
+ void *routine_data);
+
+
+/**
+ * Add the given action in the timeout list, so that it will be
+ * triggered after the specified ABSOLUTE date (relative to system
+ * boot time). The action is always inserted in the list.
+ *
+ * @param act The action to be initialized by the function upon
+ * insertion in the timeout list.
+ *
+ * @param date Absolute date (relative to system boot time) when the
+ * action will be triggered.
+ *
+ * @param routine The timeout routine to call when the timeout will be
+ * triggered.
+ *
+ * @param routine_data The data available to the routine when it will
+ * be called.
+ *
+ * @note 'act' MUST remain valid until it is either fired or removed
+ * (with sos_time_unregister_action)
+ */
+sos_ret_t
+sos_time_register_action_absolute(struct sos_timeout_action *act,
+ const struct sos_time *date,
+ sos_timeout_routine_t *routine,
+ void *routine_data);
+
+
+/**
+ * Remove the action from the timeout list and update its timeout
+ * field so that it reflects the time remaining until its due date.
+ *
+ * @return SOS_OK on success, -SOS_EINVAL if the action is not
+ * currently registered
+ */
+sos_ret_t sos_time_unregister_action(struct sos_timeout_action *act);
+
+
+/**
+ * Timer IRQ callback. Call and remove expired actions from the list.
+ *
+ * @note The use of this function is RESERVED (to timer IRQ)
+ */
+sos_ret_t sos_time_do_tick();
+
+
+#endif /* _SOS_TIME_H_ */
diff --git a/sos-code-article6.5/sos/types.h b/sos-code-article6.5/sos/types.h
new file mode 100644
index 0000000..bf04314
--- /dev/null
+++ b/sos-code-article6.5/sos/types.h
@@ -0,0 +1,52 @@
+/* Copyright (C) 2004 The SOS Team
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ USA.
+*/
+#ifndef _SOS_TYPES_H_
+#define _SOS_TYPES_H_
+
+/**
+ * @file types.h
+ *
+ * SOS basic types definition
+ */
+
+/** Physical address */
+typedef unsigned int sos_paddr_t;
+
+/** Kernel virtual address */
+typedef unsigned int sos_vaddr_t;
+
+/** Memory size of an object (positive) */
+typedef unsigned int sos_size_t;
+/** Generic count of objects */
+typedef unsigned int sos_count_t;
+
+/** Low-level sizes */
+typedef unsigned long int sos_ui32_t; /* 32b unsigned */
+typedef unsigned short int sos_ui16_t; /* 16b unsigned */
+typedef unsigned char sos_ui8_t; /* 8b unsigned */
+typedef signed long int sos_si32_t; /* 32b signed */
+typedef signed short int sos_si16_t; /* 16b signed */
+typedef signed char sos_si8_t; /* 8b signed */
+
+typedef enum { FALSE=0, TRUE } sos_bool_t;
+
+/** Not a proper type, but highly useful for basic type
+    manipulation */
+#define NULL ((void*)0)
+
+#endif /* _SOS_TYPES_H_ */