aboutsummaryrefslogtreecommitdiff
path: root/src/kernel
diff options
context:
space:
mode:
authorAlex Auvolat <alex.auvolat@ens.fr>2015-03-08 19:07:48 +0100
committerAlex Auvolat <alex.auvolat@ens.fr>2015-03-08 19:07:48 +0100
commit6dd488b87fdc47fb377ba648a6cd598bdab87f59 (patch)
tree2e69225353054eb43a9869af4ca9766a0f39c828 /src/kernel
parentbcee004478c6448541ce583e75c706e185190800 (diff)
downloadkogata-6dd488b87fdc47fb377ba648a6cd598bdab87f59.tar.gz
kogata-6dd488b87fdc47fb377ba648a6cd598bdab87f59.zip
Implement select ; add two tests for channels.
Diffstat (limited to 'src/kernel')
-rw-r--r--src/kernel/core/idt.c15
-rw-r--r--src/kernel/core/thread.c86
-rw-r--r--src/kernel/dev/pciide.c1
-rw-r--r--src/kernel/fs/iso9660.c2
-rw-r--r--src/kernel/include/thread.h4
-rw-r--r--src/kernel/include/vfs.h2
-rw-r--r--src/kernel/user/ipc.c22
-rw-r--r--src/kernel/user/nullfs.c2
-rw-r--r--src/kernel/user/syscall.c54
-rw-r--r--src/kernel/user/vfs.c9
10 files changed, 162 insertions, 35 deletions
diff --git a/src/kernel/core/idt.c b/src/kernel/core/idt.c
index fcf8074..6bfe8b5 100644
--- a/src/kernel/core/idt.c
+++ b/src/kernel/core/idt.c
@@ -119,12 +119,17 @@ void idt_irq_handler(registers_t *regs) {
}
outb(0x20, 0x20);
- if (regs->err_code != 0) dbg_printf("IRQ%d\n", regs->err_code);
- if (irq_handlers[regs->err_code] != 0) {
- irq_handlers[regs->err_code](regs);
- }
+ if (regs->err_code == 0) {
+ irq0_handler(regs, st);
+ } else {
+ dbg_printf("IRQ%d\n", regs->err_code);
- exit_critical(st);
+ if (irq_handlers[regs->err_code] != 0) {
+ irq_handlers[regs->err_code](regs);
+ }
+
+ exit_critical(st);
+ }
// maybe exit
if (current_thread != 0 && regs->eip < K_HIGHHALF_ADDR && current_thread->must_exit) {
diff --git a/src/kernel/core/thread.c b/src/kernel/core/thread.c
index d59b71d..58c92d3 100644
--- a/src/kernel/core/thread.c
+++ b/src/kernel/core/thread.c
@@ -147,7 +147,8 @@ static void run_thread(void (*entry)(void*), void* data) {
switch_pagedir(get_kernel_pagedir());
- asm volatile("sti");
+ exit_critical(CL_USER);
+
entry(data);
exit();
@@ -192,11 +193,10 @@ thread_t *new_thread(entry_t entry, void* data) {
t->state = T_STATE_LOADING;
t->last_ran = 0;
- t->waiting_on = 0;
t->must_exit = false;
t->current_pd_d = get_kernel_pagedir();
- t->critical_level = CL_USER;
+ t->critical_level = CL_EXCL;
// used by user processes
t->proc = 0;
@@ -220,8 +220,11 @@ static void delete_thread(thread_t *t) {
// SETUP CODE //
// ========== //
-static void irq0_handler(registers_t *regs) {
+void irq0_handler(registers_t *regs, int crit_level) {
notify_time_pass(1000000 / TASK_SWITCH_FREQUENCY);
+
+ exit_critical(crit_level);
+
if (current_thread != 0 && current_thread->critical_level == CL_USER) {
save_context_and_enter_scheduler(&current_thread->ctx);
}
@@ -231,7 +234,7 @@ void threading_setup(entry_t cont, void* arg) {
ASSERT(waiters != 0);
set_pit_frequency(TASK_SWITCH_FREQUENCY);
- idt_set_irq_handler(IRQ0, irq0_handler);
+ // no need to set irq0 handler
thread_t *t = new_thread(cont, arg);
ASSERT(t != 0);
@@ -267,34 +270,59 @@ void yield() {
}
bool wait_on(void* x) {
+ return wait_on_many(&x, 1);
+}
+
+bool wait_on_many(void** x, size_t n) {
ASSERT(current_thread != 0 && current_thread->critical_level != CL_EXCL);
+ ASSERT(n > 0);
mutex_lock(&waiters_mutex);
- void* prev_th = hashtbl_find(waiters, x);
- if (prev_th == 0) {
- bool add_ok = hashtbl_add(waiters, x, (void*)1);
- if (!add_ok) return false; // should not happen to often, I hope
- } else if (prev_th != (void*)1) {
+ // ---- Check we can wait on all the requested objects
+ bool ok = true;
+ for (size_t i = 0; ok && i < n; i++) {
+ void* prev_th = hashtbl_find(waiters, x[i]);
+ if (prev_th == 0) {
+ bool add_ok = hashtbl_add(waiters, x[i], (void*)1);
+ if (!add_ok) {
+ ok = false;
+ }
+ } else if (prev_th != (void*)1) {
+ ok = false;
+ }
+ }
+ if (!ok) {
mutex_unlock(&waiters_mutex);
return false;
}
+ // ---- Set ourselves as the waiting thread for all the requested objects
int st = enter_critical(CL_NOSWITCH);
- if (current_thread->must_exit) return false;
-
- current_thread->waiting_on = x;
+ for (size_t i = 0; i < n; i++) {
+ ASSERT(hashtbl_change(waiters, x[i], current_thread));
+ }
- ASSERT(hashtbl_change(waiters, x, current_thread));
+ // ---- Go to sleep
mutex_unlock(&waiters_mutex);
current_thread->state = T_STATE_PAUSED;
save_context_and_enter_scheduler(&current_thread->ctx);
+ // ---- Remove ourselves from the list
+ mutex_lock(&waiters_mutex);
+
+ for (size_t i = 0; i < n; i++) {
+ ASSERT(hashtbl_change(waiters, x[i], (void*)1));
+ }
+
+ mutex_unlock(&waiters_mutex);
exit_critical(st);
+ // ---- Check that we weren't woken up because of a kill request
if (current_thread->must_exit) return false;
+
return true;
}
@@ -333,26 +361,27 @@ void exit() {
bool resume_on(void* x) {
thread_t *thread;
- { mutex_lock(&waiters_mutex);
+ bool ret = false;
- thread = hashtbl_find(waiters, x);
- hashtbl_change(waiters, x, (void*)1);
+ mutex_lock(&waiters_mutex);
+ int st = enter_critical(CL_NOINT);
- mutex_unlock(&waiters_mutex); }
-
- if (thread == 0 || thread == (void*)1) return false;
+ thread = hashtbl_find(waiters, x);
- { int st = enter_critical(CL_NOINT);
+ if (thread != 0 && thread != (void*)1) {
+ if (thread->state == T_STATE_PAUSED) {
+ thread->state = T_STATE_RUNNING;
- ASSERT(thread->state == T_STATE_PAUSED);
- thread->state = T_STATE_RUNNING;
- thread->waiting_on = 0;
+ enqueue_thread(thread, false);
- enqueue_thread(thread, false);
+ ret = true;
+ }
+ }
- exit_critical(st); }
+ mutex_unlock(&waiters_mutex);
+ exit_critical(st);
- return true;
+ return ret;
}
void kill_thread(thread_t *thread) {
@@ -365,7 +394,8 @@ void kill_thread(thread_t *thread) {
int i = 0;
while (thread->state != T_STATE_FINISHED) {
if (thread->state == T_STATE_PAUSED) {
- resume_on(thread->waiting_on);
+ thread->state = T_STATE_RUNNING;
+ enqueue_thread(thread, false);
}
yield();
if (i++ > 100) dbg_printf("Thread 0x%p must be killed but will not exit.\n", thread);
diff --git a/src/kernel/dev/pciide.c b/src/kernel/dev/pciide.c
index 56d4e4f..8fcaf02 100644
--- a/src/kernel/dev/pciide.c
+++ b/src/kernel/dev/pciide.c
@@ -677,6 +677,7 @@ static fs_node_ops_t ide_vfs_node_ops = {
.ioctl = ide_vfs_ioctl,
.close = ide_vfs_close,
.readdir = 0,
+ .poll = 0,
};
void ide_register_device(ide_controller_t *c, uint8_t device, fs_t *iofs) {
diff --git a/src/kernel/fs/iso9660.c b/src/kernel/fs/iso9660.c
index 89bfdf8..a3db67e 100644
--- a/src/kernel/fs/iso9660.c
+++ b/src/kernel/fs/iso9660.c
@@ -39,6 +39,7 @@ static fs_node_ops_t iso9660_dir_ops = {
.read = 0,
.write = 0,
.ioctl = 0,
+ .poll = 0,
};
static fs_node_ops_t iso9660_file_ops = {
@@ -54,6 +55,7 @@ static fs_node_ops_t iso9660_file_ops = {
.read = iso9660_file_read,
.write = 0,
.ioctl = 0,
+ .poll = 0,
};
void register_iso9660_driver() {
diff --git a/src/kernel/include/thread.h b/src/kernel/include/thread.h
index 09995fd..f9df0ea 100644
--- a/src/kernel/include/thread.h
+++ b/src/kernel/include/thread.h
@@ -36,7 +36,6 @@ typedef struct thread {
struct thread *next_in_queue;
struct thread *next_in_proc;
- void* waiting_on;
bool must_exit;
} thread_t;
@@ -46,6 +45,8 @@ void threading_setup(entry_t cont, void* data); // never returns
thread_t *new_thread(entry_t entry, void* data); // thread is PAUSED, and must be started with start_thread
void start_thread(thread_t *t);
+void irq0_handler(registers_t *regs, int crit_level);
+
extern thread_t *current_thread;
void yield();
@@ -60,6 +61,7 @@ void usleep(int usecs);
// killed and must terminate its kernel-land processing as soon as possible.
bool wait_on(void* x); // true : resumed normally, false : resumed because thread was killed, or someone else already waiting
+bool wait_on_many(void** x, size_t count); // true only if we could wait on ALL objects
bool resume_on(void* x);
void kill_thread(thread_t *thread); // cannot be called for current thread
diff --git a/src/kernel/include/vfs.h b/src/kernel/include/vfs.h
index 371ffb7..a56ecae 100644
--- a/src/kernel/include/vfs.h
+++ b/src/kernel/include/vfs.h
@@ -86,6 +86,7 @@ typedef struct fs_node_ops {
size_t (*read)(fs_handle_t *f, size_t offset, size_t len, char* buf);
size_t (*write)(fs_handle_t *f, size_t offset, size_t len, const char* buf);
bool (*readdir)(fs_handle_t *f, size_t ent_no, dirent_t *d);
+ int (*poll)(fs_handle_t *f, void** out_wait_obj);
void (*close)(fs_handle_t *f);
bool (*stat)(fs_node_ptr n, stat_t *st);
@@ -181,5 +182,6 @@ size_t file_read(fs_handle_t *f, size_t offset, size_t len, char* buf);
size_t file_write(fs_handle_t *f, size_t offset, size_t len, const char* buf);
int file_ioctl(fs_handle_t *f, int command, void* data);
bool file_readdir(fs_handle_t *f, size_t ent_no, dirent_t *d);
+int file_poll(fs_handle_t *f, void** out_wait_obj); // just polls the file & returns a mask of SEL_* (see <fs.h>)
/* vim: set ts=4 sw=4 tw=0 noet :*/
diff --git a/src/kernel/user/ipc.c b/src/kernel/user/ipc.c
index 648fdf2..dce1847 100644
--- a/src/kernel/user/ipc.c
+++ b/src/kernel/user/ipc.c
@@ -11,6 +11,7 @@
static size_t channel_read(fs_handle_t *c, size_t offset, size_t len, char* buf);
static size_t channel_write(fs_handle_t *c, size_t offset, size_t len, const char* buf);
+static int channel_poll(fs_handle_t *c, void** out_wait_obj);
static bool channel_stat(fs_node_ptr c, stat_t *st);
static void channel_close(fs_handle_t *c);
@@ -18,6 +19,7 @@ static fs_node_ops_t channel_ops = {
.read = channel_read,
.write = channel_write,
.close = channel_close,
+ .poll = channel_poll,
.open = 0,
.readdir = 0,
.ioctl = 0,
@@ -157,6 +159,20 @@ size_t channel_write(fs_handle_t *h, size_t offset, size_t req_len, const char*
return ret;
}
+int channel_poll(fs_handle_t *h, void** out_wait_obj) {
+ channel_t *c = (channel_t*)h->data;
+
+ int ret = 0;
+
+ if (c->other_side == 0) ret |= SEL_ERROR;
+ if (c->other_side && c->other_side->buf_used < CHANNEL_BUFFER_SIZE) ret |= SEL_WRITE;
+ if (c->buf_used > 0) ret |= SEL_READ;
+
+ if (out_wait_obj) *out_wait_obj = c;
+
+ return ret;
+}
+
bool channel_stat(fs_node_ptr ch, stat_t *st) {
channel_t *c = (channel_t*)ch;
@@ -176,7 +192,11 @@ void channel_close(fs_handle_t *ch) {
mutex_lock(&c->lock);
- c->other_side->other_side = 0;
+ if (c->other_side) {
+ resume_on(c->other_side);
+ c->other_side->other_side = 0;
+ }
+
free(c);
}
diff --git a/src/kernel/user/nullfs.c b/src/kernel/user/nullfs.c
index 60872ab..0dc1b22 100644
--- a/src/kernel/user/nullfs.c
+++ b/src/kernel/user/nullfs.c
@@ -53,6 +53,7 @@ static fs_node_ops_t nullfs_d_ops = {
.read = 0,
.write = 0,
.ioctl = 0,
+ .poll = 0,
};
static fs_node_ops_t nullfs_f_ops = {
@@ -68,6 +69,7 @@ static fs_node_ops_t nullfs_f_ops = {
.close = nullfs_f_close,
.readdir = 0,
.ioctl =0,
+ .poll = 0,
};
diff --git a/src/kernel/user/syscall.c b/src/kernel/user/syscall.c
index c352ff4..67ed317 100644
--- a/src/kernel/user/syscall.c
+++ b/src/kernel/user/syscall.c
@@ -6,6 +6,7 @@
#include <ipc.h>
#include <sct.h>
+#include <worker.h>
typedef struct {
uint32_t sc_id, a, b, c, d, e; // a: ebx, b: ecx, c: edx, d: esi, e: edi
@@ -282,6 +283,58 @@ static uint32_t get_mode_sc(sc_args_t args) {
return file_get_mode(h);
}
+static uint32_t select_sc(sc_args_t args) {
+ sel_fd_t *fds = (sel_fd_t*)args.a;
+ size_t n = args.b;
+ int timeout = args.c;
+
+ probe_for_write(fds, n * sizeof(sel_fd_t));
+
+ uint64_t select_begin_time = get_kernel_time();
+
+ void** wait_objs = (void**)malloc((n+1) * sizeof(void*));
+ if (!wait_objs) return false;
+
+ bool ret = false;
+
+ int st = enter_critical(CL_NOSWITCH);
+
+ while (true) {
+ // ---- Poll FDs, if any is ok then return it
+ size_t n_wait_objs = 0;
+ if (timeout > 0) wait_objs[n_wait_objs++] = current_thread;
+ for (size_t i = 0; i < n; i++) {
+ fs_handle_t *h = proc_read_fd(current_process(), fds[i].fd);
+ if (h) {
+ fds[i].got_flags = file_poll(h, &wait_objs[n_wait_objs]);
+ if (wait_objs[n_wait_objs]) n_wait_objs++;
+ if (fds[i].got_flags & fds[i].req_flags) ret = true;
+ }
+ }
+
+ uint64_t time = get_kernel_time();
+
+ // ---- If none of the handles given is a valid handle, return false
+ if (n_wait_objs == 0) break;
+ // ---- If any is ok, return true
+ if (ret) break;
+ // ---- If the timeout is over, return false
+ if (timeout >= 0 && time - select_begin_time >= (uint64_t)timeout) break;
+
+ // ---- Do a wait, if interrupted (killed or whatever) return false
+ void resume_on_v(void*x) {
+ resume_on(x);
+ }
+ if (timeout > 0) worker_push_in(time - select_begin_time - timeout, resume_on_v, current_thread);
+ if (!wait_on_many(wait_objs, n_wait_objs)) break;
+ }
+
+ exit_critical(st);
+
+ free(wait_objs);
+ return ret;
+}
+
// ---- IPC
static uint32_t make_channel_sc(sc_args_t args) {
@@ -642,6 +695,7 @@ void setup_syscall_table() {
sc_handlers[SC_STAT_OPEN] = stat_open_sc;
sc_handlers[SC_IOCTL] = ioctl_sc;
sc_handlers[SC_GET_MODE] = get_mode_sc;
+ sc_handlers[SC_SELECT] = select_sc;
sc_handlers[SC_MK_CHANNEL] = make_channel_sc;
sc_handlers[SC_GEN_TOKEN] = gen_token_sc;
diff --git a/src/kernel/user/vfs.c b/src/kernel/user/vfs.c
index 4e8cf53..05b4a2c 100644
--- a/src/kernel/user/vfs.c
+++ b/src/kernel/user/vfs.c
@@ -494,4 +494,13 @@ bool file_readdir(fs_handle_t *f, size_t ent_no, dirent_t *d) {
return f->ops->readdir && f->ops->readdir(f, ent_no, d);
}
+int file_poll(fs_handle_t *f, void** out_wait_obj) {
+ if (!f->ops->poll) {
+ if (out_wait_obj) *out_wait_obj = 0;
+ return 0;
+ }
+
+ return f->ops->poll(f, out_wait_obj);
+}
+
/* vim: set ts=4 sw=4 tw=0 noet :*/