summaryrefslogtreecommitdiffstats
path: root/src/fiber.c
diff options
context:
space:
mode:
authorTimo Teras <timo.teras@iki.fi>2009-11-25 10:52:15 +0200
committerTimo Teras <timo.teras@iki.fi>2009-11-25 10:52:15 +0200
commitfc1044daf51f32b9d85f8497e4e0bd5a3c1e7fe9 (patch)
tree52e11c88f17c47c0d086761e50b266f5c5ccd061 /src/fiber.c
parentcec85dedb7fd66cf2c23cafadd7c53eb7afed78f (diff)
downloadlibtf-fc1044daf51f32b9d85f8497e4e0bd5a3c1e7fe9.tar.bz2
libtf-fc1044daf51f32b9d85f8497e4e0bd5a3c1e7fe9.tar.xz
libtf: implement basic file i/o with epoll
some sketching of i/o api, and implement basic read and write functionality. integrate polling to scheduler and an epoll based polling mechanism.
Diffstat (limited to 'src/fiber.c')
-rw-r--r--src/fiber.c85
1 files changed, 61 insertions, 24 deletions
diff --git a/src/fiber.c b/src/fiber.c
index 72da440..15c533a 100644
--- a/src/fiber.c
+++ b/src/fiber.c
@@ -13,11 +13,12 @@
#include <time.h>
#include <errno.h>
#include <unistd.h>
-#include <libtf/tf.h>
-#include <libtf/heap.h>
+#include <libtf/fiber.h>
+#include <libtf/io.h>
struct tf_fiber {
unsigned int ref_count;
+ int wakeup_type;
struct tf_list_node queue_node;
struct tf_heap_node heap_node;
char data[TF_EMPTY_ARRAY];
@@ -85,16 +86,13 @@ static void run_fiber(struct tf_scheduler *sched, struct tf_fiber *f)
struct tf_fiber *schedf = container_of((void*) tf_get_scheduler(), struct tf_fiber, data);
sched->active_fiber = f;
- switch (tf_uctx_transfer(schedf, f, 1)) {
- case EFAULT: /* Fiber is dead */
+ switch (tf_uctx_transfer(schedf, f, f->wakeup_type)) {
+ case TF_WAKEUP_KILL:
tf_fiber_put(f->data);
sched->num_fibers--;
break;
- case EAGAIN: /* Yielded, reshedule */
- tf_list_add_tail(&f->queue_node, &sched->run_q);
- break;
- case EIO: /* Blocked, in sleep */
- tf_list_add_tail(&f->queue_node, &sched->sleep_q);
+ case TF_WAKEUP_IMMEDIATE:
+ case TF_WAKEUP_TIMEOUT:
break;
default:
TF_BUG_ON("bad scheduler call from fiber");
@@ -108,7 +106,7 @@ static void process_heap(struct tf_scheduler *sched)
tf_mtime_t now = tf_mtime();
while (!tf_heap_empty(&sched->heap) &&
- tf_mtime_diff(now, tf_heap_get_value(&sched->heap)) > 0) {
+ tf_mtime_diff(now, tf_heap_get_value(&sched->heap)) >= 0) {
node = tf_heap_get_node(&sched->heap);
f = container_of(node, struct tf_fiber, heap_node);
run_fiber(sched, f);
@@ -135,9 +133,9 @@ int tf_main(tf_fiber_proc main_fiber)
ctx->stack_guard = &stack_guard;
*sched = (struct tf_scheduler){
.run_q = TF_LIST_HEAD_INITIALIZER(sched->run_q),
- .sleep_q = TF_LIST_HEAD_INITIALIZER(sched->sleep_q),
};
__tf_scheduler = sched;
+ tf_poll_init();
update_time(sched);
tf_fiber_put(tf_fiber_create(main_fiber, 0));
do {
@@ -148,47 +146,86 @@ int tf_main(tf_fiber_proc main_fiber)
timeout = 0;
} else if (!tf_heap_empty(&sched->heap)) {
timeout = tf_mtime_diff(tf_heap_get_value(&sched->heap),
- sched->scheduler_time);
+ tf_mtime());
if (timeout < 0)
timeout = 0;
} else
timeout = -1;
- if (timeout > 0)
- usleep(timeout * 1000);
-
- process_heap(sched);
+ if (tf_poll(timeout) == TF_WAKEUP_TIMEOUT) {
+ sched->scheduler_time += timeout;
+ process_heap(sched);
+ }
process_runq(sched);
} while (likely(sched->num_fibers));
+ tf_poll_close();
__tf_scheduler = NULL;
return 0;
}
-int tf_schedule(int err)
+int tf_schedule(int wakeup)
{
struct tf_scheduler *sched = tf_get_scheduler();
struct tf_fiber *schedf = container_of((void*) sched, struct tf_fiber, data);
struct tf_fiber *f = sched->active_fiber;
- int r;
- r = tf_uctx_transfer(f, schedf, err);
- if (r == 1)
- return 0;
+ if (wakeup != TF_WAKEUP_TIMEOUT)
+ tf_heap_delete(&f->heap_node, &sched->heap);
+ f->wakeup_type = TF_WAKEUP_NONE;
- return r;
+ return tf_uctx_transfer(f, schedf, wakeup);
}
-int tf_msleep(int milliseconds)
+int tf_schedule_timeout(int milliseconds)
{
struct tf_scheduler *sched = tf_get_scheduler();
struct tf_fiber *f = sched->active_fiber;
+ if (milliseconds <= 0) {
+ tf_heap_delete(&f->heap_node, &sched->heap);
+ return TF_WAKEUP_IMMEDIATE;
+ }
tf_heap_change(&f->heap_node, &sched->heap, tf_mtime() + milliseconds);
+ return TF_WAKEUP_TIMEOUT;
+}
+
+void tf_wakeup(struct tf_fiber *fiber, int wakeup_type)
+{
+ struct tf_scheduler *sched = tf_get_scheduler();
+
+ if (fiber->wakeup_type == TF_WAKEUP_NONE) {
+ fiber->wakeup_type = wakeup_type;
+ tf_list_add_tail(&fiber->queue_node, &sched->run_q);
+ }
+}
+
+void tf_exit(void)
+{
+ struct tf_scheduler *sched = tf_get_scheduler();
+ struct tf_fiber *f = sched->active_fiber;
- return tf_schedule(EIO);
+ tf_heap_delete(&f->heap_node, &sched->heap);
+ tf_schedule(TF_WAKEUP_KILL);
+ TF_BUG_ON(1);
}
void tf_kill(void *fiber)
{
}
+
+int tf_yield(void)
+{
+ struct tf_scheduler *sched = tf_get_scheduler();
+ struct tf_fiber *f = sched->active_fiber;
+
+ tf_list_add_tail(&f->queue_node, &sched->run_q);
+ return tf_schedule(TF_WAKEUP_IMMEDIATE);
+}
+
+int tf_msleep(int milliseconds)
+{
+ tf_schedule_timeout(milliseconds);
+ return tf_schedule(TF_WAKEUP_TIMEOUT);
+}
+