author | Timo Teras <timo.teras@iki.fi> | 2009-11-19 14:32:11 +0200
committer | Timo Teras <timo.teras@iki.fi> | 2009-11-24 08:12:45 +0200
commit | e4e54c2ec744e884f6f55c135bea78e815d28d6c (patch)
tree | 3278c0bdb3eedc47eaf71ef53b9ff849cb136593 /src/fiber.c
download | libtf-e4e54c2ec744e884f6f55c135bea78e815d28d6c.tar.bz2 libtf-e4e54c2ec744e884f6f55c135bea78e815d28d6c.tar.xz
libtf: initial commit
libtf is to be a user-space cooperative threading library similar
to State Threads (http://state-threads.sourceforge.net/), but
with additional support for multiple cores, using better
algorithms and taking advantage of new Linux kernel syscalls
such as eventfd, signalfd and epoll (edge-triggered mode).
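None of that event-loop plumbing is part of this commit yet; purely to illustrate the kernel facilities named above, registering an eventfd with an edge-triggered epoll instance looks roughly like this (generic Linux API usage, not libtf code):

```c
/* Generic Linux API illustration, not code from this repository:
 * an eventfd registered with an edge-triggered epoll instance. */
#include <sys/epoll.h>
#include <sys/eventfd.h>

static int make_edge_triggered_waiter(void)
{
	int ep = epoll_create1(0);
	int ev = eventfd(0, EFD_NONBLOCK);
	struct epoll_event e = {
		.events = EPOLLIN | EPOLLET,	/* edge-triggered readiness */
		.data.fd = ev,
	};

	/* minimal error handling for brevity */
	if (ep < 0 || ev < 0 || epoll_ctl(ep, EPOLL_CTL_ADD, ev, &e) < 0)
		return -1;
	return ep;
}
```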
The initial implementation has setjmp-based user-space context
switching and a trivial test case. Works on Linux/x86.
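The setjmp-based context transfer itself lives behind TF_UCTX_H and is not shown in this diff; a minimal sketch of the general technique, using only standard setjmp/longjmp and hypothetical names, could look like this:

```c
/* Hypothetical sketch of setjmp/longjmp context transfer; libtf's real
 * implementation is behind TF_UCTX_H and also sets up per-fiber stacks.
 * Assumes 'to' was already saved with setjmp and 'value' is non-zero. */
#include <setjmp.h>

struct uctx {
	jmp_buf env;
};

/* Suspend 'from', resume 'to' passing 'value'; returns the value passed
 * by whoever later transfers control back to 'from'. */
static int uctx_transfer(struct uctx *from, struct uctx *to, int value)
{
	int r = setjmp(from->env);
	if (r == 0)
		longjmp(to->env, value);
	return r;
}
```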
TFbuild borrows ideas from other build systems, notably Kbuild,
but its inner workings are quite different. All build files are
included (using macro trickery) instead of invoking make
recursively, so the build dependency graph is complete and should
yield good make performance. Parallel builds should also work.
Diffstat (limited to 'src/fiber.c')
-rw-r--r-- | src/fiber.c | 131
1 file changed, 131 insertions, 0 deletions
diff --git a/src/fiber.c b/src/fiber.c
new file mode 100644
index 0000000..0db2984
--- /dev/null
+++ b/src/fiber.c
@@ -0,0 +1,131 @@
+/* fiber.c - fiber management and scheduling
+ *
+ * Copyright (C) 2009 Timo Teräs <timo.teras@iki.fi>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later as
+ * published by the Free Software Foundation.
+ *
+ * See http://www.gnu.org/ for details.
+ */
+#include <errno.h>
+#include <libtf/tf.h>
+#include TF_UCTX_H
+
+struct tf_scheduler {
+	struct tf_list_head	run_q;
+	struct tf_list_head	sleep_q;
+
+	struct tf_fiber		*active_fiber;
+	int			num_fibers;
+};
+
+/* FIXME: should be in thread local storage */
+static struct tf_scheduler *__scheduler;
+
+void *tf_fiber_create(tf_fiber_proc fiber_main, int private_size)
+{
+	struct tf_scheduler *sched = __scheduler;
+	struct tf_fiber *fiber;
+
+	fiber = tf_uctx_create(fiber_main, private_size);
+
+	/* The initial references for caller and scheduler */
+	*fiber = (struct tf_fiber) {
+		.ref_count = 2,
+		.queue_node = TF_LIST_INITIALIZER(fiber->queue_node),
+	};
+
+	tf_list_add_tail(&fiber->queue_node, &sched->run_q);
+	sched->num_fibers++;
+
+	return fiber->data;
+}
+
+void __tf_fiber_destroy(struct tf_fiber *fiber)
+{
+	tf_uctx_destroy(fiber);
+}
+
+void *tf_fiber_get(void *data)
+{
+	struct tf_fiber *fiber = container_of(data, struct tf_fiber, data);
+	tf_atomic_inc(fiber->ref_count);
+	return data;
+}
+
+void tf_fiber_put(void *data)
+{
+	struct tf_fiber *fiber = container_of(data, struct tf_fiber, data);
+	if (tf_atomic_dec(fiber->ref_count) == 0)
+		__tf_fiber_destroy(fiber);
+}
+
+static void run_fiber(void)
+{
+	struct tf_scheduler *sched = __scheduler;
+	struct tf_fiber *schedf = container_of((void*) __scheduler, struct tf_fiber, data);
+	struct tf_fiber *f;
+
+	if (tf_list_empty(&sched->run_q))
+		return;
+
+	f = tf_list_first(&sched->run_q, struct tf_fiber, queue_node);
+	tf_list_del(&f->queue_node);
+
+	sched->active_fiber = f;
+	switch (tf_uctx_transfer(schedf, f, 1)) {
+	case EFAULT: /* Fiber is dead */
+		tf_fiber_put(f->data);
+		sched->num_fibers--;
+		break;
+	case EAGAIN: /* Yielded, reshedule */
+		tf_list_add_tail(&f->queue_node, &sched->run_q);
+		break;
+	case EIO: /* Blocked, in sleep */
+		tf_list_add_tail(&f->queue_node, &sched->sleep_q);
+		break;
+	default:
+		TF_BUG_ON("bad scheduler call from fiber");
+	}
+}
+
+int tf_main(tf_fiber_proc main_fiber)
+{
+	struct tf_uctx *ctx = alloca(sizeof(struct tf_uctx) + sizeof(struct tf_scheduler));
+	struct tf_scheduler *sched = (struct tf_scheduler*) ctx->fiber.data;
+	int stack_guard = STACK_GUARD;
+
+	ctx->stack_guard = &stack_guard;
+	*sched = (struct tf_scheduler){
+		.run_q = TF_LIST_HEAD_INITIALIZER(sched->run_q),
+		.sleep_q = TF_LIST_HEAD_INITIALIZER(sched->sleep_q),
+	};
+	__scheduler = sched;
+	tf_fiber_put(tf_fiber_create(main_fiber, 0));
+	do {
+		run_fiber();
+	} while (likely(sched->num_fibers));
+	__scheduler = NULL;
+
+	return 0;
+}
+
+int tf_schedule(int err)
+{
+	struct tf_scheduler *sched = __scheduler;
+	struct tf_fiber *schedf = container_of((void*) __scheduler, struct tf_fiber, data);
+	struct tf_fiber *f = sched->active_fiber;
+	int r;
+
+	r = tf_uctx_transfer(f, schedf, err);
+	if (r == 1)
+		return 0;
+
+	return r;
+}
+
+void tf_kill(void *fiber)
+{
+}
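For orientation, a minimal usage sketch of the API introduced by this file might look as follows. The exact tf_fiber_proc signature is not visible in this diff, so the fiber prototype and the use of tf_schedule(EAGAIN) as a plain yield are assumptions based on run_fiber() and tf_schedule() above:

```c
/* Hypothetical example, not from the repository: assumes tf_fiber_proc
 * receives the fiber's private data and that tf_schedule(EAGAIN) yields,
 * letting run_fiber() put the caller back on the run queue. */
#include <errno.h>
#include <libtf/tf.h>

static void hello_fiber(void *data)
{
	int i;

	for (i = 0; i < 3; i++)
		tf_schedule(EAGAIN);	/* yield back to the scheduler */
}

int main(void)
{
	/* tf_main() creates the main fiber and runs the scheduler loop
	 * until num_fibers drops to zero. */
	return tf_main(hello_fiber);
}
```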