diff options
author | Timo Teräs <timo.teras@iki.fi> | 2010-07-02 20:23:07 +0300 |
---|---|---|
committer | Timo Teräs <timo.teras@iki.fi> | 2010-07-02 20:25:47 +0300 |
commit | 23b95bf1a15322c2f471b80c06cb65d9b2d2a282 (patch) | |
tree | 9bf12231db9591852e3b42ca24715d2cbaf6267b /src/vmach.c | |
parent | 0183e33d9a4759764716e771b85e19f7a997b8bd (diff) | |
download | libtf-master.tar.bz2 libtf-master.tar.xz |
the idea is to make libtf completely multi-threaded, meaning each
fiber can run concurrently in a separate thread. quite a bit
of framework is added for this and some atomic helpers are already
introduced. however, io polling is busy polling for now (it will
soon move to its own thread) and timeouts are still more or less
broken. oh, and the multithreading core is not there yet. basically
we are currently mostly broken ;)
Diffstat (limited to 'src/vmach.c')
-rw-r--r-- | src/vmach.c | 120 |
1 files changed, 120 insertions, 0 deletions
diff --git a/src/vmach.c b/src/vmach.c new file mode 100644 index 0000000..a9ca446 --- /dev/null +++ b/src/vmach.c @@ -0,0 +1,120 @@ +#include <libtf/defines.h> +#include <libtf/list.h> +#include <libtf/vmach.h> +#include <libtf/io.h> +#include "uctx.h" + +__thread struct tf_fiber *tf_current_fiber; +__thread struct tf_vcpu *tf_current_vcpu; +__thread struct tf_vmach *tf_current_vmach; + +extern struct tf_poll_hooks tf_epoll_hooks; + +struct tf_vmachine { + struct tf_vmach vmach; + struct tf_uctx startup_uctx; + void *machine_init_fiber; +}; + +static void tf_vcpu_main(void *fiber_data) +{ + struct tf_vcpu *vcpu = fiber_data; + struct tf_vmach *vmach = vcpu->machine; + struct tf_fiber *self = container_of(fiber_data, struct tf_fiber, data); + struct tf_fiber *f; + + tf_current_vmach = vmach; + tf_current_vcpu = vcpu; + + while (vmach->num_user_fibers != 0) { + if (tf_list_empty(&vmach->run_q)) { + /* sleep */ + continue; + } + + f = tf_list_entry(tf_list_pop(&vmach->run_q), + struct tf_fiber, queue_node); + + f->return_context = self->context; + tf_current_fiber = f; + tf_uctx_transfer(self->context, f->context); + tf_list_splice_tail(&f->wakeup_q, &vmach->run_q); + } +} + +static void tf_vmach_main(void *fiber_data) +{ + struct tf_fiber *self = container_of(fiber_data, struct tf_fiber, data); + struct tf_vcpu *vcpu = fiber_data; + struct tf_vmach *vmach = vcpu->machine; + + tf_current_vmach = vmach; + tf_current_vcpu = vcpu; + tf_current_fiber = self; + + /* Initialize IO subsystem */ + vmach->poll_ops = &tf_epoll_hooks; + vmach->poll_fiber = vmach->poll_ops->create(); + vmach->timeout_fiber = tf_timeout_fiber_create(); + vmach->startup_fiber.timeout.manager = vmach->timeout_fiber; + + /* Run the initial fiber */ + tf_fiber_wakeup(&vmach->startup_fiber); + + /* Use main thread as a regular vcpu */ + vmach->num_user_fibers = 1; + tf_list_splice_tail(&self->wakeup_q, &vmach->run_q); + tf_vcpu_main(vcpu); + + /* Kill all stuff */ + + /* Return to main fiber */ + 
vmach->startup_fiber.return_context = NULL; + tf_current_fiber = NULL; + tf_current_vcpu = NULL; + tf_current_vmach = NULL; + + tf_uctx_transfer(self->context, vmach->startup_fiber.context); +} + +void tf_vmach_start(void) +{ + struct tf_vmachine *vmach; + struct tf_vcpu *vcpu; + + TF_BUG_ON(tf_current_vcpu != NULL); + + /* Create a self-fiber so we can surrender control to vcpu */ + vmach = calloc(1, sizeof(struct tf_vmachine)); + vmach->vmach.startup_fiber = (struct tf_fiber) { + .ref_count = 1, + .queue_node = TF_LIST_INITIALIZER(vmach->vmach.startup_fiber.queue_node), + .wakeup_q = TF_LIST_HEAD_INITIALIZER(vmach->vmach.startup_fiber.wakeup_q), + .context = tf_uctx_create_self(&vmach->startup_uctx), + }; + tf_list_init_head(&vmach->vmach.run_q); + vcpu = tf_fiber_create(tf_vmach_main, sizeof(struct tf_vcpu)); + vmach->machine_init_fiber = vcpu; + vcpu->machine = &vmach->vmach; + + /* Create manager fiber to initialize vcpu */ + tf_uctx_transfer(vmach->vmach.startup_fiber.context, + container_of((void *) vcpu, struct tf_fiber, data)->context); +} + +void tf_vmach_stop(void) +{ + struct tf_fiber *self = tf_vmach_get_current_fiber(); + struct tf_vmach *vmach = tf_vmach_get_current(); + + TF_BUG_ON(self != &vmach->startup_fiber); + + /* Wait for the vmachine to stop */ + tf_vmach_get_current()->num_user_fibers--; + while (self->return_context != NULL) + tf_fiber_schedule(); + + /* And clean up */ + tf_uctx_destroy(vmach->startup_fiber.context); + free(vmach); +} |