author    Timo Teräs <timo.teras@iki.fi>	2010-07-02 20:23:07 +0300
committer Timo Teräs <timo.teras@iki.fi>	2010-07-02 20:25:47 +0300
commit    23b95bf1a15322c2f471b80c06cb65d9b2d2a282 (patch)
tree      9bf12231db9591852e3b42ca24715d2cbaf6267b /src/io-epoll.c
parent    0183e33d9a4759764716e771b85e19f7a997b8bd (diff)
download  libtf-23b95bf1a15322c2f471b80c06cb65d9b2d2a282.tar.bz2
          libtf-23b95bf1a15322c2f471b80c06cb65d9b2d2a282.tar.xz
libtf: major redesign started (HEAD, master)
The idea is to make libtf completely multi-threaded, meaning each fiber can run concurrently in a separate thread. Quite a bit of framework is added for this and some atomic helpers are already introduced. However, io polling is busy polling for now (it will soon move to its own thread) and timeouts are still more or less broken. Oh, and the multithreading core is not there yet. Basically, we are currently mostly broken ;)
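The core of the new scheme is the zero-timeout epoll_wait() loop in tf_epoll_main() in the diff below. As a rough standalone illustration (plain epoll only, not libtf code: sched_yield() and printf() stand in for the fiber re-queueing done via tf_fiber_schedule() and the wakeup done via tf_fiber_wakeup()):

/* Standalone sketch, not libtf code: shows the zero-timeout
 * epoll_wait() busy-poll pattern used by the new tf_epoll_main(). */
#include <sys/epoll.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void busy_poll(int epoll_fd)
{
	struct epoll_event events[64];
	int r, i;

	for (;;) {
		/* timeout 0: never block the thread, just take what is ready now */
		r = epoll_wait(epoll_fd, events, ARRAY_SIZE(events), 0);
		if (r < 0)
			break;
		if (r == 0) {
			/* nothing ready: give other fibers/threads a turn */
			sched_yield();
			continue;
		}
		for (i = 0; i < r; i++)
			/* libtf would wake the fiber parked on this fd instead */
			printf("ready: %p (events 0x%x)\n",
			       events[i].data.ptr, events[i].events);
	}
}

int main(void)
{
	int epoll_fd = epoll_create1(EPOLL_CLOEXEC);

	if (epoll_fd < 0)
		return 1;
	/* ... register file descriptors with EPOLL_CTL_ADD and EPOLLET here ... */
	busy_poll(epoll_fd);
	close(epoll_fd);
	return 0;
}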
Diffstat (limited to 'src/io-epoll.c')
-rw-r--r--  src/io-epoll.c | 122
1 file changed, 45 insertions(+), 77 deletions(-)
diff --git a/src/io-epoll.c b/src/io-epoll.c
index 32aa090..1fc9ca1 100644
--- a/src/io-epoll.c
+++ b/src/io-epoll.c
@@ -16,74 +16,70 @@
#include <sys/epoll.h>
#include <libtf/io.h>
-#include <libtf/scheduler.h>
+#include <libtf/fiber.h>
-struct tf_poll_data {
+struct tf_epoll_data {
int epoll_fd;
- int num_waiters;
};
-static struct tf_poll_data *tf_epoll_get_data(void)
+static void tf_epoll_main(void *ctx)
{
- struct tf_scheduler *sched = tf_scheduler_get_current();
- TF_BUILD_BUG_ON(sizeof(struct tf_poll_data) > sizeof(sched->poll_data));
- return (struct tf_poll_data *) &sched->poll_data;
-}
-
-static void tf_epoll_init(void)
-{
- struct tf_poll_data *pd = tf_epoll_get_data();
+ struct tf_epoll_data *pd = ctx;
+ struct epoll_event events[64];
+ struct tf_fd *fd;
+ int r, i;
- pd->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
- pd->num_waiters = 0;
- TF_BUG_ON(pd->epoll_fd < 0);
-}
+ do {
+ r = epoll_wait(pd->epoll_fd, events, array_size(events), 0);
+ if (r == 0) {
+ /* FIXME: yielding is bad */
+ struct tf_fiber *self = tf_vmach_get_current_fiber();
+ tf_list_add_tail(&self->queue_node, &self->wakeup_q);
+ if (tf_fiber_schedule() == 0)
+ continue;
+ }
-static void tf_epoll_close(void)
-{
- struct tf_poll_data *pd = tf_epoll_get_data();
+ for (i = 0; i < r; i++) {
+ fd = (struct tf_fd *) events[i].data.ptr;
+ tf_fiber_wakeup(fd->fiber);
+ }
+ } while (1);
close(pd->epoll_fd);
}
-static int tf_epoll_poll(tf_mtime_diff_t timeout)
+
+static void *tf_epoll_create(void)
{
- struct tf_poll_data *pd = tf_epoll_get_data();
- struct epoll_event events[64];
- struct tf_fd *fd;
- int r, i, ret;
+ struct tf_epoll_data *d;
- if (timeout == 0 && pd->num_waiters == 0)
- return TF_WAKEUP_TIMEOUT;
+ d = tf_fiber_create(tf_epoll_main, sizeof(struct tf_epoll_data));
+ if (d == NULL)
+ return NULL;
- ret = TF_WAKEUP_TIMEOUT;
- do {
- r = epoll_wait(pd->epoll_fd, events, array_size(events), timeout);
- if (r == 0)
- break;
+ d->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+ TF_BUG_ON(d->epoll_fd < 0);
- for (i = 0; i < r; i++) {
- fd = (struct tf_fd *) events[i].data.ptr;
- if (likely(fd->events & events[i].events))
- __tf_fiber_wakeup(fd->waiting_fiber, TF_WAKEUP_FD);
- }
- ret = TF_WAKEUP_FD;
- timeout = 0;
- } while (unlikely(r == array_size(events)));
+ tf_fiber_run(tf_fiber_get(d));
- return ret;
+ return d;
}
-static int tf_epoll_fd_created(struct tf_fd *fd)
+static int tf_epoll_fd_created(void *fiber, struct tf_fd *fd)
{
- struct tf_poll_data *pd = tf_epoll_get_data();
+ struct tf_epoll_data *d = fiber;
struct epoll_event ev;
int r;
ev = (struct epoll_event) {
- .events = EPOLLIN | EPOLLOUT | EPOLLET,
+ .events = EPOLLET,
.data.ptr = fd,
};
- r = epoll_ctl(pd->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
+ if (fd->flags & TF_FD_READ)
+ ev.events |= EPOLLIN;
+ if (fd->flags & TF_FD_WRITE)
+ ev.events |= EPOLLOUT;
+
+ r = epoll_ctl(d->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
if (unlikely(r < 0)) {
TF_BUG_ON(errno == EEXIST);
r = -errno;
@@ -93,46 +89,18 @@ static int tf_epoll_fd_created(struct tf_fd *fd)
return 0;
}
-static int tf_epoll_fd_destroyed(struct tf_fd *fd)
+static int tf_epoll_fd_destroyed(void *fiber, struct tf_fd *fd)
{
- struct tf_poll_data *pd = tf_epoll_get_data();
+ struct tf_epoll_data *d = fiber;
- if (fd->flags & TF_FD_AUTOCLOSE)
- return 0;
+ if (!(fd->flags & TF_FD_AUTOCLOSE))
+ epoll_ctl(d->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- epoll_ctl(pd->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
return 0;
}
-static void tf_epoll_fd_monitor(struct tf_fd *fd, int events)
-{
- struct tf_poll_data *pd = tf_epoll_get_data();
-
- TF_BUG_ON(fd->waiting_fiber != NULL);
- fd->events = EPOLLERR | EPOLLHUP;
- if (events & TF_POLL_READ)
- fd->events |= EPOLLIN;
- if (events & TF_POLL_WRITE)
- fd->events |= EPOLLOUT;
- fd->waiting_fiber = tf_scheduler_get_current()->active_fiber;
- pd->num_waiters++;
-}
-
-static void tf_epoll_fd_unmonitor(struct tf_fd *fd)
-{
- struct tf_poll_data *pd = tf_epoll_get_data();
-
- fd->waiting_fiber = NULL;
- fd->events = 0;
- pd->num_waiters--;
-}
-
struct tf_poll_hooks tf_epoll_hooks = {
- .init = tf_epoll_init,
- .close = tf_epoll_close,
- .poll = tf_epoll_poll,
+ .create = tf_epoll_create,
.fd_created = tf_epoll_fd_created,
.fd_destroyed = tf_epoll_fd_destroyed,
- .fd_monitor = tf_epoll_fd_monitor,
- .fd_unmonitor = tf_epoll_fd_unmonitor,
};
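
The ops table above replaces the old init/close/poll/fd_monitor/fd_unmonitor hooks with a single create hook plus per-fd registration. A hypothetical consumer sketch follows (not libtf's actual registration code; the caller names are invented, and the struct layout is only inferred from the hooks assigned above, so the real libtf/io.h may differ):

/* Hypothetical consumer sketch -- not libtf code. */
struct tf_fd;

struct tf_poll_hooks {
	void *(*create)(void);
	int (*fd_created)(void *poller, struct tf_fd *fd);
	int (*fd_destroyed)(void *poller, struct tf_fd *fd);
};

extern struct tf_poll_hooks tf_epoll_hooks;

static void *poller;	/* opaque backend state returned by .create() */

static int poll_backend_start(void)
{
	/* .create() sets up the epoll fd and starts the polling fiber */
	poller = tf_epoll_hooks.create();
	return poller != NULL ? 0 : -1;
}

static int poll_backend_add(struct tf_fd *fd)
{
	/* registers fd edge-triggered for the directions in fd->flags */
	return tf_epoll_hooks.fd_created(poller, fd);
}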