/* io-epoll.c - epoll(7) based file descriptor monitoring
 *
 * Copyright (C) 2009 Timo Teräs
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 or later as
 * published by the Free Software Foundation.
 *
 * See http://www.gnu.org/ for details.
 */

#include <errno.h>
#include <unistd.h>
#include <sys/epoll.h>

/* libtf-internal headers (names assumed): they must provide
 * struct tf_scheduler, struct tf_fd, struct tf_poll_hooks,
 * TF_BUG_ON()/TF_BUILD_BUG_ON(), likely()/unlikely() and array_size(). */
#include <libtf/defines.h>
#include <libtf/scheduler.h>
#include <libtf/io.h>

struct tf_poll_data {
	int epoll_fd;
	int num_waiters;
};

/* The scheduler reserves an opaque poll_data area; reuse it to hold the
 * epoll state of the current scheduler instance. */
static struct tf_poll_data *tf_epoll_get_data(void)
{
	struct tf_scheduler *sched = tf_scheduler_get_current();

	TF_BUILD_BUG_ON(sizeof(struct tf_poll_data) > sizeof(sched->poll_data));

	return (struct tf_poll_data *) &sched->poll_data;
}

static void tf_epoll_init(void)
{
	struct tf_poll_data *pd = tf_epoll_get_data();

	pd->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	pd->num_waiters = 0;
	TF_BUG_ON(pd->epoll_fd < 0);
}

static void tf_epoll_close(void)
{
	struct tf_poll_data *pd = tf_epoll_get_data();

	close(pd->epoll_fd);
}

/* Wait up to 'timeout' for monitored descriptors to become ready and wake
 * the fibers waiting on them. Returns TF_WAKEUP_FD if at least one fiber
 * was woken, TF_WAKEUP_TIMEOUT otherwise. */
static int tf_epoll_poll(tf_mtime_diff_t timeout)
{
	struct tf_poll_data *pd = tf_epoll_get_data();
	struct epoll_event events[64];
	struct tf_fd *fd;
	int r, i, ret;

	if (timeout == 0 && pd->num_waiters == 0)
		return TF_WAKEUP_TIMEOUT;

	ret = TF_WAKEUP_TIMEOUT;
	do {
		r = epoll_wait(pd->epoll_fd, events, array_size(events), timeout);
		if (r == 0)
			break;

		for (i = 0; i < r; i++) {
			fd = (struct tf_fd *) events[i].data.ptr;
			if (likely(fd->events & events[i].events))
				__tf_fiber_wakeup(fd->waiting_fiber, TF_WAKEUP_FD);
		}
		ret = TF_WAKEUP_FD;
		/* The event buffer was full; poll again without blocking in
		 * case more events are still pending. */
		timeout = 0;
	} while (unlikely(r == array_size(events)));

	return ret;
}

/* Register a new descriptor with epoll. Edge-triggered monitoring of both
 * read and write is enabled once at creation; fd_monitor() only selects
 * which events the waiting fiber cares about. */
static int tf_epoll_fd_created(struct tf_fd *fd)
{
	struct tf_poll_data *pd = tf_epoll_get_data();
	struct epoll_event ev;
	int r;

	ev = (struct epoll_event) {
		.events = EPOLLIN | EPOLLOUT | EPOLLET,
		.data.ptr = fd,
	};
	r = epoll_ctl(pd->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
	if (unlikely(r < 0)) {
		TF_BUG_ON(errno == EEXIST);
		r = -errno;
		return r;
	}

	return 0;
}

static int tf_epoll_fd_destroyed(struct tf_fd *fd)
{
	struct tf_poll_data *pd = tf_epoll_get_data();

	/* An autoclosed descriptor is removed from the epoll set implicitly
	 * when it is closed; only explicit removal is needed otherwise. */
	if (fd->flags & TF_FD_AUTOCLOSE)
		return 0;

	epoll_ctl(pd->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
	return 0;
}

static void tf_epoll_fd_monitor(struct tf_fd *fd, int events)
{
	struct tf_poll_data *pd = tf_epoll_get_data();

	TF_BUG_ON(fd->waiting_fiber != NULL);

	fd->events = EPOLLERR | EPOLLHUP;
	if (events & TF_POLL_READ)
		fd->events |= EPOLLIN;
	if (events & TF_POLL_WRITE)
		fd->events |= EPOLLOUT;
	fd->waiting_fiber = tf_scheduler_get_current()->active_fiber;
	pd->num_waiters++;
}

static void tf_epoll_fd_unmonitor(struct tf_fd *fd)
{
	struct tf_poll_data *pd = tf_epoll_get_data();

	fd->waiting_fiber = NULL;
	fd->events = 0;
	pd->num_waiters--;
}

struct tf_poll_hooks tf_epoll_hooks = {
	.init		= tf_epoll_init,
	.close		= tf_epoll_close,
	.poll		= tf_epoll_poll,
	.fd_created	= tf_epoll_fd_created,
	.fd_destroyed	= tf_epoll_fd_destroyed,
	.fd_monitor	= tf_epoll_fd_monitor,
	.fd_unmonitor	= tf_epoll_fd_unmonitor,
};