/* scheduler.c - fiber scheduling
 *
 * Copyright (C) 2009-2010 Timo Teräs <timo.teras@iki.fi>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 or later as
 * published by the Free Software Foundation.
 *
 * See http://www.gnu.org/ for details.
 */

#include <time.h>
#include <errno.h>
#include <libtf/scheduler.h>
#include <libtf/io.h>

/* FIXME: should be in thread local storage */
struct tf_scheduler *__tf_scheduler;

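/* Refresh the cached scheduler clock from CLOCK_MONOTONIC, in milliseconds. */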
static void update_time(struct tf_scheduler *sched)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	sched->scheduler_time = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

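/* Pop every heap node whose deadline has been reached and wake the fiber
 * sleeping on it. */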
static void process_heap(struct tf_scheduler *sched)
{
	struct tf_heap_node *node;
	tf_mtime_t now = sched->scheduler_time;

	while (!tf_heap_empty(&sched->heap) &&
	       tf_mtime_diff(now, tf_heap_get_value(&sched->heap)) >= 0) {
		node = tf_heap_get_node(&sched->heap);
		tf_heap_delete(node, &sched->heap);
		__tf_fiber_wakeup_heapnode(node);
	}
}

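/* Main loop of the scheduler fiber: pick a poll timeout from the run queues
 * and the timer heap, wait for I/O, fire expired timers and then yield so
 * that runnable fibers get to execute. */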
void tf_scheduler_fiber(void *data)
{
	struct tf_scheduler *sched = (struct tf_scheduler *) data;

	do {
		tf_mtime_diff_t timeout;

		update_time(sched);
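		/* Pick the poll timeout: 0 if fibers are already runnable,
		 * the delay to the nearest timer when the heap is non-empty,
		 * and -1 when there is nothing to wake up for. */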
		if (!tf_list_empty(&sched->scheduled_q) ||
		    !tf_list_empty(&sched->running_q)) {
			timeout = 0;
		} else if (!tf_heap_empty(&sched->heap)) {
			timeout = tf_mtime_diff(
				tf_heap_get_value(&sched->heap),
				tf_scheduler_get_mtime());
			if (timeout < 0)
				timeout = 0;
		} else {
			timeout = -1;
		}

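		/* Wait for I/O; if the poll ran into the timeout, advance the
		 * cached time by that amount and fire expired timers. */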
		if (tf_poll(timeout) == TF_WAKEUP_TIMEOUT &&
		    timeout >= 0) {
			sched->scheduler_time += timeout;
			process_heap(sched);
		}

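		/* Yield to runnable fibers; a kill wakeup means the scheduler
		 * is shutting down, so drop the active fiber reference and
		 * keep scheduling until the kill requests stop arriving. */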
		if (tf_fiber_yield() == TF_WAKEUP_KILL) {
			do {
				tf_fiber_put(sched->active_fiber);
				sched->active_fiber = sched;
			} while (__tf_fiber_schedule() == TF_WAKEUP_KILL);
		}
	} while (1);
}

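/* Allocate the scheduler; it is itself created as a fiber that runs
 * tf_scheduler_fiber, with its run queues initially empty. */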
struct tf_scheduler *tf_scheduler_create(void)
{
	struct tf_scheduler *sched;

	sched = __tf_fiber_create(tf_scheduler_fiber,
				  sizeof(struct tf_scheduler));
	if (sched == NULL)
		return NULL;

	*sched = (struct tf_scheduler) {
		.scheduled_q = TF_LIST_HEAD_INITIALIZER(sched->scheduled_q),
		.running_q = TF_LIST_HEAD_INITIALIZER(sched->running_q),
	};

	return sched;
}

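/* Bind a scheduler to the calling context, creating a fresh one when sched
 * is NULL. Sets up polling and the time cache; returns -EBUSY if the
 * scheduler already has a main fiber bound. */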
int  tf_scheduler_enable(struct tf_scheduler *sched)
{
	struct tf_scheduler *s = sched;

	if (s == NULL) {
		s = tf_scheduler_create();
		if (s == NULL)
			return -ENOMEM;
	}
	if (s->main_fiber != NULL)
		return -EBUSY;

	__tf_fiber_bind_scheduler(s);
	__tf_scheduler = s;
	tf_poll_init();
	update_time(s);

	if (sched != NULL)
		tf_scheduler_get(sched);

	return 0;
}

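/* Tear down the current scheduler. Only the main fiber may do this; it waits
 * for every other fiber to finish, closes the poller and drops the final
 * scheduler reference. */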
void tf_scheduler_disable(void)
{
	struct tf_scheduler *sched = __tf_scheduler;

	if (sched == NULL ||
	    sched->main_fiber != sched->active_fiber)
		return;

	/* sleep until no others */
	while (sched->num_fibers > 1)
		__tf_fiber_schedule();

	tf_poll_close();
	__tf_scheduler = NULL;
	__tf_fiber_release_scheduler(sched);
	tf_heap_destroy(&sched->heap);
	tf_fiber_put(sched);
}
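
/*
 * Typical call sequence (illustrative sketch only; error handling and fiber
 * creation are omitted since those interfaces live outside this file):
 *
 *	if (tf_scheduler_enable(NULL) < 0)
 *		return -1;
 *	... create and run fibers ...
 *	tf_scheduler_disable();
 */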