/* fiber.c - fiber management and scheduling
 *
 * Copyright (C) 2009 Timo Teräs <timo.teras@iki.fi>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 or later as
 * published by the Free Software Foundation.
 *
 * See http://www.gnu.org/ for details.
 */

#include <errno.h>
#include <libtf/tf.h>
#include TF_UCTX_H

struct tf_scheduler {
	struct tf_list_head run_q;	/* fibers ready to run */
	struct tf_list_head sleep_q;	/* fibers blocked waiting for an event */
	struct tf_fiber *active_fiber;	/* the fiber currently executing */
	int num_fibers;			/* number of live fibers */
};

/* FIXME: should be in thread local storage */
static struct tf_scheduler *__scheduler;
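
/* Create a new fiber with private_size bytes of per-fiber data, give it its
 * two initial references (caller and scheduler) and queue it for execution.
 * Returns a pointer to the fiber's private data area. */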
void *tf_fiber_create(tf_fiber_proc fiber_main, int private_size)
{
	struct tf_scheduler *sched = __scheduler;
	struct tf_fiber *fiber;

	fiber = tf_uctx_create(fiber_main, private_size);

	/* The initial references for caller and scheduler */
	*fiber = (struct tf_fiber) {
		.ref_count = 2,
		.queue_node = TF_LIST_INITIALIZER(fiber->queue_node),
	};
	tf_list_add_tail(&fiber->queue_node, &sched->run_q);
	sched->num_fibers++;

	return fiber->data;
}
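
/* Free the fiber's context; called once the last reference is dropped. */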
void __tf_fiber_destroy(struct tf_fiber *fiber)
{
	tf_uctx_destroy(fiber);
}
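
/* Take an additional reference on a fiber, identified by the private data
 * pointer returned from tf_fiber_create(). */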
void *tf_fiber_get(void *data)
{
	struct tf_fiber *fiber = container_of(data, struct tf_fiber, data);

	tf_atomic_inc(fiber->ref_count);
	return data;
}
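
/* Drop a reference; the fiber is destroyed when the count reaches zero. */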
void tf_fiber_put(void *data)
{
	struct tf_fiber *fiber = container_of(data, struct tf_fiber, data);

	if (tf_atomic_dec(fiber->ref_count) == 0)
		__tf_fiber_destroy(fiber);
}
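
/* Run the first fiber on the run queue and requeue it according to the value
 * it transfers back: EFAULT means the fiber exited, EAGAIN that it yielded,
 * EIO that it blocked waiting for an event. */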
static void run_fiber(void)
{
	struct tf_scheduler *sched = __scheduler;
	struct tf_fiber *schedf = container_of((void*) __scheduler, struct tf_fiber, data);
	struct tf_fiber *f;

	if (tf_list_empty(&sched->run_q))
		return;

	f = tf_list_first(&sched->run_q, struct tf_fiber, queue_node);
	tf_list_del(&f->queue_node);
	sched->active_fiber = f;
	switch (tf_uctx_transfer(schedf, f, 1)) {
	case EFAULT: /* Fiber is dead */
		tf_fiber_put(f->data);
		sched->num_fibers--;
		break;
	case EAGAIN: /* Yielded, reschedule */
		tf_list_add_tail(&f->queue_node, &sched->run_q);
		break;
	case EIO: /* Blocked, in sleep */
		tf_list_add_tail(&f->queue_node, &sched->sleep_q);
		break;
	default:
		TF_BUG_ON("bad scheduler call from fiber");
	}
}
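
/* Entry point: set up a scheduler on the caller's stack, create the main
 * fiber and run the scheduling loop until no fibers remain. */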
int tf_main(tf_fiber_proc main_fiber)
{
	struct tf_uctx *ctx = alloca(sizeof(struct tf_uctx) + sizeof(struct tf_scheduler));
	struct tf_scheduler *sched = (struct tf_scheduler*) ctx->fiber.data;
	int stack_guard = STACK_GUARD;

	ctx->stack_guard = &stack_guard;
	*sched = (struct tf_scheduler){
		.run_q = TF_LIST_HEAD_INITIALIZER(sched->run_q),
		.sleep_q = TF_LIST_HEAD_INITIALIZER(sched->sleep_q),
	};
	__scheduler = sched;

	tf_fiber_put(tf_fiber_create(main_fiber, 0));
	do {
		run_fiber();
	} while (likely(sched->num_fibers));
	__scheduler = NULL;

	return 0;
}
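
/* Switch from the active fiber back to the scheduler, reporting err as the
 * reason. Returns 0 when the scheduler resumes the fiber normally, otherwise
 * the value passed by whoever transferred control back to the fiber. */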
int tf_schedule(int err)
{
	struct tf_scheduler *sched = __scheduler;
	struct tf_fiber *schedf = container_of((void*) __scheduler, struct tf_fiber, data);
	struct tf_fiber *f = sched->active_fiber;
	int r;

	r = tf_uctx_transfer(f, schedf, err);
	if (r == 1)
		return 0;
	return r;
}
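
/* Kill the given fiber (stub, not implemented yet). */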
void tf_kill(void *fiber)
{
}
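
For context, here is a minimal sketch of how this scheduler might be driven; it is not part of fiber.c. It assumes tf_fiber_proc is a plain void (*)(void *) entry point that receives the private data area returned by tf_fiber_create(), and that a fiber yields by calling tf_schedule() with one of the codes run_fiber() dispatches on; the real library may wrap these details in higher-level helpers.

#include <errno.h>
#include <stdio.h>
#include <libtf/tf.h>

/* Example fiber body; the data pointer is the private area requested in
 * tf_fiber_create() (0 bytes here, so it is unused). */
static void hello_fiber(void *data)
{
	(void) data;
	printf("hello from the fiber\n");
	tf_schedule(EAGAIN);	/* yield: run_fiber() puts us back on run_q */
	printf("fiber resumed\n");
}

int main(void)
{
	/* tf_main() runs the scheduling loop until every fiber has finished */
	return tf_main(hello_fiber);
}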