summaryrefslogtreecommitdiffstats
path: root/src/uctx.h
diff options
context:
space:
mode:
authorTimo Teräs <timo.teras@iki.fi>2010-07-02 20:23:07 +0300
committerTimo Teräs <timo.teras@iki.fi>2010-07-02 20:25:47 +0300
commit23b95bf1a15322c2f471b80c06cb65d9b2d2a282 (patch)
tree9bf12231db9591852e3b42ca24715d2cbaf6267b /src/uctx.h
parent0183e33d9a4759764716e771b85e19f7a997b8bd (diff)
downloadlibtf-master.tar.bz2
libtf-master.tar.xz
libtf: major redesign started (HEAD, master)
The idea is to make libtf completely multi-threaded, meaning each fiber can run concurrently in a separate thread. Quite a bit of framework is added for this, and some atomic helpers are already introduced. However, I/O polling is busy-polling for now (it will soon move to its own thread) and timeouts are still more or less broken. Also, the multithreading core is not there yet — basically we are currently mostly broken ;)
Diffstat (limited to 'src/uctx.h')
-rw-r--r--  src/uctx.h  41
1 file changed, 26 insertions, 15 deletions
diff --git a/src/uctx.h b/src/uctx.h
index 3ec30c4..353b732 100644
--- a/src/uctx.h
+++ b/src/uctx.h
@@ -18,19 +18,18 @@
#ifdef VALGRIND
#include <valgrind/valgrind.h>
-#else
-#define VALGRIND_STACK_REGISTER(stack_base, size) 0
-#define VALGRIND_STACK_DEREGISTER(stack_id)
#endif
#define STACK_GUARD 0xbad57ac4
struct tf_uctx {
- int *stack_guard;
- size_t size;
- void *alloc;
- void *current_sp;
- unsigned int stack_id;
+ int * stack_guard;
+ size_t size;
+ void * alloc;
+ void * current_sp;
+#ifdef VALGRIND
+ unsigned int stack_id;
+#endif
};
#if defined(__i386__)
@@ -89,13 +88,14 @@ static inline void stack_push_ptr(void **stackptr, void *ptr)
}
-static inline void tf_uctx_create_self(struct tf_uctx *uctx)
+static inline tf_uctx_t tf_uctx_create_self(struct tf_uctx *uctx)
{
static int dummy_guard = STACK_GUARD;
*uctx = (struct tf_uctx) {
.stack_guard = &dummy_guard,
};
+ return uctx;
}
static inline void *
@@ -118,20 +118,24 @@ tf_uctx_create_embedded(
/* Create initial stack frame (cdecl convention) */
stack = stack_pointer(stack_base, size);
- user_data = stack_push(&stack, TF_ALIGN(private_size, 64));
+ user_data = stack_push(&stack, TF_ALIGN(private_size, 16));
+ uctx = stack_push(&stack, TF_ALIGN(sizeof(struct tf_uctx), 16));
stack_push_ptr(&stack, main_argument);
stack_push_ptr(&stack, user_data);
stack_push_ptr(&stack, NULL);
stack_push_ptr(&stack, stack_frame_main); /* eip */
stack_push_ptr(&stack, NULL); /* ebp */
- uctx = user_data + uctx_offset;
+ *((tf_uctx_t *) (user_data + uctx_offset)) = uctx;
+
*uctx = (struct tf_uctx) {
.stack_guard = stack_guard(stack_base, size),
.alloc = stack_base,
.size = size,
.current_sp = stack,
+#ifdef VALGRIND
.stack_id = VALGRIND_STACK_REGISTER(stack_base, stack_base+size),
+#endif
};
*uctx->stack_guard = STACK_GUARD;
@@ -139,18 +143,25 @@ tf_uctx_create_embedded(
}
static inline
-void tf_uctx_destroy(struct tf_uctx *uctx)
+void tf_uctx_destroy(tf_uctx_t ctx)
{
+ struct tf_uctx *uctx = ctx;
+
if (uctx->alloc != NULL) {
+#ifdef VALGRIND
VALGRIND_STACK_DEREGISTER(uctx->stack_id);
+#endif
tf_bmem_free(uctx->alloc, uctx->size);
}
}
static inline
-void tf_uctx_transfer(struct tf_uctx *from, struct tf_uctx *to)
+void tf_uctx_transfer(tf_uctx_t from, tf_uctx_t to)
{
+ struct tf_uctx *ufrom = from;
+ struct tf_uctx *uto = to;
+
/* Switch stack pointers */
- TF_BUG_ON(*from->stack_guard != STACK_GUARD);
- switch_fiber(from, to);
+ TF_BUG_ON(*ufrom->stack_guard != STACK_GUARD);
+ switch_fiber(ufrom, uto);
}