| author | paulo <paul@bayleaf.org.uk> | 2010-01-18 18:00:34 +0000 |
|---|---|---|
| committer | paulo <paul@bayleaf.org.uk> | 2010-01-18 18:00:34 +0000 |
| commit | 8f62c98f904833033bd4684fa3ae6b516470da75 | |
| tree | 75f698ea0fc9ccc0624434e3b25df3d4a0091acf | |
| parent | ea3df74b49782790ba198b744cbe22330377fba4 | |
Teach the nexus to handle legacy threads and to process the BGP local events.
Works with either one or multiple nexus objects (and pthreads).
| -rw-r--r-- | bgpd/bgp_main.c | 53 |
| -rw-r--r-- | lib/qpnexus.c | 32 |
| -rw-r--r-- | lib/qpnexus.h | 12 |
| -rw-r--r-- | lib/thread.c | 55 |
| -rw-r--r-- | lib/thread.h | 3 |
5 files changed, 126 insertions, 29 deletions
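
Before the diff itself: the commit's central mechanism is the per-nexus `event_hook[]` array. Each hook is called once per pass of the nexus loop, ahead of the message queue, I/O and timers, and returns a monotonic time at which it would like to be polled again (0 meaning no preference, so the loop falls back to the maximum pselect timeout). The snippet below is a minimal, self-contained model of that contract, not the library code: the `qtime_mono_t` typedef, the fake clock and the hook bodies are stand-ins; only `NUM_EVENT_HOOK`, the hook signature and the clamping logic mirror the diff.

```c
/* Minimal model of the nexus event-hook contract (illustrative only).
 * A hook returns 0 for "no preference", otherwise the monotonic time
 * at which it wants to run again; the loop clamps its pselect wait to
 * the earliest such time.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_EVENT_HOOK 2              /* as in lib/qpnexus.h           */
typedef int64_t qtime_mono_t;         /* stand-in for the qtime type   */

typedef qtime_mono_t (*event_hook_t)(void);

static qtime_mono_t now_mono = 1000;  /* fake monotonic clock          */

static qtime_mono_t hook_with_timer(void)  { return now_mono + 50; }  /* wants a poll in 50 units */
static qtime_mono_t hook_queue_drain(void) { return 0; }              /* no preference            */

int main(void)
{
  event_hook_t event_hook[NUM_EVENT_HOOK] = { hook_with_timer, hook_queue_drain };
  qtime_mono_t max_wait = now_mono + 500;   /* MAX_PSELECT_TIMOUT stand-in */

  for (int i = 0; i < NUM_EVENT_HOOK; ++i)
    if (event_hook[i] != NULL)
      {
        qtime_mono_t event_wait = event_hook[i]();
        if (event_wait > 0 && event_wait < max_wait)
          max_wait = event_wait;            /* earliest request wins */
      }

  printf("pselect deadline: now + %lld\n", (long long)(max_wait - now_mono));
  return 0;
}
```

In the real loop (see the qpnexus.c hunk below), `max_wait` is then handed to `qtimer_pile_top_time()` so pending timers can shorten it further before `qps_pselect()` blocks.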
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index e6afe1ec..319da31c 100644
--- a/bgpd/bgp_main.c
+++ b/bgpd/bgp_main.c
@@ -82,6 +82,8 @@ void sigusr2 (void);
 static void bgp_exit (int);
 static void init_second_stage(int pthreads);
 static void bgp_in_thread_init(void);
+static qtime_mono_t routing_event_hook(void);
+static qtime_mono_t bgp_event_hook(void);
 
 static struct quagga_signal_t bgp_signals[] =
 {
@@ -366,17 +368,30 @@ init_second_stage(int pthreads)
   qlib_init_second_stage(pthreads);
   bgp_peer_index_mutex_init(NULL);
 
-  /* if using pthreads create additional mutexes */
-  if (pthreads)
+  /* if using pthreads create additional nexus */
+  if (qpthreads_enabled)
     {
       bgp_nexus = qpn_init_new(cli_nexus, 0);
       routing_nexus = qpn_init_new(cli_nexus, 0);
     }
 
-  /* legacy threads are executed in routing_nexus */
-  routing_nexus->master = master;
+  /* Nexus hooks.
+   * Beware if !qpthreads_enabled then there is only 1 nexus object
+   * with all nexus pointers being aliases for it.  So only one routine
+   * per hook for *all* nexus.
+   */
+  bgp_nexus->in_thread_init = bgp_in_thread_init;
+  bgp_nexus->in_thread_final = bgp_close_listeners;
+  routing_nexus->event_hook[0] = routing_event_hook;
+  bgp_nexus->event_hook[1] = bgp_event_hook;
+  confirm(NUM_EVENT_HOOK >= 2);
+
+  /* vty can use either nexus or threads.  For bgp client we always
+   * want nexus, regardless of pthreads.
+   */
   vty_init_r(cli_nexus, routing_nexus);
 }
+
 /* Main routine of bgpd. Treatment of argument and start bgp finite
    state machine is handled at here. */
 int
@@ -546,15 +561,6 @@ main (int argc, char **argv)
 	       (bm->address ? bm->address : "<all>"),
 	       (int)bm->port);
 
-  /* in-thread initialization and finalization.
-   * NB if !qpthreads_enabled then there is only 1 nexus object
-   * with all nexus pointers being alises for it.  So if different
-   * logical nexus need their own init or final then will need a single
-   * init or final routine.
-   */
-  bgp_nexus->in_thread_init = bgp_in_thread_init;
-  bgp_nexus->in_thread_final = bgp_close_listeners;
-
   /* Launch finite state machine(s) */
   if (qpthreads_enabled)
     {
@@ -582,3 +588,24 @@ bgp_in_thread_init(void)
 {
   bgp_open_listeners(bm->port, bm->address);
 }
+
+/* legacy threads */
+static qtime_mono_t
+routing_event_hook(void)
+{
+  struct thread thread;
+  qtime_mono_t event_wait;
+
+  while (thread_fetch_event (master, &thread, &event_wait))
+    thread_call (&thread);
+
+  return event_wait;
+}
+
+/* BGP local queued events */
+static qtime_mono_t
+bgp_event_hook(void)
+{
+  bgp_connection_queue_process();
+  return 0;
+}
diff --git a/lib/qpnexus.c b/lib/qpnexus.c
index 98bd767c..270142be 100644
--- a/lib/qpnexus.c
+++ b/lib/qpnexus.c
@@ -121,7 +121,7 @@ qpn_exec(qpn_nexus qpn)
  *
  *   1) Main thread only -- signals.
  *
- *   2) Pending work -- local queue.
+ *   2) Pending work -- event hooks.
  *
  *   3) messages coming from other pthreads -- mqueue_queue.
  *
@@ -145,7 +145,8 @@ qpn_start(void* arg)
   mqueue_block mqb;
   int actions;
   qtime_mono_t now;
-  struct thread thread;
+  qtime_mono_t max_wait;
+  int i;
 
   /* now in our thread, complete initialisation */
   qpn_in_thread_init(qpn);
@@ -157,6 +158,21 @@
       if (qpn->main_thread)
         quagga_sigevent_process ();
+      /* max time to wait in pselect */
+      now = qt_get_monotonic();
+      max_wait = now + QTIME(MAX_PSELECT_TIMOUT);
+
+      /* event hooks, if any */
+      for (i = 0; i < NUM_EVENT_HOOK; ++i)
+        {
+          if (qpn->event_hook[i] != NULL)
+            {
+              qtime_mono_t event_wait = qpn->event_hook[i]();
+              if (event_wait > 0 && event_wait < max_wait)
+                max_wait = event_wait;
+            }
+        }
+
       /* drain the message queue, will be in waiting for signal state
        * when it's empty */
       for (;;)
         {
@@ -169,9 +185,8 @@
         }
 
       /* block for some input, output, signal or timeout */
-      now = qt_get_monotonic();
       actions = qps_pselect(qpn->selection,
-                 qtimer_pile_top_time(qpn->pile, now + QTIME(MAX_PSELECT_TIMOUT)) );
+                 qtimer_pile_top_time(qpn->pile, max_wait));
 
       /* process I/O actions */
       while (actions)
@@ -184,15 +199,6 @@
       while (qtimer_pile_dispatch_next(qpn->pile, now))
         {
         }
-
-      /* legacy threads */
-      /* TODO: legacy threads must not pselect.  How is the pselect above
-       * to know when to timeout for legacy timers? */
-      if (qpn->master != NULL)
-        {
-          if (thread_fetch (qpn->master, &thread))
-            thread_call (&thread);
-        }
     }
 
   /* last bit of code to run in this thread */
diff --git a/lib/qpnexus.h b/lib/qpnexus.h
index c717ed23..32219df1 100644
--- a/lib/qpnexus.h
+++ b/lib/qpnexus.h
@@ -53,6 +53,9 @@
 /* signal for message queues */
 #define SIGMQUEUE SIGUSR2
 
+/* number of event hooks */
+#define NUM_EVENT_HOOK 2
+
 /*==============================================================================
  * Data Structures.
  */
@@ -80,9 +83,6 @@ struct qpn_nexus
   mqueue_queue queue;
   mqueue_thread_signal mts;
 
-  /* legacy threads */
-  struct thread_master *master;
-
   /* qpthread routine, can override */
   void* (*start)(void*);
 
@@ -95,6 +95,12 @@ struct qpn_nexus
    * thread loop is no longer executed */
   void (*in_thread_final)(void);
 
+  /* thread loop events, can override.  Called before message queue,
+   * I/O and timers.
+   * Returns the time to try again, 0 means default to maximum.
+   */
+  qtime_mono_t (*event_hook[NUM_EVENT_HOOK])(void);
+
 };
 
 /*==============================================================================
diff --git a/lib/thread.c b/lib/thread.c
index 773e9338..62f9669f 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -1028,6 +1028,61 @@ thread_fetch (struct thread_master *m, struct thread *fetch)
     }
 }
+
+/* Fetch next ready thread.  Events and timeouts only.  No I/O.
+ * If nothing to do returns NULL and sets event_wait to recommended time
+ * to be called again. */
+struct thread *
+thread_fetch_event (struct thread_master *m, struct thread *fetch,
+    qtime_mono_t *event_wait)
+{
+  struct thread *thread;
+  struct timeval timer_val;
+  struct timeval timer_val_bg;
+  struct timeval *timer_wait;
+  struct timeval *timer_wait_bg;
+
+  /* Normal event are the next highest priority.  */
+  if ((thread = thread_trim_head (&m->event)) != NULL)
+    return thread_run (m, thread, fetch);
+
+  /* If there are any ready threads from previous scheduler runs,
+   * process top of them.
+   */
+  if ((thread = thread_trim_head (&m->ready)) != NULL)
+    return thread_run (m, thread, fetch);
+
+  /* Calculate select wait timer if nothing else to do */
+  quagga_get_relative (NULL);
+  timer_wait = thread_timer_wait (&m->timer, &timer_val);
+  timer_wait_bg = thread_timer_wait (&m->background, &timer_val_bg);
+
+  if (timer_wait_bg &&
+      (!timer_wait || (timeval_cmp (*timer_wait, *timer_wait_bg) > 0)))
+    timer_wait = timer_wait_bg;
+
+  /* When is the next timer due ? */
+  if (timer_wait)
+    {
+      *event_wait = timeval2qtime(timer_wait);
+      return NULL;
+    }
+
+  /* Check foreground timers.  Historically, they have had higher
+     priority than I/O threads, so let's push them onto the ready
+     list in front of the I/O threads. */
+  quagga_get_relative (NULL);
+  thread_timer_process (&m->timer, &relative_time);
+
+  /* Background timer/events, lowest priority */
+  thread_timer_process (&m->background, &relative_time);
+
+  if ((thread = thread_trim_head (&m->ready)) != NULL)
+    return thread_run (m, thread, fetch);
+
+  return NULL;
+}
+
 
 unsigned long
 thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
 {
diff --git a/lib/thread.h b/lib/thread.h
index 43d4d12f..b0699650 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -23,6 +23,7 @@
 #define _ZEBRA_THREAD_H
 
 #include <sys/resource.h>
+#include "qtime.h"
 
 struct rusage_t
 {
@@ -194,6 +195,8 @@ extern struct thread *funcname_thread_execute (struct thread_master *,
 extern void thread_cancel (struct thread *);
 extern unsigned int thread_cancel_event (struct thread_master *, void *);
 extern struct thread *thread_fetch (struct thread_master *, struct thread *);
+struct thread * thread_fetch_event (struct thread_master *m, struct thread *fetch,
+    qtime_mono_t *event_wait);
 extern void thread_call (struct thread *);
 extern unsigned long thread_timer_remain_second (struct thread *);
 extern int thread_should_yield (struct thread *);
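
A note on `thread_fetch_event()`: it keeps the legacy scheduler's priority order but never blocks on I/O. Events run first, then threads already on the ready list; only when nothing is runnable does it report the earliest foreground/background timer through `event_wait` (after pushing expired timers onto the ready list) so the nexus pselect can use it as a deadline. The sketch below compresses that decision order into a toy scheduler; the `fake_master` struct and its counters are illustrative stand-ins, not the `struct thread_master` API.

```c
/* Toy model of the thread_fetch_event() decision order (illustrative only).
 * Priority: events > ready threads > (nothing runnable: report next timer).
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t qtime_mono_t;

struct fake_master
{
  int n_event;                 /* queued "event" threads            */
  int n_ready;                 /* threads made ready earlier        */
  qtime_mono_t next_timer;     /* earliest timer deadline, 0 = none */
};

/* Returns 1 if something ran; returns 0 and sets *event_wait when the
 * caller should go back to sleep (0 = no preference, use the maximum). */
static int fetch_event(struct fake_master *m, qtime_mono_t *event_wait)
{
  if (m->n_event > 0)     { m->n_event--; puts("run event thread"); return 1; }
  if (m->n_ready > 0)     { m->n_ready--; puts("run ready thread"); return 1; }
  if (m->next_timer != 0) { *event_wait = m->next_timer; return 0; }
  *event_wait = 0;             /* nothing pending at all */
  return 0;
}

int main(void)
{
  struct fake_master m = { .n_event = 1, .n_ready = 1, .next_timer = 1234 };
  qtime_mono_t event_wait = 0;

  while (fetch_event(&m, &event_wait))   /* mirrors routing_event_hook() */
    ;
  printf("sleep until %lld\n", (long long)event_wait);
  return 0;
}
```

This is the pattern `routing_event_hook()` in the bgp_main.c hunk follows: keep fetching and calling until nothing is ready, then hand the returned wait time back to the nexus loop as its pselect deadline.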