author    Tobias Brunner <tobias@strongswan.org>    2012-06-21 10:10:25 +0200
committer Tobias Brunner <tobias@strongswan.org>    2012-06-25 17:49:12 +0200
commit    e0efd7c121a888625cc1d287c496f1306019b9c7 (patch)
tree      c0cbfe50b65c97c0f91599dec54076e9f4a24578 /src/libstrongswan/processing/processor.c
parent    26d77eb3e61b2ff929dff96bbb53a5d22d76ce4f (diff)
Make rescheduling a job more predictable
This avoids race conditions between calls to cancel() and jobs that want to be rescheduled. If jobs were able to reschedule themselves directly, two worker threads could theoretically end up with the same job assigned (the one currently executing the job and the one executing the rescheduled instance, if it is already due to run), which means cancel() could be called twice for that job. Creating a new job based on the current one and scheduling that instead is also fine, but letting a job reschedule itself is more efficient for jobs that need to run often.
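For illustration, a minimal sketch of a job's execute() callback using the struct-valued requeue handled by the hunks below. The job_requeue_t members (type, schedule, time.rel) and the enum values are the ones referenced in processor.c in this patch; the surrounding private_poll_job_t type, poll_once() helper, and the 500 ms interval are hypothetical names chosen for this example, and it is assumed that job.h declares job_requeue_t with exactly these members.

	/* Hypothetical job that re-runs itself every 500 ms by asking the
	 * processor to hand it back to scheduler_t, instead of queueing a
	 * copy of itself.  Only this single reference to the job ever
	 * exists, so cancel() cannot race against a rescheduled duplicate. */
	static job_requeue_t execute_poll(private_poll_job_t *this)
	{
		job_requeue_t requeue = {
			.type = JOB_REQUEUE_TYPE_SCHEDULE,
			.schedule = JOB_SCHEDULE_MS,
			.time = { .rel = 500 },
		};

		if (poll_once(this))
		{	/* nothing left to do, let the processor destroy the job */
			requeue.type = JOB_REQUEUE_TYPE_NONE;
		}
		return requeue;
	}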
Diffstat (limited to 'src/libstrongswan/processing/processor.c')
-rw-r--r--  src/libstrongswan/processing/processor.c  |  54
1 file changed, 35 insertions(+), 19 deletions(-)
diff --git a/src/libstrongswan/processing/processor.c b/src/libstrongswan/processing/processor.c
index 0f0c192d2..5b7fd467c 100644
--- a/src/libstrongswan/processing/processor.c
+++ b/src/libstrongswan/processing/processor.c
@@ -217,13 +217,13 @@ static void process_jobs(worker_thread_t *worker)
while (TRUE)
{
requeue = worker->job->execute(worker->job);
- if (requeue != JOB_REQUEUE_DIRECT)
+ if (requeue.type != JOB_REQUEUE_TYPE_DIRECT)
{
break;
}
else if (!worker->job->cancel)
{ /* only allow cancelable jobs to requeue directly */
- requeue = JOB_REQUEUE_FAIR;
+ requeue.type = JOB_REQUEUE_TYPE_FAIR;
break;
}
}
@@ -234,25 +234,41 @@ static void process_jobs(worker_thread_t *worker)
{ /* job was canceled via a custom cancel() method or did not
* use JOB_REQUEUE_TYPE_DIRECT */
worker->job->destroy(worker->job);
+ break;
}
- else
+ switch (requeue.type)
{
- switch (requeue)
- {
- case JOB_REQUEUE_NONE:
- worker->job->status = JOB_STATUS_DONE;
- worker->job->destroy(worker->job);
- break;
- case JOB_REQUEUE_FAIR:
- worker->job->status = JOB_STATUS_QUEUED;
- this->jobs[i]->insert_last(this->jobs[i],
- worker->job);
- this->job_added->signal(this->job_added);
- break;
- case JOB_REQUEUE_SCHEDULED:
- default:
- break;
- }
+ case JOB_REQUEUE_TYPE_NONE:
+ worker->job->status = JOB_STATUS_DONE;
+ worker->job->destroy(worker->job);
+ break;
+ case JOB_REQUEUE_TYPE_FAIR:
+ worker->job->status = JOB_STATUS_QUEUED;
+ this->jobs[i]->insert_last(this->jobs[i],
+ worker->job);
+ this->job_added->signal(this->job_added);
+ break;
+ case JOB_REQUEUE_TYPE_SCHEDULE:
+ /* scheduler_t does not hold its lock when queuing jobs
+ * so this should be safe without unlocking our mutex */
+ switch (requeue.schedule)
+ {
+ case JOB_SCHEDULE:
+ lib->scheduler->schedule_job(lib->scheduler,
+ worker->job, requeue.time.rel);
+ break;
+ case JOB_SCHEDULE_MS:
+ lib->scheduler->schedule_job_ms(lib->scheduler,
+ worker->job, requeue.time.rel);
+ break;
+ case JOB_SCHEDULE_TV:
+ lib->scheduler->schedule_job_tv(lib->scheduler,
+ worker->job, requeue.time.abs);
+ break;
+ }
+ break;
+ default:
+ break;
}
break;
}