/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed
 * with this work for additional information regarding copyright
 * ownership.  The ASF licenses this file to you under the Apache
 * License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License.  You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include "apr_thread_pool.h"
#include "apr_ring.h"
#include "apr_thread_cond.h"
#include "apr_portable.h"
#include <assert.h>

#if APR_HAS_THREADS

#define TASK_PRIORITY_SEGS 4
#define TASK_PRIORITY_SEG(x) (((x)->dispatch.priority & 0xFF) / 64)
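
/*
 * Task priorities (0-255) map onto TASK_PRIORITY_SEGS segments of 64
 * values each, so e.g. priority 250 lands in segment 3 and priority 100 in
 * segment 1. The task_idx[] cache in struct apr_thread_pool below records
 * the first queued task of each segment, letting insertion start near the
 * right place instead of scanning the whole task ring.
 */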

typedef struct apr_thread_pool_task
{
    APR_RING_ENTRY(apr_thread_pool_task) link;
    apr_thread_start_t func;
    void *param;
    void *owner;
    union
    {
        apr_byte_t priority;
        apr_time_t time;
    } dispatch;
} apr_thread_pool_task_t;

APR_RING_HEAD(apr_thread_pool_tasks, apr_thread_pool_task);

struct apr_thread_list_elt
{
    APR_RING_ENTRY(apr_thread_list_elt) link;
    apr_thread_t *thd;
    volatile void *current_owner;
    volatile enum { TH_RUN, TH_STOP, TH_PROBATION } state;
};

APR_RING_HEAD(apr_thread_list, apr_thread_list_elt);

struct apr_thread_pool
{
    apr_pool_t *pool;
    volatile apr_size_t thd_max;
    volatile apr_size_t idle_max;
    volatile apr_interval_time_t idle_wait;
    volatile apr_size_t thd_cnt;
    volatile apr_size_t idle_cnt;
    volatile apr_size_t task_cnt;
    volatile apr_size_t scheduled_task_cnt;
    volatile apr_size_t threshold;
    volatile apr_size_t tasks_run;
    volatile apr_size_t tasks_high;
    volatile apr_size_t thd_high;
    volatile apr_size_t thd_timed_out;
    struct apr_thread_pool_tasks *tasks;
    struct apr_thread_pool_tasks *scheduled_tasks;
    struct apr_thread_list *busy_thds;
    struct apr_thread_list *idle_thds;
    apr_thread_mutex_t *lock;
    apr_thread_mutex_t *cond_lock;
    apr_thread_cond_t *cond;
    volatile int terminated;
    struct apr_thread_pool_tasks *recycled_tasks;
    struct apr_thread_list *recycled_thds;
    apr_thread_pool_task_t *task_idx[TASK_PRIORITY_SEGS];
};
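
/*
 * Locking note: "lock" guards the queues and counters above, while
 * "cond_lock" pairs with "cond" for the idle-thread waits. Workers drop
 * "lock" before blocking on "cond", so task submission and cancellation can
 * make progress while threads sleep.
 */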

static apr_status_t thread_pool_construct(apr_thread_pool_t * me,
                                          apr_size_t init_threads,
                                          apr_size_t max_threads)
{
    apr_status_t rv;
    int i;

    me->thd_max = max_threads;
    me->idle_max = init_threads;
    me->threshold = init_threads / 2;
    rv = apr_thread_mutex_create(&me->lock, APR_THREAD_MUTEX_NESTED,
                                 me->pool);
    if (APR_SUCCESS != rv) {
        return rv;
    }
    rv = apr_thread_mutex_create(&me->cond_lock, APR_THREAD_MUTEX_UNNESTED,
                                 me->pool);
    if (APR_SUCCESS != rv) {
        apr_thread_mutex_destroy(me->lock);
        return rv;
    }
    rv = apr_thread_cond_create(&me->cond, me->pool);
    if (APR_SUCCESS != rv) {
        apr_thread_mutex_destroy(me->lock);
        apr_thread_mutex_destroy(me->cond_lock);
        return rv;
    }
    me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
    if (!me->tasks) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->tasks, apr_thread_pool_task, link);
    me->scheduled_tasks = apr_palloc(me->pool, sizeof(*me->scheduled_tasks));
    if (!me->scheduled_tasks) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->scheduled_tasks, apr_thread_pool_task, link);
    me->recycled_tasks = apr_palloc(me->pool, sizeof(*me->recycled_tasks));
    if (!me->recycled_tasks) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->recycled_tasks, apr_thread_pool_task, link);
    me->busy_thds = apr_palloc(me->pool, sizeof(*me->busy_thds));
    if (!me->busy_thds) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->busy_thds, apr_thread_list_elt, link);
    me->idle_thds = apr_palloc(me->pool, sizeof(*me->idle_thds));
    if (!me->idle_thds) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
    me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
    if (!me->recycled_thds) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->recycled_thds, apr_thread_list_elt, link);
    me->thd_cnt = me->idle_cnt = me->task_cnt = me->scheduled_task_cnt = 0;
    me->tasks_run = me->tasks_high = me->thd_high = me->thd_timed_out = 0;
    me->idle_wait = 0;
    me->terminated = 0;
    for (i = 0; i < TASK_PRIORITY_SEGS; i++) {
        me->task_idx[i] = NULL;
    }
    goto FINAL_EXIT;
  CATCH_ENOMEM:
    rv = APR_ENOMEM;
    apr_thread_mutex_destroy(me->lock);
    apr_thread_mutex_destroy(me->cond_lock);
    apr_thread_cond_destroy(me->cond);
  FINAL_EXIT:
    return rv;
}

/*
 * NOTE: This function is not thread safe by itself; the caller must hold
 * the lock.
 */
static apr_thread_pool_task_t *pop_task(apr_thread_pool_t * me)
{
    apr_thread_pool_task_t *task = NULL;
    int seg;

    /* check for scheduled tasks */
    if (me->scheduled_task_cnt > 0) {
        task = APR_RING_FIRST(me->scheduled_tasks);
        assert(task != NULL);
        assert(task !=
               APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                                 link));
        /* if it is time for the task, dispatch it */
        if (task->dispatch.time <= apr_time_now()) {
            --me->scheduled_task_cnt;
            APR_RING_REMOVE(task, link);
            return task;
        }
    }
    /* check for normal tasks if we're not returning a scheduled task */
    if (me->task_cnt == 0) {
        return NULL;
    }

    task = APR_RING_FIRST(me->tasks);
    assert(task != NULL);
    assert(task != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link));
    --me->task_cnt;
    seg = TASK_PRIORITY_SEG(task);
    if (task == me->task_idx[seg]) {
        me->task_idx[seg] = APR_RING_NEXT(task, link);
        if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
                                                   apr_thread_pool_task, link)
            || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
            me->task_idx[seg] = NULL;
        }
    }
    APR_RING_REMOVE(task, link);
    return task;
}

static apr_interval_time_t waiting_time(apr_thread_pool_t * me)
{
    apr_thread_pool_task_t *task = NULL;

    task = APR_RING_FIRST(me->scheduled_tasks);
    assert(task != NULL);
    assert(task !=
           APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                             link));
    return task->dispatch.time - apr_time_now();
}

/*
 * NOTE: This function is not thread safe by itself; the caller must hold
 * the lock.
 */
static struct apr_thread_list_elt *elt_new(apr_thread_pool_t * me,
                                           apr_thread_t * t)
{
    struct apr_thread_list_elt *elt;

    if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
        elt = apr_pcalloc(me->pool, sizeof(*elt));
        if (NULL == elt) {
            return NULL;
        }
    }
    else {
        elt = APR_RING_FIRST(me->recycled_thds);
        APR_RING_REMOVE(elt, link);
    }

    APR_RING_ELEM_INIT(elt, link);
    elt->thd = t;
    elt->current_owner = NULL;
    elt->state = TH_RUN;
    return elt;
}

/*
 * The worker thread function. Takes a task from the queue and performs it
 * if there is one; otherwise, puts itself on the idle thread list and waits
 * for a signal to wake up.
 * When asked to stop after finishing a task, the thread terminates directly
 * by detaching and exiting. Otherwise the thread stays on the idle thread
 * list and must be joined.
 */
static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
{
    apr_status_t rv = APR_SUCCESS;
    apr_thread_pool_t *me = param;
    apr_thread_pool_task_t *task = NULL;
    apr_interval_time_t wait;
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);
    elt = elt_new(me, t);
    if (!elt) {
        apr_thread_mutex_unlock(me->lock);
        apr_thread_exit(t, APR_ENOMEM);
    }

    while (!me->terminated && elt->state != TH_STOP) {
        /* if not a new element, it has been awakened from idle */
        if (APR_RING_NEXT(elt, link) != elt) {
            --me->idle_cnt;
            APR_RING_REMOVE(elt, link);
        }

        APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
        task = pop_task(me);
        while (NULL != task && !me->terminated) {
            ++me->tasks_run;
            elt->current_owner = task->owner;
            apr_thread_mutex_unlock(me->lock);
            apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
            task->func(t, task->param);
            apr_thread_mutex_lock(me->lock);
            APR_RING_INSERT_TAIL(me->recycled_tasks, task,
                                 apr_thread_pool_task, link);
            elt->current_owner = NULL;
            if (TH_STOP == elt->state) {
                break;
            }
            task = pop_task(me);
        }
        assert(NULL == elt->current_owner);
        if (TH_STOP != elt->state)
            APR_RING_REMOVE(elt, link);

        /* a busy thread that has been asked to stop is not joinable */
        if ((me->idle_cnt >= me->idle_max
             && !(me->scheduled_task_cnt && 0 >= me->idle_max)
             && !me->idle_wait)
            || me->terminated || elt->state != TH_RUN) {
            --me->thd_cnt;
            if ((TH_PROBATION == elt->state) && me->idle_wait)
                ++me->thd_timed_out;
            APR_RING_INSERT_TAIL(me->recycled_thds, elt,
                                 apr_thread_list_elt, link);
            apr_thread_mutex_unlock(me->lock);
            apr_thread_detach(t);
            apr_thread_exit(t, APR_SUCCESS);
            return NULL;        /* should not be here, safety net */
        }

        /* busy thread becomes idle */
        ++me->idle_cnt;
        APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);

        /*
         * If there is a scheduled task, wait with a timeout so that some
         * thread wakes up to run it; there is no guarantee that a currently
         * idle thread will be scheduled for the next scheduled task.
         */
        if (me->scheduled_task_cnt)
            wait = waiting_time(me);
        else if (me->idle_cnt > me->idle_max) {
            wait = me->idle_wait;
            elt->state = TH_PROBATION;
        }
        else
            wait = -1;

        apr_thread_mutex_unlock(me->lock);
        apr_thread_mutex_lock(me->cond_lock);
        if (wait >= 0) {
            rv = apr_thread_cond_timedwait(me->cond, me->cond_lock, wait);
        }
        else {
            rv = apr_thread_cond_wait(me->cond, me->cond_lock);
        }
        apr_thread_mutex_unlock(me->cond_lock);
        apr_thread_mutex_lock(me->lock);
    }

    /* an idle thread that was asked to stop; it will be joined */
    --me->thd_cnt;
    apr_thread_mutex_unlock(me->lock);
    apr_thread_exit(t, APR_SUCCESS);
    return NULL;                /* should not be here, safety net */
}

static apr_status_t thread_pool_cleanup(void *me)
{
    apr_thread_pool_t *_self = me;

    _self->terminated = 1;
    apr_thread_pool_idle_max_set(_self, 0);
    while (_self->thd_cnt) {
        apr_sleep(20 * 1000);   /* poll every 20 ms until threads exit */
    }
    apr_thread_mutex_destroy(_self->lock);
    apr_thread_mutex_destroy(_self->cond_lock);
    apr_thread_cond_destroy(_self->cond);
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_thread_pool_create(apr_thread_pool_t ** me,
                                                 apr_size_t init_threads,
                                                 apr_size_t max_threads,
                                                 apr_pool_t * pool)
{
    apr_thread_t *t;
    apr_status_t rv = APR_SUCCESS;

    *me = apr_pcalloc(pool, sizeof(**me));
    if (!*me) {
        return APR_ENOMEM;
    }

    (*me)->pool = pool;

    rv = thread_pool_construct(*me, init_threads, max_threads);
    if (APR_SUCCESS != rv) {
        *me = NULL;
        return rv;
    }
    apr_pool_cleanup_register(pool, *me, thread_pool_cleanup,
                              apr_pool_cleanup_null);

    while (init_threads) {
        rv = apr_thread_create(&t, NULL, thread_pool_func, *me, (*me)->pool);
        if (APR_SUCCESS != rv) {
            break;
        }
        ++(*me)->thd_cnt;
        if ((*me)->thd_cnt > (*me)->thd_high)
            (*me)->thd_high = (*me)->thd_cnt;
        --init_threads;
    }

    return rv;
}

APU_DECLARE(apr_status_t) apr_thread_pool_destroy(apr_thread_pool_t * me)
{
    return apr_pool_cleanup_run(me->pool, me, thread_pool_cleanup);
}
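
/*
 * A minimal usage sketch (illustrative only; my_task, my_data and the pool
 * sizes are hypothetical, not part of this file):
 *
 *     static void *APR_THREAD_FUNC my_task(apr_thread_t *t, void *param)
 *     {
 *         return NULL;   (do the work on param here)
 *     }
 *
 *     apr_thread_pool_t *tp;
 *     if (apr_thread_pool_create(&tp, 2, 8, pool) == APR_SUCCESS) {
 *         apr_thread_pool_push(tp, my_task, my_data, 127, NULL);
 *         apr_thread_pool_destroy(tp);
 *     }
 *
 * apr_thread_pool_destroy() runs the pool cleanup, which blocks until all
 * worker threads have exited.
 */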

/*
 * NOTE: This function is not thread safe by itself; the caller must hold
 * the lock.
 */
static apr_thread_pool_task_t *task_new(apr_thread_pool_t * me,
                                        apr_thread_start_t func,
                                        void *param, apr_byte_t priority,
                                        void *owner, apr_time_t time)
{
    apr_thread_pool_task_t *t;

    if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
        t = apr_pcalloc(me->pool, sizeof(*t));
        if (NULL == t) {
            return NULL;
        }
    }
    else {
        t = APR_RING_FIRST(me->recycled_tasks);
        APR_RING_REMOVE(t, link);
    }

    APR_RING_ELEM_INIT(t, link);
    t->func = func;
    t->param = param;
    t->owner = owner;
    if (time > 0) {
        t->dispatch.time = apr_time_now() + time;
    }
    else {
        t->dispatch.priority = priority;
    }
    return t;
}

/*
 * Test if the task is the only one within its priority segment. If it is
 * not, return the first element with the same or lower priority. Otherwise,
 * add the task to the queue and return NULL.
 *
 * NOTE: This function is not thread safe by itself; the caller must hold
 * the lock.
 */
static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
                                            apr_thread_pool_task_t * const t)
{
    int seg;
    int next;
    apr_thread_pool_task_t *t_next;

    seg = TASK_PRIORITY_SEG(t);
    if (me->task_idx[seg]) {
        assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               me->task_idx[seg]);
        t_next = me->task_idx[seg];
        while (t_next->dispatch.priority > t->dispatch.priority) {
            t_next = APR_RING_NEXT(t_next, link);
            if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
                t_next) {
                return t_next;
            }
        }
        return t_next;
    }

    for (next = seg - 1; next >= 0; next--) {
        if (me->task_idx[next]) {
            APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
            break;
        }
    }
    if (0 > next) {
        APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
    }
    me->task_idx[seg] = t;
    return NULL;
}
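
/*
 * Worked example: starting from an empty queue, pushing priorities 250,
 * 130 and 60 lands them in segments 3, 2 and 0, each becoming its
 * segment's task_idx[] entry. A later priority-140 task (segment 2) then
 * starts its scan at task_idx[2] instead of at the head of the ring.
 */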

/*
 * Schedule a task to run in "time" microseconds. Find the spot in the
 * scheduled ring where the time fits; waiting_time() uses the head of that
 * ring so an idle thread wakes up when the earliest time is reached.
 */
static apr_status_t schedule_task(apr_thread_pool_t *me,
                                  apr_thread_start_t func, void *param,
                                  void *owner, apr_interval_time_t time)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, 0, owner, time);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }
    t_loc = APR_RING_FIRST(me->scheduled_tasks);
    while (NULL != t_loc) {
        /* if the new time is earlier than this entry's, insert ahead of it */
        if (t->dispatch.time < t_loc->dispatch.time) {
            ++me->scheduled_task_cnt;
            APR_RING_INSERT_BEFORE(t_loc, t, link);
            break;
        }
        else {
            t_loc = APR_RING_NEXT(t_loc, link);
            if (t_loc ==
                APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                                  link)) {
                ++me->scheduled_task_cnt;
                APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
                                     apr_thread_pool_task, link);
                break;
            }
        }
    }
    /* there should be at least one thread for scheduled tasks */
    if (0 == me->thd_cnt) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }
    apr_thread_mutex_unlock(me->lock);
    apr_thread_mutex_lock(me->cond_lock);
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->cond_lock);
    return rv;
}

static apr_status_t add_task(apr_thread_pool_t *me, apr_thread_start_t func,
                             void *param, apr_byte_t priority, int push,
                             void *owner)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, priority, owner, 0);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }

    t_loc = add_if_empty(me, t);
    if (NULL == t_loc) {
        goto FINAL_EXIT;
    }

    if (push) {
        while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               t_loc && t_loc->dispatch.priority >= t->dispatch.priority) {
            t_loc = APR_RING_NEXT(t_loc, link);
        }
    }
    APR_RING_INSERT_BEFORE(t_loc, t, link);
    if (!push) {
        if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
            me->task_idx[TASK_PRIORITY_SEG(t)] = t;
        }
    }

  FINAL_EXIT:
    ++me->task_cnt;
    if (me->task_cnt > me->tasks_high)
        me->tasks_high = me->task_cnt;
    if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
                             me->task_cnt > me->threshold)) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }
    apr_thread_mutex_unlock(me->lock);

    apr_thread_mutex_lock(me->cond_lock);
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->cond_lock);

    return rv;
}

APU_DECLARE(apr_status_t) apr_thread_pool_push(apr_thread_pool_t *me,
                                               apr_thread_start_t func,
                                               void *param,
                                               apr_byte_t priority,
                                               void *owner)
{
    return add_task(me, func, param, priority, 1, owner);
}

APU_DECLARE(apr_status_t) apr_thread_pool_schedule(apr_thread_pool_t *me,
                                                   apr_thread_start_t func,
                                                   void *param,
                                                   apr_interval_time_t time,
                                                   void *owner)
{
    return schedule_task(me, func, param, owner, time);
}

APU_DECLARE(apr_status_t) apr_thread_pool_top(apr_thread_pool_t *me,
                                              apr_thread_start_t func,
                                              void *param,
                                              apr_byte_t priority,
                                              void *owner)
{
    return add_task(me, func, param, priority, 0, owner);
}
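
/*
 * Usage sketch (illustrative; tp, my_task, my_data and my_owner are
 * placeholders): run a task roughly five seconds from now, tagged with an
 * owner so it can be cancelled later:
 *
 *     apr_thread_pool_schedule(tp, my_task, my_data,
 *                              apr_time_from_sec(5), my_owner);
 *     apr_thread_pool_tasks_cancel(tp, my_owner);
 *
 * apr_thread_pool_top() differs from apr_thread_pool_push() only in queue
 * placement: the task goes to the front of its priority class rather than
 * the back.
 */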

static apr_status_t remove_scheduled_tasks(apr_thread_pool_t *me,
                                           void *owner)
{
    apr_thread_pool_task_t *t_loc;
    apr_thread_pool_task_t *next;

    t_loc = APR_RING_FIRST(me->scheduled_tasks);
    while (t_loc !=
           APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                             link)) {
        next = APR_RING_NEXT(t_loc, link);
        /* if the task belongs to this owner, remove it */
        if (t_loc->owner == owner) {
            --me->scheduled_task_cnt;
            APR_RING_REMOVE(t_loc, link);
        }
        t_loc = next;
    }
    return APR_SUCCESS;
}

static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
{
    apr_thread_pool_task_t *t_loc;
    apr_thread_pool_task_t *next;
    int seg;

    t_loc = APR_RING_FIRST(me->tasks);
    while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
        next = APR_RING_NEXT(t_loc, link);
        if (t_loc->owner == owner) {
            --me->task_cnt;
            seg = TASK_PRIORITY_SEG(t_loc);
            if (t_loc == me->task_idx[seg]) {
                me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
                if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
                                                           apr_thread_pool_task,
                                                           link)
                    || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
                    me->task_idx[seg] = NULL;
                }
            }
            APR_RING_REMOVE(t_loc, link);
        }
        t_loc = next;
    }
    return APR_SUCCESS;
}

static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
{
#ifndef NDEBUG
    apr_os_thread_t *os_thread;
#endif
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);
    elt = APR_RING_FIRST(me->busy_thds);
    while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
        if (elt->current_owner != owner) {
            elt = APR_RING_NEXT(elt, link);
            continue;
        }
#ifndef NDEBUG
        /* make sure the thread is not the one calling tasks_cancel */
        apr_os_thread_get(&os_thread, elt->thd);
#ifdef WIN32
        /* hack for an apr win32 bug */
        assert(!apr_os_thread_equal(apr_os_thread_current(), os_thread));
#else
        assert(!apr_os_thread_equal(apr_os_thread_current(), *os_thread));
#endif
#endif
        while (elt->current_owner == owner) {
            apr_thread_mutex_unlock(me->lock);
            apr_sleep(200 * 1000);
            apr_thread_mutex_lock(me->lock);
        }
        elt = APR_RING_FIRST(me->busy_thds);
    }
    apr_thread_mutex_unlock(me->lock);
    return;
}

APU_DECLARE(apr_status_t) apr_thread_pool_tasks_cancel(apr_thread_pool_t *me,
                                                       void *owner)
{
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);
    if (me->task_cnt > 0) {
        rv = remove_tasks(me, owner);
    }
    if (me->scheduled_task_cnt > 0) {
        rv = remove_scheduled_tasks(me, owner);
    }
    apr_thread_mutex_unlock(me->lock);
    wait_on_busy_threads(me, owner);
    return rv;
}
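
/*
 * Cancellation is owner-based: only tasks queued or scheduled with the
 * same owner pointer are removed, and the call then blocks (polling in
 * wait_on_busy_threads) until any in-flight task of that owner completes.
 * It must therefore never be called from one of the owner's own tasks;
 * the assert in wait_on_busy_threads guards against exactly that.
 */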

APU_DECLARE(apr_size_t) apr_thread_pool_tasks_count(apr_thread_pool_t *me)
{
    return me->task_cnt;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_scheduled_tasks_count(apr_thread_pool_t *me)
{
    return me->scheduled_task_cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_threads_count(apr_thread_pool_t *me)
{
    return me->thd_cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_busy_count(apr_thread_pool_t *me)
{
    return me->thd_cnt - me->idle_cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_idle_count(apr_thread_pool_t *me)
{
    return me->idle_cnt;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_tasks_run_count(apr_thread_pool_t * me)
{
    return me->tasks_run;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_tasks_high_count(apr_thread_pool_t * me)
{
    return me->tasks_high;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_threads_high_count(apr_thread_pool_t * me)
{
    return me->thd_high;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_threads_idle_timeout_count(apr_thread_pool_t * me)
{
    return me->thd_timed_out;
}

APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_get(apr_thread_pool_t *me)
{
    return me->idle_max;
}

APU_DECLARE(apr_interval_time_t)
    apr_thread_pool_idle_wait_get(apr_thread_pool_t * me)
{
    return me->idle_wait;
}

/*
 * This function stops surplus threads, leaving *cnt of them running; the
 * number actually stopped is passed back through cnt.
 * @return the unlinked list of stopped thread elements, or NULL
 * NOTE: busy threads may become idle while this function runs
 */
static struct apr_thread_list_elt *trim_threads(apr_thread_pool_t *me,
                                                apr_size_t *cnt, int idle)
{
    struct apr_thread_list *thds;
    apr_size_t n, n_dbg, i;
    struct apr_thread_list_elt *head, *tail, *elt;

    apr_thread_mutex_lock(me->lock);
    if (idle) {
        thds = me->idle_thds;
        n = me->idle_cnt;
    }
    else {
        thds = me->busy_thds;
        n = me->thd_cnt - me->idle_cnt;
    }
    if (n <= *cnt) {
        apr_thread_mutex_unlock(me->lock);
        *cnt = 0;
        return NULL;
    }
    n -= *cnt;

    head = APR_RING_FIRST(thds);
    for (i = 0; i < *cnt; i++) {
        head = APR_RING_NEXT(head, link);
    }
    tail = APR_RING_LAST(thds);
    if (idle) {
        APR_RING_UNSPLICE(head, tail, link);
        me->idle_cnt = *cnt;
    }

    n_dbg = 0;
    for (elt = head; elt != tail; elt = APR_RING_NEXT(elt, link)) {
        elt->state = TH_STOP;
        n_dbg++;
    }
    elt->state = TH_STOP;
    n_dbg++;
    assert(n == n_dbg);
    *cnt = n;

    apr_thread_mutex_unlock(me->lock);

    APR_RING_PREV(head, link) = NULL;
    APR_RING_NEXT(tail, link) = NULL;

    return head;
}

static apr_size_t trim_idle_threads(apr_thread_pool_t *me, apr_size_t cnt)
{
    apr_size_t n_dbg;
    struct apr_thread_list_elt *elt, *head, *tail;
    apr_status_t rv;

    elt = trim_threads(me, &cnt, 1);

    apr_thread_mutex_lock(me->cond_lock);
    apr_thread_cond_broadcast(me->cond);
    apr_thread_mutex_unlock(me->cond_lock);

    n_dbg = 0;
    if (NULL != (head = elt)) {
        while (elt) {
            tail = elt;
            apr_thread_join(&rv, elt->thd);
            elt = APR_RING_NEXT(elt, link);
            ++n_dbg;
        }
        apr_thread_mutex_lock(me->lock);
        APR_RING_SPLICE_TAIL(me->recycled_thds, head, tail,
                             apr_thread_list_elt, link);
        apr_thread_mutex_unlock(me->lock);
    }
    assert(cnt == n_dbg);

    return cnt;
}

/*
 * Don't join busy threads for performance reasons: there is no way to know
 * how long a task will take to complete.
 */
static apr_size_t trim_busy_threads(apr_thread_pool_t *me, apr_size_t cnt)
{
    trim_threads(me, &cnt, 0);
    return cnt;
}
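
/*
 * Trimming busy threads only marks them TH_STOP; each one exits (detached)
 * in thread_pool_func once its current task completes, so the pool can
 * remain over the new limit for as long as those tasks keep running.
 */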

APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_set(apr_thread_pool_t *me,
                                                     apr_size_t cnt)
{
    me->idle_max = cnt;
    cnt = trim_idle_threads(me, cnt);
    return cnt;
}

APU_DECLARE(apr_interval_time_t)
    apr_thread_pool_idle_wait_set(apr_thread_pool_t * me,
                                  apr_interval_time_t timeout)
{
    apr_interval_time_t oldtime;

    oldtime = me->idle_wait;
    me->idle_wait = timeout;

    return oldtime;
}

APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_get(apr_thread_pool_t *me)
{
    return me->thd_max;
}

/*
 * This function stops surplus working threads down to the new limit.
 * NOTE: busy threads may become idle while this function runs
 */
APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_set(apr_thread_pool_t *me,
                                                       apr_size_t cnt)
{
    apr_size_t n;

    me->thd_max = cnt;
    if (0 == cnt || me->thd_cnt <= cnt) {
        return 0;
    }

    n = me->thd_cnt - cnt;
    if (n >= me->idle_cnt) {
        trim_busy_threads(me, n - me->idle_cnt);
        trim_idle_threads(me, 0);
    }
    else {
        trim_idle_threads(me, me->idle_cnt - n);
    }
    return n;
}

APU_DECLARE(apr_size_t) apr_thread_pool_threshold_get(apr_thread_pool_t *me)
{
    return me->threshold;
}

APU_DECLARE(apr_size_t) apr_thread_pool_threshold_set(apr_thread_pool_t *me,
                                                      apr_size_t val)
{
    apr_size_t ov;

    ov = me->threshold;
    me->threshold = val;
    return ov;
}

APU_DECLARE(apr_status_t) apr_thread_pool_task_owner_get(apr_thread_t *thd,
                                                         void **owner)
{
    apr_status_t rv;
    apr_thread_pool_task_t *task;
    void *data;

    rv = apr_thread_data_get(&data, "apr_thread_pool_task", thd);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    task = data;
    if (!task) {
        *owner = NULL;
        return APR_BADARG;
    }

    *owner = task->owner;
    return APR_SUCCESS;
}
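
/*
 * Usage sketch (illustrative): a task can look up its own owner tag
 * through the thread data set in thread_pool_func, e.g. from inside the
 * task function:
 *
 *     void *owner;
 *     if (apr_thread_pool_task_owner_get(t, &owner) == APR_SUCCESS) {
 *         ...
 *     }
 */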

#endif /* APR_HAS_THREADS */

/* vim: set ts=4 sw=4 et cin tw=80: */