/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_atomic.h"
20
typedef struct recycled_pool
{
    apr_pool_t *pool;
    struct recycled_pool *next;
} recycled_pool;

struct fd_queue_info_t
{
    apr_int32_t idlers;      /**
                              * 0 or positive: number of idle worker threads
                              * negative: number of threads blocked waiting
                              * for an idle worker
                              */
    apr_thread_mutex_t *idlers_mutex;
    apr_thread_cond_t *wait_for_idler;
    int terminated;
    int max_idlers;
    recycled_pool *recycled_pools;
};

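/* Worked example of the idlers convention above (illustrative only): with
 * two workers idle and nobody waiting, idlers == 2.  If the listener reserves
 * both and then calls ap_queue_info_wait_for_idler again, the count passes
 * through 1 and 0 to -1, i.e. one thread is now blocked waiting; the next
 * ap_queue_info_set_idle brings it back to 0 and signals the waiter.
 */
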
static apr_status_t queue_info_cleanup(void *data_)
{
    fd_queue_info_t *qi = data_;
    apr_thread_cond_destroy(qi->wait_for_idler);
    apr_thread_mutex_destroy(qi->idlers_mutex);

    /* Clean up any pools in the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = qi->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr
            ((volatile void **) &(qi->recycled_pools), first_pool->next,
             first_pool) == first_pool) {
            apr_pool_destroy(first_pool->pool);
        }
    }

    return APR_SUCCESS;
}

apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info,
                                  apr_pool_t * pool, int max_idlers)
{
    apr_status_t rv;
    fd_queue_info_t *qi;

    qi = apr_palloc(pool, sizeof(*qi));
    memset(qi, 0, sizeof(*qi));

    rv = apr_thread_mutex_create(&qi->idlers_mutex, APR_THREAD_MUTEX_DEFAULT,
                                 pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_thread_cond_create(&qi->wait_for_idler, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    qi->recycled_pools = NULL;
    qi->max_idlers = max_idlers;
    apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                              apr_pool_cleanup_null);

    *queue_info = qi;

    return APR_SUCCESS;
}

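/* A minimal setup sketch (illustrative only; pconf, threads_per_child and
 * qcapacity are hypothetical names, not part of this file):
 *
 *     fd_queue_info_t *worker_queue_info;
 *     fd_queue_t *worker_queue;
 *     apr_status_t rv;
 *
 *     rv = ap_queue_info_create(&worker_queue_info, pconf, threads_per_child);
 *     if (rv == APR_SUCCESS) {
 *         worker_queue = apr_pcalloc(pconf, sizeof(*worker_queue));
 *         rv = ap_queue_init(worker_queue, qcapacity, pconf);
 *     }
 */
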
apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
                                    apr_pool_t * pool_to_recycle)
{
    apr_status_t rv;
    apr_int32_t prev_idlers;

    ap_push_pool(queue_info, pool_to_recycle);

    /* Atomically increment the count of idle workers */
    prev_idlers = apr_atomic_inc32(&(queue_info->idlers));

    /* If other threads are waiting on a worker, wake one up */
    if (prev_idlers < 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            AP_DEBUG_ASSERT(0);
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}

apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info)
{
    apr_status_t rv;
    apr_int32_t prev_idlers;

    /* Atomically decrement the idle worker count, saving the old value */
    prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1);

    /* Block if there weren't any idle workers */
    if (prev_idlers <= 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            AP_DEBUG_ASSERT(0);
            apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still negative, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is non-negative, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now non-negative, it's safe for this function to
         *     return immediately.
         *
         *     A negative value in queue_info->idlers tells how many
         *     threads are waiting on an idle worker.
         */
        if (queue_info->idlers < 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                AP_DEBUG_ASSERT(0);
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}

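/* How the two halves of the idler handshake are intended to pair up (a
 * sketch, not part of the module; the listener/worker split below reflects
 * the usage described by the comments in this file):
 *
 *     listener thread:                        each worker thread:
 *         ap_queue_info_wait_for_idler(qi);       ap_queue_pop(q, &sd, &cs, &p);
 *         ap_pop_pool(&ptrans, qi);               ... process the connection ...
 *         ... accept a connection ...             ap_queue_info_set_idle(qi, p);
 *         ap_queue_push(q, sd, cs, ptrans);
 *
 * Because the listener reserves an idle worker before every push, the queue
 * can never be full at push time, which is what ap_queue_push asserts.
 */
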
void ap_push_pool(fd_queue_info_t * queue_info,
                  apr_pool_t * pool_to_recycle)
{
    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (pool_to_recycle) {
        struct recycled_pool *new_recycle;
        new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
                                                          sizeof(*new_recycle));
        new_recycle->pool = pool_to_recycle;
        for (;;) {
            new_recycle->next = queue_info->recycled_pools;
            if (apr_atomic_casptr
                ((volatile void **) &(queue_info->recycled_pools),
                 new_recycle, new_recycle->next) == new_recycle->next) {
                break;
            }
        }
    }
}

void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info)
{
    /* This function is safe only as long as it is single threaded because
     * it reaches into the queue and accesses "next" which can change.
     * We are OK today because it is only called from the listener thread.
     * cas-based pushes do not have the same limitation - any number can
     * happen concurrently with a single cas-based pop.
     */

    *recycled_pool = NULL;

    /* Atomically pop a pool from the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr
            ((volatile void **) &(queue_info->recycled_pools),
             first_pool->next, first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }
}

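/* Why a second concurrent popper would be unsafe (illustrative trace): popper
 * A reads first_pool and its next pointer, then stalls; popper B pops that
 * same element, and the pool is later reused and pushed back onto the list;
 * A's compare-and-swap then succeeds against the recycled head and installs
 * a stale next pointer (the classic ABA problem).  A single popper plus any
 * number of CAS-based pushers never hits this interleaving.
 */
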
apr_status_t ap_queue_info_term(fd_queue_info_t * queue_info)
{
    apr_status_t rv;

    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    queue_info->terminated = 1;
    apr_thread_cond_broadcast(queue_info->wait_for_idler);
    return apr_thread_mutex_unlock(queue_info->idlers_mutex);
}

/**
 * Detects when the fd_queue_t is full. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_full(queue) ((queue)->nelts == (queue)->bounds)

/**
 * Detects when the fd_queue_t is empty. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_empty(queue) ((queue)->nelts == 0)

/**
 * Callback routine that is called to destroy this
 * fd_queue_t when its pool is destroyed.
 */
static apr_status_t ap_queue_destroy(void *data)
{
    fd_queue_t *queue = data;

    /* Ignore errors here, we can't do anything about them anyway.
     * XXX: We should at least try to signal an error here, it is
     * indicative of a programmer error. -aaron */
    apr_thread_cond_destroy(queue->not_empty);
    apr_thread_mutex_destroy(queue->one_big_mutex);

    return APR_SUCCESS;
}

/**
 * Initialize the fd_queue_t.
 */
apr_status_t ap_queue_init(fd_queue_t * queue, int queue_capacity,
                           apr_pool_t * a)
{
    int i;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_create(&queue->one_big_mutex,
                                      APR_THREAD_MUTEX_DEFAULT,
                                      a)) != APR_SUCCESS) {
        return rv;
    }
    if ((rv = apr_thread_cond_create(&queue->not_empty, a)) != APR_SUCCESS) {
        return rv;
    }

    queue->data = apr_palloc(a, queue_capacity * sizeof(fd_queue_elem_t));
    queue->bounds = queue_capacity;
    queue->nelts = 0;

    /* Set all the sockets in the queue to NULL */
    for (i = 0; i < queue_capacity; ++i)
        queue->data[i].sd = NULL;

    apr_pool_cleanup_register(a, queue, ap_queue_destroy,
                              apr_pool_cleanup_null);

    return APR_SUCCESS;
}

/**
 * Push a new socket onto the queue.
 *
 * precondition: ap_queue_info_wait_for_idler has already been called
 *               to reserve an idle worker thread
 */
apr_status_t ap_queue_push(fd_queue_t * queue, apr_socket_t * sd,
                           conn_state_t * cs, apr_pool_t * p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);
    AP_DEBUG_ASSERT(!ap_queue_full(queue));

    elem = &queue->data[queue->nelts];
    elem->sd = sd;
    elem->cs = cs;
    elem->p = p;
    queue->nelts++;

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    return APR_SUCCESS;
}

/**
 * Retrieves the next available socket from the queue. If there are no
 * sockets available, it will block until one becomes available.
 * Once retrieved, the socket is placed into the address specified by
 * 'sd'.
 */
apr_status_t ap_queue_pop(fd_queue_t * queue, apr_socket_t ** sd,
                          conn_state_t ** cs, apr_pool_t ** p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    /* Keep waiting until we wake up and find that the queue is not empty. */
    if (ap_queue_empty(queue)) {
        if (!queue->terminated) {
            apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
        }
        /* If we wake up and it's still empty, then we were interrupted */
        if (ap_queue_empty(queue)) {
            rv = apr_thread_mutex_unlock(queue->one_big_mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            if (queue->terminated) {
                return APR_EOF; /* no more elements ever again */
            }
            else {
                return APR_EINTR;
            }
        }
    }

    elem = &queue->data[--queue->nelts];
    *sd = elem->sd;
    *cs = elem->cs;
    *p = elem->p;
#ifdef AP_DEBUG
    elem->sd = NULL;
    elem->p = NULL;
#endif /* AP_DEBUG */

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}

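/* Sketch of a consumer loop around ap_queue_pop (illustrative only;
 * process_socket, worker_queue and worker_queue_info are hypothetical names
 * standing in for whatever the caller uses):
 *
 *     for (;;) {
 *         rv = ap_queue_pop(worker_queue, &csd, &cs, &ptrans);
 *         if (rv == APR_EOF) {
 *             break;
 *         }
 *         if (rv != APR_SUCCESS) {
 *             continue;
 *         }
 *         process_socket(csd, cs, ptrans);
 *         ap_queue_info_set_idle(worker_queue_info, ptrans);
 *     }
 *
 * APR_EOF means ap_queue_term was called and no more elements will arrive;
 * APR_EINTR indicates a wakeup with nothing to do and can simply be retried.
 */
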
apr_status_t ap_queue_interrupt_all(fd_queue_t * queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    apr_thread_cond_broadcast(queue->not_empty);
    return apr_thread_mutex_unlock(queue->one_big_mutex);
}

apr_status_t ap_queue_term(fd_queue_t * queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    /* we must hold one_big_mutex when setting this... otherwise,
     * we could end up setting it and waking everybody up just after a
     * would-be popper checks it but right before they block
     */
    queue->terminated = 1;
    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    return ap_queue_interrupt_all(queue);
}

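/* A plausible shutdown ordering (a sketch, not a prescription): terminate the
 * idler accounting first so ap_queue_info_wait_for_idler starts returning
 * APR_EOF and the listener stops reserving workers, then terminate the queue
 * itself so any blocked poppers wake up and also see APR_EOF:
 *
 *     ap_queue_info_term(worker_queue_info);
 *     ap_queue_term(worker_queue);
 */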