/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fd_queue.h"
#include "apr_atomic.h"

typedef struct recycled_pool {
    apr_pool_t *pool;
    struct recycled_pool *next;
} recycled_pool;

struct fd_queue_info_t {
    apr_uint32_t idlers;
    apr_thread_mutex_t *idlers_mutex;
    apr_thread_cond_t *wait_for_idler;
    int terminated;
    int max_idlers;
    recycled_pool *recycled_pools;
};
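
/* Note on the synchronization strategy used below: the idler count and the
 * recycled-pool list are updated with lock-free atomic operations
 * (apr_atomic_cas32/apr_atomic_casptr), while the mutex and condition
 * variable are reserved for the rare case in which the listener must
 * actually sleep until a worker becomes idle.
 */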

static apr_status_t queue_info_cleanup(void *data_)
{
    fd_queue_info_t *qi = data_;
    apr_thread_cond_destroy(qi->wait_for_idler);
    apr_thread_mutex_destroy(qi->idlers_mutex);

    /* Clean up any pools in the recycled list: pop entries off the
     * lock-free stack one at a time, retrying whenever another thread
     * wins the compare-and-swap race
     */
    for (;;) {
        struct recycled_pool *first_pool = qi->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((volatile void**)&(qi->recycled_pools),
                              first_pool->next,
                              first_pool) == first_pool) {
            apr_pool_destroy(first_pool->pool);
        }
    }

    return APR_SUCCESS;
}

apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
                                  apr_pool_t *pool, int max_idlers)
{
    apr_status_t rv;
    fd_queue_info_t *qi;

    qi = apr_palloc(pool, sizeof(*qi));
    memset(qi, 0, sizeof(*qi));

    rv = apr_thread_mutex_create(&qi->idlers_mutex, APR_THREAD_MUTEX_DEFAULT,
                                 pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_thread_cond_create(&qi->wait_for_idler, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    qi->recycled_pools = NULL;
    qi->max_idlers = max_idlers;
    apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                              apr_pool_cleanup_null);

    *queue_info = qi;

    return APR_SUCCESS;
}
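
/* Illustrative call pattern (a sketch; the pool and thread-count names are
 * hypothetical placeholders, not part of this file):
 *
 *     fd_queue_info_t *worker_queue_info;
 *     rv = ap_queue_info_create(&worker_queue_info, pchild, threads_per_child);
 *     if (rv != APR_SUCCESS) {
 *         ... handle the error ...
 *     }
 */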

apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    apr_uint32_t prev_idlers;

    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (pool_to_recycle) {
        struct recycled_pool *new_recycle;
        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
                                                         sizeof(*new_recycle));
        new_recycle->pool = pool_to_recycle;
        for (;;) {
            new_recycle->next = queue_info->recycled_pools;
            if (apr_atomic_casptr((volatile void**)&(queue_info->recycled_pools),
                                  new_recycle, new_recycle->next) ==
                new_recycle->next) {
                break;
            }
        }
    }

    /* Atomically increment the count of idle workers, retrying the
     * compare-and-swap if another thread changed the count in between
     */
    for (;;) {
        prev_idlers = queue_info->idlers;
        if (apr_atomic_cas32(&(queue_info->idlers), prev_idlers + 1,
                             prev_idlers) == prev_idlers) {
            break;
        }
    }

    /* If this thread just made the idle worker count nonzero,
     * wake up the listener. */
    if (prev_idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
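
/* Illustrative worker-side call (hypothetical names): a worker that has
 * finished a connection can donate its transaction pool for reuse and
 * advertise itself as idle in one step. The pool must be cleared *before*
 * the call, because the recycled_pool link node is allocated out of the
 * donated pool itself:
 *
 *     apr_pool_clear(ptrans);
 *     rv = ap_queue_info_set_idle(worker_queue_info, ptrans);
 */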

apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;

    /* Block if the count of idle workers is zero */
    if (queue_info->idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition. Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still zero, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is nonzero, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above. It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable. But if the idle worker count is
         *     now nonzero, it's safe for this function to
         *     return immediately.
         */
        if (queue_info->idlers == 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    /* Atomically decrement the idle worker count */
    apr_atomic_dec32(&(queue_info->idlers));

    /* Atomically pop a pool from the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((volatile void**)&(queue_info->recycled_pools),
                              first_pool->next,
                              first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
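
/* Illustrative listener-side pattern (hypothetical names): reserve an idle
 * worker, reusing a recycled pool when one is available, then hand the
 * accepted socket to the worker queue:
 *
 *     apr_pool_t *ptrans = NULL;
 *     rv = ap_queue_info_wait_for_idler(worker_queue_info, &ptrans);
 *     if (rv == APR_EOF) {
 *         ... shutting down ...
 *     }
 *     if (ptrans == NULL) {
 *         apr_pool_create(&ptrans, some_parent_pool);
 *     }
 *     ... accept a connection into csd, allocating from ptrans ...
 *     ap_queue_push(worker_queue, csd, ptrans);
 */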

apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
{
    apr_status_t rv;

    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    queue_info->terminated = 1;
    apr_thread_cond_broadcast(queue_info->wait_for_idler);
    return apr_thread_mutex_unlock(queue_info->idlers_mutex);
}

/**
 * Detects when the fd_queue_t is full. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_full(queue) ((queue)->nelts == (queue)->bounds)

/**
 * Detects when the fd_queue_t is empty. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_empty(queue) ((queue)->nelts == 0)
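
/* Both macros read queue fields without any synchronization of their own,
 * so a caller must already hold queue->one_big_mutex, e.g. (sketch):
 *
 *     apr_thread_mutex_lock(queue->one_big_mutex);
 *     if (!ap_queue_full(queue)) {
 *         ... safe to add an element ...
 *     }
 *     apr_thread_mutex_unlock(queue->one_big_mutex);
 */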

/**
 * Callback routine that is called to destroy this
 * fd_queue_t when its pool is destroyed.
 */
static apr_status_t ap_queue_destroy(void *data)
{
    fd_queue_t *queue = data;

    /* Ignore errors here, we can't do anything about them anyway.
     * XXX: We should at least try to signal an error here, it is
     * indicative of a programmer error. -aaron */
    apr_thread_cond_destroy(queue->not_empty);
    apr_thread_mutex_destroy(queue->one_big_mutex);

    return APR_SUCCESS;
}

/**
 * Initialize the fd_queue_t.
 */
apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
{
    int i;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_create(&queue->one_big_mutex,
                                      APR_THREAD_MUTEX_DEFAULT,
                                      a)) != APR_SUCCESS) {
        return rv;
    }
    if ((rv = apr_thread_cond_create(&queue->not_empty, a)) != APR_SUCCESS) {
        return rv;
    }

    queue->data = apr_palloc(a, queue_capacity * sizeof(fd_queue_elem_t));
    queue->bounds = queue_capacity;
    queue->nelts = 0;

    /* Set all the sockets in the queue to NULL */
    for (i = 0; i < queue_capacity; ++i)
        queue->data[i].sd = NULL;

    apr_pool_cleanup_register(a, queue, ap_queue_destroy,
                              apr_pool_cleanup_null);

    return APR_SUCCESS;
}

/**
 * Push a new socket onto the queue.
 *
 * precondition: ap_queue_info_wait_for_idler has already been called
 *               to reserve an idle worker thread
 */
apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);
    AP_DEBUG_ASSERT(!ap_queue_full(queue));

    elem = &queue->data[queue->nelts];
    elem->sd = sd;
    elem->p = p;
    queue->nelts++;

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    return APR_SUCCESS;
}
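
/* A note on the wakeup above: since the precondition guarantees that an
 * idle worker has been reserved for each pushed element, there is at most
 * one element per waiting worker, so waking a single waiter with
 * apr_thread_cond_signal() appears sufficient; a broadcast is only needed
 * for termination (see ap_queue_interrupt_all below).
 */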

/**
 * Retrieves the next available socket from the queue. If there are no
 * sockets available, it will block until one becomes available.
 * Once retrieved, the socket is placed into the address specified by
 * 'sd'.
 */
apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    /* Keep waiting until we wake up and find that the queue is not empty. */
    if (ap_queue_empty(queue)) {
        if (!queue->terminated) {
            apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
        }
        /* If we wake up and it's still empty, then we were interrupted */
        if (ap_queue_empty(queue)) {
            rv = apr_thread_mutex_unlock(queue->one_big_mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            if (queue->terminated) {
                return APR_EOF; /* no more elements ever again */
            }
            else {
                return APR_EINTR;
            }
        }
    }

    elem = &queue->data[--queue->nelts];
    *sd = elem->sd;
    *p = elem->p;
#ifdef AP_DEBUG
    elem->sd = NULL;
    elem->p = NULL;
#endif /* AP_DEBUG */

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
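
/* Illustrative worker-thread loop (a simplified sketch with hypothetical
 * names; real callers need more careful error handling):
 *
 *     apr_pool_t *last_ptrans = NULL;
 *     for (;;) {
 *         ap_queue_info_set_idle(worker_queue_info, last_ptrans);
 *         last_ptrans = NULL;
 *         rv = ap_queue_pop(worker_queue, &csd, &ptrans);
 *         if (rv == APR_EOF) {
 *             break;    (queue terminated: exit the thread)
 *         }
 *         ... process the connection on csd using ptrans ...
 *         apr_pool_clear(ptrans);
 *         last_ptrans = ptrans;
 *     }
 */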

apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    apr_thread_cond_broadcast(queue->not_empty);
    return apr_thread_mutex_unlock(queue->one_big_mutex);
}

apr_status_t ap_queue_term(fd_queue_t *queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    /* we must hold one_big_mutex when setting this... otherwise,
     * we could end up setting it and waking everybody up just after a
     * would-be popper checks it but right before they block
     */
    queue->terminated = 1;
    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    return ap_queue_interrupt_all(queue);
}
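
/* Shutdown note: terminating both structures lets blocked threads drain
 * cleanly. After ap_queue_term(), every blocked or future ap_queue_pop()
 * returns APR_EOF; after ap_queue_info_term(), a listener blocked in
 * ap_queue_info_wait_for_idler() is woken and sees APR_EOF as well.
 */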