/*
 * config_pool.c : pool of configuration objects
 *
 * ====================================================================
 *    Licensed to the Apache Software Foundation (ASF) under one
 *    or more contributor license agreements.  See the NOTICE file
 *    distributed with this work for additional information
 *    regarding copyright ownership.  The ASF licenses this file
 *    to you under the Apache License, Version 2.0 (the
 *    "License"); you may not use this file except in compliance
 *    with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing,
 *    software distributed under the License is distributed on an
 *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *    KIND, either express or implied.  See the License for the
 *    specific language governing permissions and limitations
 *    under the License.
 * ====================================================================
 */
29
#include "svn_error.h"
31
#include "svn_pools.h"
33
#include "private/svn_atomic.h"
34
#include "private/svn_object_pool.h"
35
#include "private/svn_subr_private.h"
36
#include "private/svn_dep_compat.h"
40
/* A reference counting wrapper around the user-provided object.
42
typedef struct object_ref_t
44
/* reference to the parent container */
45
svn_object_pool__t *object_pool;
47
/* identifies the bucket in OBJECT_POOL->OBJECTS in which this entry
51
/* User provided object. Usually a wrapper. */
54
/* private pool. This instance and its other members got allocated in it.
55
* Will be destroyed when this instance is cleaned up. */
58
/* Number of references to this data struct */
59
volatile svn_atomic_t ref_count;
63
/* Core data structure. All access to it must be serialized using MUTEX.
65
struct svn_object_pool__t
67
/* serialization object for all non-atomic data in this struct */
70
/* object_ref_t.KEY -> object_ref_t* mapping.
72
* In shared object mode, there is at most one such entry per key and it
73
* may or may not be in use. In exclusive mode, only unused references
74
* will be put here and they form chains if there are multiple unused
75
* instances for the key. */
78
/* same as objects->count but allows for non-sync'ed access */
79
volatile svn_atomic_t object_count;
81
/* Number of entries in OBJECTS with a reference count 0.
82
Due to races, this may be *temporarily* off by one or more.
83
Hence we must not strictly depend on it. */
84
volatile svn_atomic_t unused_count;
86
/* the root pool owning this structure */
89
/* extractor and updater for the user object wrappers */
90
svn_object_pool__getter_t getter;
91
svn_object_pool__setter_t setter;
95
/* Pool cleanup function for the whole object pool.
98
object_pool_cleanup(void *baton)
100
svn_object_pool__t *object_pool = baton;
102
/* all entries must have been released up by now */
103
SVN_ERR_ASSERT_NO_RETURN( object_pool->object_count
104
== object_pool->unused_count);
109
/* Remove entries from OBJECTS in OBJECT_POOL that have a ref-count of 0.
111
* Requires external serialization on OBJECT_POOL.
114
remove_unused_objects(svn_object_pool__t *object_pool)
116
apr_pool_t *subpool = svn_pool_create(object_pool->pool);
118
/* process all hash buckets */
119
apr_hash_index_t *hi;
120
for (hi = apr_hash_first(subpool, object_pool->objects);
122
hi = apr_hash_next(hi))
124
object_ref_t *object_ref = apr_hash_this_val(hi);
126
/* note that we won't hand out new references while access
127
to the hash is serialized */
128
if (svn_atomic_read(&object_ref->ref_count) == 0)
130
apr_hash_set(object_pool->objects, object_ref->key.data,
131
object_ref->key.size, NULL);
132
svn_atomic_dec(&object_pool->object_count);
133
svn_atomic_dec(&object_pool->unused_count);
135
svn_pool_destroy(object_ref->pool);
139
svn_pool_destroy(subpool);
142
/* Cleanup function called when an object_ref_t gets released.
145
object_ref_cleanup(void *baton)
147
object_ref_t *object = baton;
148
svn_object_pool__t *object_pool = object->object_pool;
150
/* If we released the last reference to object, there is one more
153
Note that unused_count does not need to be always exact but only
154
needs to become exact *eventually* (we use it to check whether we
155
should remove unused objects every now and then). I.e. it must
156
never drift off / get stuck but always reflect the true value once
157
all threads left the racy sections.
159
if (svn_atomic_dec(&object->ref_count) == 0)
160
svn_atomic_inc(&object_pool->unused_count);
165
/* Handle reference counting for the OBJECT_REF that the caller is about
166
* to return. The reference will be released when POOL gets cleaned up.
168
* Requires external serialization on OBJECT_REF->OBJECT_POOL.
171
add_object_ref(object_ref_t *object_ref,
174
/* Update ref counter.
175
Note that this is racy with object_ref_cleanup; see comment there. */
176
if (svn_atomic_inc(&object_ref->ref_count) == 0)
177
svn_atomic_dec(&object_ref->object_pool->unused_count);
179
/* make sure the reference gets released automatically */
180
apr_pool_cleanup_register(pool, object_ref, object_ref_cleanup,
181
apr_pool_cleanup_null);
184
/* Actual implementation of svn_object_pool__lookup.
186
* Requires external serialization on OBJECT_POOL.
189
lookup(void **object,
190
svn_object_pool__t *object_pool,
193
apr_pool_t *result_pool)
195
object_ref_t *object_ref
196
= apr_hash_get(object_pool->objects, key->data, key->size);
200
*object = object_pool->getter(object_ref->wrapper, baton, result_pool);
201
add_object_ref(object_ref, result_pool);
211
/* Actual implementation of svn_object_pool__insert.
213
* Requires external serialization on OBJECT_POOL.
216
insert(void **object,
217
svn_object_pool__t *object_pool,
218
const svn_membuf_t *key,
221
apr_pool_t *wrapper_pool,
222
apr_pool_t *result_pool)
224
object_ref_t *object_ref
225
= apr_hash_get(object_pool->objects, key->data, key->size);
228
/* entry already exists (e.g. race condition) */
229
svn_error_t *err = object_pool->setter(&object_ref->wrapper,
234
/* if we had an issue in the setter, then OBJECT_REF is in an
235
* unknown state now. Keep it around for the current users
236
* (i.e. don't clean the pool) but remove it from the list of
239
apr_hash_set(object_pool->objects, key->data, key->size, NULL);
240
svn_atomic_dec(&object_pool->object_count);
242
/* for the unlikely case that the object got created _and_
243
* already released since we last checked: */
244
if (svn_atomic_read(&object_ref->ref_count) == 0)
245
svn_atomic_dec(&object_pool->unused_count);
247
/* cleanup the new data as well because it's not safe to use
250
svn_pool_destroy(wrapper_pool);
252
/* propagate error */
253
return svn_error_trace(err);
256
/* Destroy the new one and return a reference to the existing one
257
* because the existing one may already have references on it.
259
svn_pool_destroy(wrapper_pool);
263
/* add new index entry */
264
object_ref = apr_pcalloc(wrapper_pool, sizeof(*object_ref));
265
object_ref->object_pool = object_pool;
266
object_ref->wrapper = wrapper;
267
object_ref->pool = wrapper_pool;
269
svn_membuf__create(&object_ref->key, key->size, wrapper_pool);
270
object_ref->key.size = key->size;
271
memcpy(object_ref->key.data, key->data, key->size);
273
apr_hash_set(object_pool->objects, object_ref->key.data,
274
object_ref->key.size, object_ref);
275
svn_atomic_inc(&object_pool->object_count);
277
/* the new entry is *not* in use yet.
278
* add_object_ref will update counters again.
280
svn_atomic_inc(&object_ref->object_pool->unused_count);
283
/* return a reference to the object we just added */
284
*object = object_pool->getter(object_ref->wrapper, baton, result_pool);
285
add_object_ref(object_ref, result_pool);
287
/* limit memory usage */
288
if (svn_atomic_read(&object_pool->unused_count) * 2
289
> apr_hash_count(object_pool->objects) + 2)
290
remove_unused_objects(object_pool);
295
/* Implement svn_object_pool__getter_t as no-op.
298
default_getter(void *object,
305
/* Implement svn_object_pool__setter_t as no-op.
308
default_setter(void **target,
317
/* API implementation */
320
svn_object_pool__create(svn_object_pool__t **object_pool,
321
svn_object_pool__getter_t getter,
322
svn_object_pool__setter_t setter,
323
svn_boolean_t thread_safe,
326
svn_object_pool__t *result;
328
/* construct the object pool in our private ROOT_POOL to survive POOL
329
* cleanup and to prevent threading issues with the allocator
331
result = apr_pcalloc(pool, sizeof(*result));
332
SVN_ERR(svn_mutex__init(&result->mutex, thread_safe, pool));
335
result->objects = svn_hash__make(result->pool);
336
result->getter = getter ? getter : default_getter;
337
result->setter = setter ? setter : default_setter;
339
/* make sure we clean up nicely.
340
* We need two cleanup functions of which exactly one will be run
341
* (disabling the respective other as the first step). If the owning
342
* pool does not cleaned up / destroyed explicitly, it may live longer
343
* than our allocator. So, we need do act upon cleanup requests from
344
* either side - owning_pool and root_pool.
346
apr_pool_cleanup_register(pool, result, object_pool_cleanup,
347
apr_pool_cleanup_null);
349
*object_pool = result;
354
svn_object_pool__new_wrapper_pool(svn_object_pool__t *object_pool)
356
return svn_pool_create(object_pool->pool);
360
svn_object_pool__mutex(svn_object_pool__t *object_pool)
362
return object_pool->mutex;
366
/* Return the number of entries currently indexed in OBJECT_POOL.
 * Uses the atomic counter, so no lock is required (value may be racy).
 * NOTE(review): return type reconstructed; confirm against
 * private/svn_object_pool.h. */
unsigned
svn_object_pool__count(svn_object_pool__t *object_pool)
{
  return svn_atomic_read(&object_pool->object_count);
}
372
svn_object_pool__lookup(void **object,
373
svn_object_pool__t *object_pool,
376
apr_pool_t *result_pool)
379
SVN_MUTEX__WITH_LOCK(object_pool->mutex,
380
lookup(object, object_pool, key, baton, result_pool));
385
svn_object_pool__insert(void **object,
386
svn_object_pool__t *object_pool,
387
const svn_membuf_t *key,
390
apr_pool_t *wrapper_pool,
391
apr_pool_t *result_pool)
394
SVN_MUTEX__WITH_LOCK(object_pool->mutex,
395
insert(object, object_pool, key, wrapper, baton,
396
wrapper_pool, result_pool));