27
27
#include "ssl_private.h"
30
30
* This shared memory based SSL session cache implementation was
31
31
* originally written by Geoff Thorpe <geoff geoffthorpe.net> for C2Net
32
32
* Europe as a contribution to Ralf Engelschall's mod_ssl project.
36
* The shared-memory segment header can be cast to and from the
37
* SHMCBHeader type, all other structures need to be initialised by
40
* The "header" looks like this;
42
* data applying to the overall structure:
43
* - division_offset (unsigned int):
44
* how far into the shared memory segment the first division is.
45
* - division_size (unsigned int):
46
* how many bytes each division occupies.
47
* (NB: This includes the queue and the cache)
48
* - division_mask (unsigned char):
49
* the "mask" in the next line. Add one to this,
50
* and that's the number of divisions.
52
* data applying to within each division:
53
* - queue_size (unsigned int):
54
* how big each "queue" is. NB: The queue is the first block in each
55
 * division and is followed immediately by the cache itself so
56
* there's no cache_offset value.
58
* data applying to within each queue:
59
* - index_num (unsigned char):
60
* how many indexes in each cache's queue
61
* - index_offset (unsigned char):
62
* how far into the queue the first index is.
64
* how big each index is.
66
* data applying to within each cache:
67
* - cache_data_offset (unsigned int):
68
* how far into the cache the session-data array is stored.
69
* - cache_data_size (unsigned int):
70
* how big each cache's data block is.
72
* statistics data (this will eventually be per-division but right now
73
* there's only one mutex):
74
* - stores (unsigned long):
75
* how many stores have been performed in the cache.
76
* - expiries (unsigned long):
77
* how many session have been expired from the cache.
78
* - scrolled (unsigned long):
79
* how many sessions have been scrolled out of full cache during a
80
* "store" operation. This is different to the "removes" stats as
81
* they are requested by mod_ssl/Apache, these are done because of
82
* cache logistics. (NB: Also, this value should be deducible from
83
* the others if my code has no bugs, but I count it anyway - plus
84
* it helps debugging :-).
85
* - retrieves_hit (unsigned long):
86
* how many session-retrieves have succeeded.
87
* - retrieves_miss (unsigned long):
88
* how many session-retrieves have failed.
89
* - removes_hit (unsigned long):
90
* - removes_miss (unsigned long):
92
* Following immediately after the header is an array of "divisions".
93
* Each division is simply a "queue" immediately followed by its
94
* corresponding "cache". Each division handles some pre-defined band
95
* of sessions by using the "division_mask" in the header. Eg. if
96
* division_mask=0x1f then there are 32 divisions, the first of which
97
* will store sessions whose least-significant 5 bits are 0, the second
98
 * stores sessions whose LS 5 bits equal 1, etc. A queue is an indexing
99
* structure referring to its corresponding cache.
101
* A "queue" looks like this;
103
* - first_pos (unsigned int):
104
* the location within the array of indexes where the virtual
105
* "left-hand-edge" of the cyclic buffer is.
106
* - pos_count (unsigned int):
107
* the number of indexes occupied from first_pos onwards.
109
* ...followed by an array of indexes, each of which can be
110
* memcpy'd to and from an SHMCBIndex, and look like this;
112
* - expires (time_t):
113
* the time() value at which this session expires.
114
* - offset (unsigned int):
115
* the offset within the cache data block where the corresponding
117
* - s_id2 (unsigned char):
118
* the second byte of the session_id, stored as an optimisation to
119
* reduce the number of d2i_SSL_SESSION calls that are made when doing
121
* - removed (unsigned char):
122
* a byte used to indicate whether a session has been "passively"
123
* removed. Ie. it is still in the cache but is to be disregarded by
124
* any "retrieve" operation.
126
* A "cache" looks like this;
128
* - first_pos (unsigned int):
129
* the location within the data block where the virtual
130
* "left-hand-edge" of the cyclic buffer is.
131
* - pos_count (unsigned int):
132
* the number of bytes used in the data block from first_pos onwards.
134
* ...followed by the data block in which actual DER-encoded SSL
135
* sessions are stored.
139
* Header - can be memcpy'd to and from the front of the shared
140
* memory segment. NB: The first copy (commented out) has the
141
* elements in a meaningful order, but due to data-alignment
142
* braindeadness, the second (uncommented) copy has the types grouped
143
* so as to decrease "struct-bloat". sigh.
34
* Since rewritten by GT to not use alignment-fudging memcpys and reduce
39
* Header structure - the start of the shared-mem segment
146
unsigned long num_stores;
147
unsigned long num_expiries;
148
unsigned long num_scrolled;
149
unsigned long num_retrieves_hit;
150
unsigned long num_retrieves_miss;
151
unsigned long num_removes_hit;
152
unsigned long num_removes_miss;
153
unsigned int division_offset;
154
unsigned int division_size;
155
unsigned int queue_size;
156
unsigned int cache_data_offset;
157
unsigned int cache_data_size;
158
unsigned char division_mask;
42
/* Stats for cache operations */
43
unsigned long stat_stores;
44
unsigned long stat_expiries;
45
unsigned long stat_scrolled;
46
unsigned long stat_retrieves_hit;
47
unsigned long stat_retrieves_miss;
48
unsigned long stat_removes_hit;
49
unsigned long stat_removes_miss;
50
/* Number of subcaches */
51
unsigned int subcache_num;
52
/* How many indexes each subcache's queue has */
159
53
unsigned int index_num;
160
unsigned int index_offset;
161
unsigned int index_size;
54
/* How large each subcache is, including the queue and data */
55
unsigned int subcache_size;
56
/* How far into each subcache the data area is (optimisation) */
57
unsigned int subcache_data_offset;
58
/* How large the data area in each subcache is (optimisation) */
59
unsigned int subcache_data_size;
165
* Index - can be memcpy'd to and from an index inside each
166
* queue's index array.
63
* Subcache structure - the start of each subcache, followed by
67
/* The start position and length of the cyclic buffer of indexes */
68
unsigned int idx_pos, idx_used;
69
/* Same for the data area */
70
unsigned int data_pos, data_used;
74
* Index structure - each subcache has an array of these
77
/* absolute time this entry expires */
79
/* location within the subcache's data area */
80
unsigned int data_pos;
81
/* size (most logic ignores this, we keep it only to minimise memcpy) */
82
unsigned int data_used;
83
/* Optimisation to prevent ASN decoding unless a match is likely */
171
84
unsigned char s_id2;
85
/* Used to mark explicitly-removed sessions */
172
86
unsigned char removed;
176
* Queue - must be populated by a call to shmcb_get_division
177
* and the structure's pointers are used for updating (ie.
178
* the structure doesn't need any "set" to update values).
182
unsigned int *first_pos;
183
unsigned int *pos_count;
188
* Cache - same comment as for Queue. 'Queue's are in a 1-1
189
 * correspondence with 'Cache's and are usually carried round
190
 * in a pair, they are only separated for clarity.
194
unsigned int *first_pos;
195
unsigned int *pos_count;
200
* Forward function prototypes.
203
/* Functions for working around data-alignment-picky systems (sparcs,
204
Irix, etc). These use "memcpy" as a way of foxing these systems into
205
treating the composite types as byte-arrays rather than higher-level
206
primitives that it prefers to have 4-(or 8-)byte aligned. I don't
207
envisage this being a performance issue as a couple of 2 or 4 byte
208
memcpys can hardly make a dent on the massive memmove operations this
209
cache technique avoids, nor the overheads of ASN en/decoding. */
210
static unsigned int shmcb_get_safe_uint(unsigned int *);
211
static void shmcb_set_safe_uint_ex(unsigned char *, const unsigned char *);
212
#define shmcb_set_safe_uint(pdest, src) \
214
unsigned int tmp_uint = src; \
215
shmcb_set_safe_uint_ex((unsigned char *)pdest, \
216
(const unsigned char *)(&tmp_uint)); \
218
#if 0 /* Unused so far */
219
static unsigned long shmcb_get_safe_ulong(unsigned long *);
220
static void shmcb_set_safe_ulong_ex(unsigned char *, const unsigned char *);
221
#define shmcb_set_safe_ulong(pdest, src) \
223
unsigned long tmp_ulong = src; \
224
shmcb_set_safe_ulong_ex((unsigned char *)pdest, \
225
(const unsigned char *)(&tmp_ulong)); \
228
static time_t shmcb_get_safe_time(time_t *);
229
static void shmcb_set_safe_time_ex(unsigned char *, const unsigned char *);
230
#define shmcb_set_safe_time(pdest, src) \
232
time_t tmp_time = src; \
233
shmcb_set_safe_time_ex((unsigned char *)pdest, \
234
(const unsigned char *)(&tmp_time)); \
237
/* Zero a region of the shared segment byte-by-byte via memset().  Kept
 * deliberately out-of-line: GCC >= 4 aggressively inlines static functions,
 * and an inlined memset() with a compile-time-constant size may be lowered
 * to aligned stores, defeating the alignment-safety this helper exists to
 * provide on alignment-picky platforms. */
#if defined(__GNUC__) && __GNUC__ > 3
__attribute__((__noinline__))
#endif
static void shmcb_safe_clear(void *ptr, size_t size)
{
    memset(ptr, 0, size);
}
249
/* Underlying functions for session-caching */
250
static BOOL shmcb_init_memory(server_rec *, void *, unsigned int);
251
static BOOL shmcb_store_session(server_rec *, void *, UCHAR *, int, SSL_SESSION *, time_t);
252
static SSL_SESSION *shmcb_retrieve_session(server_rec *, void *, UCHAR *, int);
253
static BOOL shmcb_remove_session(server_rec *, void *, UCHAR *, int);
255
/* Utility functions for manipulating the structures */
256
static void shmcb_get_header(void *, SHMCBHeader **);
257
static BOOL shmcb_get_division(SHMCBHeader *, SHMCBQueue *, SHMCBCache *, unsigned int);
258
static SHMCBIndex *shmcb_get_index(const SHMCBQueue *, unsigned int);
259
static unsigned int shmcb_expire_division(server_rec *, SHMCBQueue *, SHMCBCache *);
260
static BOOL shmcb_insert_encoded_session(server_rec *, SHMCBQueue *, SHMCBCache *, unsigned char *, unsigned int, unsigned char *, time_t);
261
static SSL_SESSION *shmcb_lookup_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, unsigned int);
262
static BOOL shmcb_remove_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, unsigned int);
265
* Data-alignment functions (a.k.a. avoidance tactics)
267
* NB: On HPUX (and possibly others) there is a *very* mischievous little
268
* "optimisation" in the compilers where it will convert the following;
269
* memcpy(dest_ptr, &source, sizeof(unsigned int));
270
* (where dest_ptr is of type (unsigned int *) and source is (unsigned int))
272
* *dest_ptr = source; (or *dest_ptr = *(&source), not sure).
273
* Either way, it completely destroys the whole point of these _safe_
274
* functions, because the assignment operation will fall victim to the
275
* architecture's byte-alignment dictations, whereas the memcpy (as a
276
* byte-by-byte copy) should not. sigh. So, if you're wondering about the
277
* apparently unnecessary conversions to (unsigned char *) in these
278
* functions, you now have an explanation. Don't just revert them back and
279
* say "ooh look, it still works" - if you try it on HPUX (well, 32-bit
280
* HPUX 11.00 at least) you may find it fails with a SIGBUS. :-(
283
static unsigned int shmcb_get_safe_uint(unsigned int *ptr)
286
shmcb_set_safe_uint_ex((unsigned char *)(&ret),
287
(const unsigned char *)ptr);
291
static void shmcb_set_safe_uint_ex(unsigned char *dest,
292
const unsigned char *src)
294
memcpy(dest, src, sizeof(unsigned int));
297
#if 0 /* Unused so far */
298
static unsigned long shmcb_get_safe_ulong(unsigned long *ptr)
301
shmcb_set_safe_ulong_ex((unsigned char *)(&ret),
302
(const unsigned char *)ptr);
306
static void shmcb_set_safe_ulong_ex(unsigned char *dest,
307
const unsigned char *src)
309
memcpy(dest, src, sizeof(unsigned long));
313
static time_t shmcb_get_safe_time(time_t * ptr)
316
shmcb_set_safe_time_ex((unsigned char *)(&ret),
317
(const unsigned char *)ptr);
321
static void shmcb_set_safe_time_ex(unsigned char *dest,
322
const unsigned char *src)
324
memcpy(dest, src, sizeof(time_t));
328
** High-Level "handlers" as per ssl_scache.c
90
/* The SHM data segment is of fixed size and stores data as follows.
92
* [ SHMCBHeader | Subcaches ]
94
* The SHMCBHeader header structure stores metadata concerning the
95
* cache and the contained subcaches.
97
* Subcaches is a hash table of header->subcache_num SHMCBSubcache
98
* structures. The hash table is indexed by SHMCB_MASK(id). Each
99
* SHMCBSubcache structure has a fixed size (header->subcache_size),
100
* which is determined at creation time, and looks like the following:
102
* [ SHMCBSubcache | Indexes | Data ]
104
* Each subcache is prefixed by the SHMCBSubcache structure.
106
* The subcache's "Data" segment is a single cyclic data buffer, of
107
* total size header->subcache_data_size; data inside is referenced
108
* using byte offsets. The offset marking the beginning of the cyclic
109
* buffer is subcache->data_pos the buffer's length is
110
* subcache->data_used.
112
* "Indexes" is an array of header->index_num SHMCBIndex structures,
113
* which is used as a cyclic queue; subcache->idx_pos gives the array
114
* index of the first in use, subcache->idx_used gives the number in
115
* use. Both ->idx_* values have a range of [0, header->index_num)
117
* Each in-use SHMCBIndex structure represents a single SSL session.
120
/* This macro takes a pointer to the header and a zero-based index and returns
121
* a pointer to the corresponding subcache. */
122
#define SHMCB_SUBCACHE(pHeader, num) \
123
(SHMCBSubcache *)(((unsigned char *)(pHeader)) + \
124
sizeof(SHMCBHeader) + \
125
(num) * ((pHeader)->subcache_size))
127
/* This macro takes a pointer to the header and a session id and returns a
128
* pointer to the corresponding subcache. */
129
#define SHMCB_MASK(pHeader, id) \
130
SHMCB_SUBCACHE((pHeader), *(id) & ((pHeader)->subcache_num - 1))
132
/* This macro takes the same params as the last, generating two outputs for use
133
* in ap_log_error(...). */
134
#define SHMCB_MASK_DBG(pHeader, id) \
135
*(id), (*(id) & ((pHeader)->subcache_num - 1))
137
/* This macro takes a pointer to a subcache and a zero-based index and returns
138
* a pointer to the corresponding SHMCBIndex. */
139
#define SHMCB_INDEX(pSubcache, num) \
140
((SHMCBIndex *)(((unsigned char *)pSubcache) + \
141
sizeof(SHMCBSubcache)) + num)
143
/* This macro takes a pointer to the header and a subcache and returns a
144
* pointer to the corresponding data area. */
145
#define SHMCB_DATA(pHeader, pSubcache) \
146
((unsigned char *)(pSubcache) + (pHeader)->subcache_data_offset)
149
* Cyclic functions - assists in "wrap-around"/modulo logic
152
/* Addition modulo 'mod' */
153
#define SHMCB_CYCLIC_INCREMENT(val,inc,mod) \
154
(((val) + (inc)) % (mod))
156
/* Subtraction (or "distance between") modulo 'mod' */
157
#define SHMCB_CYCLIC_SPACE(val1,val2,mod) \
158
((val2) >= (val1) ? ((val2) - (val1)) : \
159
((val2) + (mod) - (val1)))
161
/* A "normal-to-cyclic" memcpy: copy a linear buffer 'src' of 'src_len'
 * bytes into the cyclic data area 'data' (of total size 'buf_size'),
 * starting at 'dest_offset' and wrapping to the front of the area when
 * the end is reached. */
static void shmcb_cyclic_ntoc_memcpy(unsigned int buf_size, unsigned char *data,
                                     unsigned int dest_offset, unsigned char *src,
                                     unsigned int src_len)
{
    if (dest_offset + src_len < buf_size) {
        /* The destination range does not wrap; one copy suffices. */
        memcpy(data + dest_offset, src, src_len);
        return;
    }
    /* Wraps: first fill from dest_offset up to the end of the area ... */
    unsigned int head = buf_size - dest_offset;
    memcpy(data + dest_offset, src, head);
    /* ... then place the remainder at the start of the area. */
    memcpy(data, src + head, src_len - head);
}
177
/* A "cyclic-to-normal" memcpy: copy 'src_len' bytes out of the cyclic data
 * area 'data' (of total size 'buf_size'), starting at 'src_offset' and
 * wrapping to the front of the area when the end is reached, into the
 * linear buffer 'dest'. */
static void shmcb_cyclic_cton_memcpy(unsigned int buf_size, unsigned char *dest,
                                     unsigned char *data, unsigned int src_offset,
                                     unsigned int src_len)
{
    if (src_offset + src_len < buf_size) {
        /* The source range does not wrap; one copy suffices. */
        memcpy(dest, data + src_offset, src_len);
        return;
    }
    /* Wraps: first take from src_offset up to the end of the area ... */
    unsigned int head = buf_size - src_offset;
    memcpy(dest, data + src_offset, head);
    /* ... then the remainder from the start of the area. */
    memcpy(dest + head, data, src_len - head);
}
193
/* Prototypes for low-level subcache operations */
194
static void shmcb_subcache_expire(server_rec *, SHMCBHeader *, SHMCBSubcache *);
195
static BOOL shmcb_subcache_store(server_rec *, SHMCBHeader *, SHMCBSubcache *,
196
UCHAR *, unsigned int, UCHAR *, time_t);
197
static SSL_SESSION *shmcb_subcache_retrieve(server_rec *, SHMCBHeader *, SHMCBSubcache *,
198
UCHAR *, unsigned int);
199
static BOOL shmcb_subcache_remove(server_rec *, SHMCBHeader *, SHMCBSubcache *,
200
UCHAR *, unsigned int);
203
* High-Level "handlers" as per ssl_scache.c
204
* subcache internals are deferred to shmcb_subcache_*** functions lower down
332
207
void ssl_scache_shmcb_init(server_rec *s, apr_pool_t *p)
426
385
SSL_SESSION *ssl_scache_shmcb_retrieve(server_rec *s, UCHAR *id, int idlen)
428
387
SSLModConfigRec *mc = myModConfig(s);
429
SSL_SESSION *pSession;
388
SSL_SESSION *pSession = NULL;
389
SHMCBHeader *header = mc->tSessionCacheDataTable;
390
SHMCBSubcache *subcache = SHMCB_MASK(header, id);
432
pSession = shmcb_retrieve_session(s, mc->tSessionCacheDataTable, id, idlen);
393
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
394
"ssl_scache_shmcb_retrieve (0x%02x -> subcache %d)",
395
SHMCB_MASK_DBG(header, id));
397
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "unusably short session_id provided "
398
"(%u bytes)", idlen);
401
/* Get the session corresponding to the session_id or NULL if it doesn't
402
* exist (or is flagged as "removed"). */
403
pSession = shmcb_subcache_retrieve(s, header, subcache, id, idlen);
405
header->stat_retrieves_hit++;
407
header->stat_retrieves_miss++;
408
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
409
"leaving ssl_scache_shmcb_retrieve successfully");
433
411
ssl_mutex_off(s);
435
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
436
"shmcb_retrieve had a hit");
438
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
439
"shmcb_retrieve had a miss");
440
ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
441
"Client requested a 'session-resume' but "
442
"we have no such session.");
447
415
void ssl_scache_shmcb_remove(server_rec *s, UCHAR *id, int idlen)
449
417
SSLModConfigRec *mc = myModConfig(s);
418
SHMCBHeader *header = mc->tSessionCacheDataTable;
419
SHMCBSubcache *subcache = SHMCB_MASK(header, id);
452
shmcb_remove_session(s, mc->tSessionCacheDataTable, id, idlen);
422
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
423
"ssl_scache_shmcb_remove (0x%02x -> subcache %d)",
424
SHMCB_MASK_DBG(header, id));
426
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "unusably short session_id provided "
427
"(%u bytes)", idlen);
430
if (shmcb_subcache_remove(s, header, subcache, id, idlen))
431
header->stat_removes_hit++;
433
header->stat_removes_miss++;
434
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
435
"leaving ssl_scache_shmcb_remove successfully");
453
437
ssl_mutex_off(s);
456
440
void ssl_scache_shmcb_status(request_rec *r, int flags, apr_pool_t *p)
458
SSLModConfigRec *mc = myModConfig(r->server);
463
unsigned int loop, total, cache_total, non_empty_divisions;
442
server_rec *s = r->server;
443
SSLModConfigRec *mc = myModConfig(s);
444
void *shm_segment = apr_shm_baseaddr_get(mc->pSessionCacheDataMM);
445
SHMCBHeader *header = shm_segment;
446
unsigned int loop, total = 0, cache_total = 0, non_empty_subcaches = 0;
447
time_t idx_expiry, min_expiry = 0, max_expiry = 0, average_expiry = 0;
448
time_t now = time(NULL);
449
double expiry_total = 0;
464
450
int index_pct, cache_pct;
466
time_t average_expiry, now, max_expiry, min_expiry, idxexpiry;
468
452
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "inside shmcb_status");
470
/* Get the header structure. */
471
shmcb_get_header(mc->tSessionCacheDataTable, &header);
472
total = cache_total = non_empty_divisions = 0;
473
average_expiry = max_expiry = min_expiry = 0;
476
/* It may seem strange to grab "now" at this point, but in theory
477
* we should never have a negative threshold but grabbing "now" after
478
* the loop (which performs expiries) could allow that chance. */
480
for (loop = 0; loop <= header->division_mask; loop++) {
481
if (shmcb_get_division(header, &queue, &cache, loop)) {
482
shmcb_expire_division(r->server, &queue, &cache);
483
total += shmcb_get_safe_uint(queue.pos_count);
484
cache_total += shmcb_get_safe_uint(cache.pos_count);
485
if (shmcb_get_safe_uint(queue.pos_count) > 0) {
486
idx = shmcb_get_index(&queue,
487
shmcb_get_safe_uint(queue.first_pos));
488
non_empty_divisions++;
489
idxexpiry = shmcb_get_safe_time(&(idx->expires));
490
expiry_total += (double) idxexpiry;
491
max_expiry = (idxexpiry > max_expiry ? idxexpiry :
494
min_expiry = idxexpiry;
496
min_expiry = (idxexpiry < min_expiry ? idxexpiry :
453
/* Perform the iteration inside the mutex to avoid corruption or invalid
454
* pointer arithmetic. The rest of our logic uses read-only header data so
455
* doesn't need the lock. */
457
/* Iterate over the subcaches */
458
for (loop = 0; loop < header->subcache_num; loop++) {
459
SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
460
shmcb_subcache_expire(s, header, subcache);
461
total += subcache->idx_used;
462
cache_total += subcache->data_used;
463
if (subcache->idx_used) {
464
SHMCBIndex *idx = SHMCB_INDEX(subcache, subcache->idx_pos);
465
non_empty_subcaches++;
466
idx_expiry = idx->expires;
467
expiry_total += (double)idx_expiry;
468
max_expiry = ((idx_expiry > max_expiry) ? idx_expiry : max_expiry);
470
min_expiry = idx_expiry;
472
min_expiry = ((idx_expiry < min_expiry) ? idx_expiry : min_expiry);
501
index_pct = (100 * total) / (header->index_num * (header->division_mask + 1));
502
cache_pct = (100 * cache_total) / (header->cache_data_size * (header->division_mask + 1));
476
index_pct = (100 * total) / (header->index_num *
477
header->subcache_num);
478
cache_pct = (100 * cache_total) / (header->subcache_data_size *
479
header->subcache_num);
503
481
ap_rprintf(r, "cache type: <b>SHMCB</b>, shared memory: <b>%d</b> "
504
482
"bytes, current sessions: <b>%d</b><br>",
505
483
mc->nSessionCacheDataSize, total);
506
ap_rprintf(r, "sub-caches: <b>%d</b>, indexes per sub-cache: "
507
"<b>%d</b><br>", (int) header->division_mask + 1,
508
(int) header->index_num);
509
if (non_empty_divisions != 0) {
510
average_expiry = (time_t)(expiry_total / (double)non_empty_divisions);
484
ap_rprintf(r, "subcaches: <b>%d</b>, indexes per subcache: <b>%d</b><br>",
485
header->subcache_num, header->index_num);
486
if (non_empty_subcaches) {
487
average_expiry = (time_t)(expiry_total / (double)non_empty_subcaches);
511
488
ap_rprintf(r, "time left on oldest entries' SSL sessions: ");
512
489
if (now < average_expiry)
513
490
ap_rprintf(r, "avg: <b>%d</b> seconds, (range: %d...%d)<br>",
514
(int)(average_expiry - now), (int) (min_expiry - now),
491
(int)(average_expiry - now),
492
(int)(min_expiry - now),
515
493
(int)(max_expiry - now));
517
ap_rprintf(r, "expiry threshold: <b>Calculation Error!</b>"
495
ap_rprintf(r, "expiry_threshold: <b>Calculation error!</b><br>");
521
ap_rprintf(r, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b>"
522
"<br>", index_pct, cache_pct);
498
ap_rprintf(r, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b><br>",
499
index_pct, cache_pct);
523
500
ap_rprintf(r, "total sessions stored since starting: <b>%lu</b><br>",
501
header->stat_stores);
525
502
ap_rprintf(r, "total sessions expired since starting: <b>%lu</b><br>",
526
header->num_expiries);
527
ap_rprintf(r, "total (pre-expiry) sessions scrolled out of the "
528
"cache: <b>%lu</b><br>", header->num_scrolled);
503
header->stat_expiries);
504
ap_rprintf(r, "total (pre-expiry) sessions scrolled out of the cache: "
505
"<b>%lu</b><br>", header->stat_scrolled);
529
506
ap_rprintf(r, "total retrieves since starting: <b>%lu</b> hit, "
530
"<b>%lu</b> miss<br>", header->num_retrieves_hit,
531
header->num_retrieves_miss);
507
"<b>%lu</b> miss<br>", header->stat_retrieves_hit,
508
header->stat_retrieves_miss);
532
509
ap_rprintf(r, "total removes since starting: <b>%lu</b> hit, "
533
"<b>%lu</b> miss<br>", header->num_removes_hit,
534
header->num_removes_miss);
510
"<b>%lu</b> miss<br>", header->stat_removes_hit,
511
header->stat_removes_miss);
535
512
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "leaving shmcb_status");
541
** Memory manipulation and low-level cache operations
545
static BOOL shmcb_init_memory(
546
server_rec *s, void *shm_mem,
547
unsigned int shm_mem_size)
552
unsigned int temp, loop, granularity;
554
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
555
"entered shmcb_init_memory()");
557
/* Calculate some sizes... */
558
temp = sizeof(SHMCBHeader);
560
/* If the segment is ridiculously too small, bail out */
561
if (shm_mem_size < (2*temp)) {
562
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
563
"shared memory segment too small");
567
/* Make temp the amount of memory without the header */
568
temp = shm_mem_size - temp;
570
/* Work on the basis that you need 10 bytes index for each session
571
* (approx 150 bytes), which is to divide temp by 160 - and then
572
* make sure we err on having too index space to burn even when
573
* the cache is full, which is a lot less stupid than having
574
* having not enough index space to utilise the whole cache!. */
576
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
577
"for %u bytes, recommending %u indexes",
580
/* We should divide these indexes evenly amongst the queues. Try
581
* to get it so that there are roughly half the number of divisions
582
* as there are indexes in each division. */
584
while ((temp / granularity) < (2 * granularity))
587
/* So we have 'granularity' divisions, set 'temp' equal to the
588
* number of indexes in each division. */
591
/* Too small? Bail ... */
593
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
594
"shared memory segment too small");
598
/* OK, we're sorted - from here on in, the return should be TRUE */
599
header = (SHMCBHeader *)shm_mem;
600
header->division_mask = (unsigned char)(granularity - 1);
601
header->division_offset = sizeof(SHMCBHeader);
602
header->index_num = temp;
603
header->index_offset = (2 * sizeof(unsigned int));
604
header->index_size = sizeof(SHMCBIndex);
605
header->queue_size = header->index_offset +
606
(header->index_num * header->index_size);
608
/* Now calculate the space for each division */
609
temp = shm_mem_size - header->division_offset;
610
header->division_size = temp / granularity;
612
/* Calculate the space left in each division for the cache */
613
temp -= header->queue_size;
614
header->cache_data_offset = (2 * sizeof(unsigned int));
615
header->cache_data_size = header->division_size -
616
header->queue_size - header->cache_data_offset;
618
/* Output trace info */
619
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
620
"shmcb_init_memory choices follow");
621
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
622
"division_mask = 0x%02X", header->division_mask);
623
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
624
"division_offset = %u", header->division_offset);
625
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
626
"division_size = %u", header->division_size);
627
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
628
"queue_size = %u", header->queue_size);
629
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
630
"index_num = %u", header->index_num);
631
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
632
"index_offset = %u", header->index_offset);
633
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
634
"index_size = %u", header->index_size);
635
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
636
"cache_data_offset = %u", header->cache_data_offset);
637
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
638
"cache_data_size = %u", header->cache_data_size);
640
/* The header is done, make the caches empty */
641
for (loop = 0; loop < granularity; loop++) {
642
if (!shmcb_get_division(header, &queue, &cache, loop))
643
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "shmcb_init_memory, " "internal error");
644
shmcb_set_safe_uint(cache.first_pos, 0);
645
shmcb_set_safe_uint(cache.pos_count, 0);
646
shmcb_set_safe_uint(queue.first_pos, 0);
647
shmcb_set_safe_uint(queue.pos_count, 0);
650
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
651
"leaving shmcb_init_memory()");
655
static BOOL shmcb_store_session(
656
server_rec *s, void *shm_segment, UCHAR *id,
657
int idlen, SSL_SESSION * pSession,
663
unsigned char masked_index;
664
unsigned char encoded[SSL_SESSION_MAX_DER];
665
unsigned char *ptr_encoded;
666
unsigned int len_encoded;
668
unsigned char *session_id = SSL_SESSION_get_session_id(pSession);
670
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
671
"inside shmcb_store_session");
673
/* Get the header structure, which division this session will fall into etc. */
674
shmcb_get_header(shm_segment, &header);
675
masked_index = session_id[0] & header->division_mask;
676
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
677
"session_id[0]=%u, masked index=%u",
678
session_id[0], masked_index);
679
if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
680
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
681
"shmcb_store_session internal error");
685
/* Serialise the session, work out how much we're dealing
686
* with. NB: This check could be removed if we're not paranoid
687
* or we find some assurance that it will never be necessary. */
688
len_encoded = i2d_SSL_SESSION(pSession, NULL);
689
if (len_encoded > SSL_SESSION_MAX_DER) {
690
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
691
"session is too big (%u bytes)", len_encoded);
694
ptr_encoded = encoded;
695
len_encoded = i2d_SSL_SESSION(pSession, &ptr_encoded);
696
expiry_time = timeout;
697
if (!shmcb_insert_encoded_session(s, &queue, &cache, encoded,
698
len_encoded, session_id,
700
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
701
"can't store a session!");
704
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
705
"leaving shmcb_store successfully");
706
header->num_stores++;
710
static SSL_SESSION *shmcb_retrieve_session(
711
server_rec *s, void *shm_segment,
712
UCHAR *id, int idlen)
717
unsigned char masked_index;
718
SSL_SESSION *pSession;
720
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
721
"inside shmcb_retrieve_session");
723
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "unusably short session_id provided "
724
"(%u bytes)", idlen);
728
/* Get the header structure, which division this session lookup
729
* will come from etc. */
730
shmcb_get_header(shm_segment, &header);
731
masked_index = id[0] & header->division_mask;
732
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
733
"id[0]=%u, masked index=%u", id[0], masked_index);
734
if (!shmcb_get_division(header, &queue, &cache, (unsigned int) masked_index)) {
735
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
736
"shmcb_retrieve_session internal error");
737
header->num_retrieves_miss++;
741
/* Get the session corresponding to the session_id or NULL if it
742
* doesn't exist (or is flagged as "removed"). */
743
pSession = shmcb_lookup_session_id(s, &queue, &cache, id, idlen);
745
header->num_retrieves_hit++;
747
header->num_retrieves_miss++;
748
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
749
"leaving shmcb_retrieve_session");
753
static BOOL shmcb_remove_session(
754
server_rec *s, void *shm_segment,
755
UCHAR *id, int idlen)
760
unsigned char masked_index;
763
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
764
"inside shmcb_remove_session");
766
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "remove called with NULL session_id!");
770
/* Get the header structure, which division this session remove
771
* will happen in etc. */
772
shmcb_get_header(shm_segment, &header);
773
masked_index = id[0] & header->division_mask;
774
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
775
"id[0]=%u, masked index=%u", id[0], masked_index);
776
if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
777
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "shmcb_remove_session, internal error");
778
header->num_removes_miss++;
781
res = shmcb_remove_session_id(s, &queue, &cache, id, idlen);
783
header->num_removes_hit++;
785
header->num_removes_miss++;
786
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
787
"leaving shmcb_remove_session");
794
** Weirdo cyclic buffer functions
798
/* This gets used in the cyclic "index array" (in the 'Queue's) and
 * in the cyclic 'Cache's too ... you provide the "width" of the
 * cyclic store, the starting position and how far to move (with
 * wrapping if necessary). Basically it's addition modulo buf_size.
 *
 * NOTE(review): reconstructed from a corrupted interleave; the third
 * parameter's original name was not visible. The modular-add behaviour
 * itself is pinned down by the visible wrap loop and the header comment. */
static unsigned int shmcb_cyclic_increment(
    unsigned int buf_size,
    unsigned int start_pos,
    unsigned int to_add)
{
    /* Add, then wrap back into [0, buf_size). A subtraction loop rather
     * than '%' keeps behaviour identical for any to_add and is cheap for
     * the small increments this cache uses. */
    start_pos += to_add;
    while (start_pos >= buf_size)
        start_pos -= buf_size;
    return start_pos;
}
/* Given two positions in a cyclic buffer, calculate the "distance".
 * This is to cover the case ("non-trivial") where the 'next' offset
 * is to the left of the 'start' offset. NB: This calculates the
 * space inclusive of one end-point but not the other. There is an
 * ambiguous case (which is why we use the <start_pos,offset>
 * coordinate system rather than <start_pos,end_pos> one) when 'start'
 * is the same as 'next'. It could indicate the buffer is full or it
 * can indicate the buffer is empty ... I choose the latter as it's
 * easier and usually necessary to check if the buffer is full anyway
 * before doing incremental logic (which is this useful for), but we
 * definitely need the empty case handled - in fact it's our starting
 * state. */
static unsigned int shmcb_cyclic_space(
    unsigned int buf_size,
    unsigned int start_offset,
    unsigned int next_offset)
{
    /* Is it the trivial case? */
    if (start_offset <= next_offset)
        return (next_offset - start_offset);              /* yes */
    else
        return ((buf_size - start_offset) + next_offset); /* no */
}
/* A "normal-to-cyclic" memcpy ... this takes a linear block of
 * memory and copies it onto a cyclic buffer. The purpose and
 * function of this is pretty obvious, you need to cover the case
 * that the destination (cyclic) buffer has to wrap round.
 *
 * NOTE(review): the 'data' parameter line and the clamp body were lost
 * in the corrupted source; truncating src_len to buf_size matches the
 * historical implementation - confirm against upstream. */
static void shmcb_cyclic_ntoc_memcpy(
    unsigned int buf_size,
    unsigned char *data,
    unsigned int dest_offset,
    unsigned char *src, unsigned int src_len)
{
    /* Cover the case that src_len > buf_size */
    if (src_len > buf_size)
        src_len = buf_size;
    /* Can it be copied all in one go? */
    if (dest_offset + src_len < buf_size)
        /* yes */
        memcpy(data + dest_offset, src, src_len);
    else {
        /* no - copy the tail of the buffer, then wrap to the front.
         * (When the data fits exactly to the end, the second memcpy
         * degenerates to zero bytes.) */
        memcpy(data + dest_offset, src, buf_size - dest_offset);
        memcpy(data, src + buf_size - dest_offset,
               src_len + dest_offset - buf_size);
    }
}
/* A "cyclic-to-normal" memcpy ... given the last function, this
 * one's purpose is clear, it copies out of a cyclic buffer handling
 * wrapping.
 *
 * NOTE(review): the 'dest'/'data' parameter lines and the clamp body
 * were lost in the corrupted source; truncating src_len to buf_size
 * matches the historical implementation - confirm against upstream. */
static void shmcb_cyclic_cton_memcpy(
    unsigned int buf_size,
    unsigned char *dest,
    unsigned char *data,
    unsigned int src_offset,
    unsigned int src_len)
{
    /* Cover the case that src_len > buf_size */
    if (src_len > buf_size)
        src_len = buf_size;
    /* Can it be copied all in one go? */
    if (src_offset + src_len < buf_size)
        /* yes */
        memcpy(dest, data + src_offset, src_len);
    else {
        /* no - copy the tail of the cyclic buffer, then wrap round
         * to its front for the remainder. */
        memcpy(dest, data + src_offset, buf_size - src_offset);
        memcpy(dest + buf_size - src_offset, data,
               src_len + src_offset - buf_size);
    }
}
/* Here's the cool hack that makes it all work ... by simply
892
* making the first collection of bytes *be* our header structure
893
* (casting it into the C structure), we have the perfect way to
894
* maintain state in a shared-memory session cache from one call
895
* (and process) to the next, use the shared memory itself! The
896
* original mod_ssl shared-memory session cache uses variables
897
* inside the context, but we simply use that for storing the
898
* pointer to the shared memory itself. And don't forget, after
899
* Apache's initialisation, this "header" is constant/read-only
900
* so we can read it outside any locking.
901
* <grin> - sometimes I just *love* coding y'know?! */
902
static void shmcb_get_header(void *shm_mem, SHMCBHeader **header)
904
*header = (SHMCBHeader *)shm_mem;
908
/* This is what populates our "interesting" structures. Given a
909
* pointer to the header, and an index into the appropriate
910
* division (this must have already been masked using the
911
* division_mask by the caller!), we can populate the provided
912
* SHMCBQueue and SHMCBCache structures with values and
913
* pointers to the underlying shared memory. Upon returning
914
* (if not FALSE), the caller can meddle with the pointer
915
* values and they will map into the shared-memory directly,
916
* as such there's no need to "free" or "set" the Queue or
917
* Cache values, they were themselves references to the *real*
919
static BOOL shmcb_get_division(
920
SHMCBHeader *header, SHMCBQueue *queue,
921
SHMCBCache *cache, unsigned int idx)
923
unsigned char *pQueue;
924
unsigned char *pCache;
927
if (idx > (unsigned int) header->division_mask)
930
/* Locate the blocks of memory storing the corresponding data */
931
pQueue = ((unsigned char *) header) + header->division_offset +
932
(idx * header->division_size);
933
pCache = pQueue + header->queue_size;
935
/* Populate the structures with appropriate pointers */
936
queue->first_pos = (unsigned int *) pQueue;
938
/* Our structures stay packed, no matter what the system's
939
* data-alignment regime is. */
940
queue->pos_count = (unsigned int *) (pQueue + sizeof(unsigned int));
941
queue->indexes = (SHMCBIndex *) (pQueue + (2 * sizeof(unsigned int)));
942
cache->first_pos = (unsigned int *) pCache;
943
cache->pos_count = (unsigned int *) (pCache + sizeof(unsigned int));
944
cache->data = (unsigned char *) (pCache + (2 * sizeof(unsigned int)));
945
queue->header = cache->header = header;
950
/* This returns a pointer to the piece of shared memory containing
951
* a specified 'Index'. SHMCBIndex, like SHMCBHeader, is a fixed
952
* width non-referencing structure of primitive types that can be
953
* cast onto the corresponding block of shared memory. Thus, by
954
* returning a cast pointer to that section of shared memory, the
955
* caller can read and write values to and from the "structure" and
956
* they are actually reading and writing the underlying shared
958
static SHMCBIndex *shmcb_get_index(
959
const SHMCBQueue *queue, unsigned int idx)
962
if (idx > queue->header->index_num)
965
/* Return a pointer to the index. NB: I am being horribly pendantic
966
* here so as to avoid any potential data-alignment assumptions being
967
* placed on the pointer arithmetic by the compiler (sigh). */
968
return (SHMCBIndex *)(((unsigned char *) queue->indexes) +
969
(idx * sizeof(SHMCBIndex)));
972
/* This functions rolls expired cache (and index) entries off the front
973
* of the cyclic buffers in a division. The function returns the number
974
* of expired sessions. */
975
static unsigned int shmcb_expire_division(
976
server_rec *s, SHMCBQueue *queue, SHMCBCache *cache)
980
unsigned int loop, index_num, pos_count, new_pos;
983
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
984
"entering shmcb_expire_division");
986
/* We must calculate num and space ourselves based on expiry times. */
989
new_pos = shmcb_get_safe_uint(queue->first_pos);
991
/* Cache useful values */
992
header = queue->header;
993
index_num = header->index_num;
994
pos_count = shmcb_get_safe_uint(queue->pos_count);
995
while (loop < pos_count) {
996
idx = shmcb_get_index(queue, new_pos);
997
if (shmcb_get_safe_time(&(idx->expires)) > now)
516
* Subcache-level cache operations
519
static void shmcb_subcache_expire(server_rec *s, SHMCBHeader *header,
520
SHMCBSubcache *subcache)
522
time_t now = time(NULL);
523
unsigned int loop = 0;
524
unsigned int new_idx_pos = subcache->idx_pos;
525
SHMCBIndex *idx = NULL;
527
while (loop < subcache->idx_used) {
528
idx = SHMCB_INDEX(subcache, new_idx_pos);
529
if (idx->expires > now)
998
530
/* it hasn't expired yet, we're done iterating */
1000
/* This one should be expired too. Shift to the next entry. */
1002
new_pos = shmcb_cyclic_increment(index_num, new_pos, 1);
1005
/* Find the new_offset and make the expiries happen. */
1007
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1008
"will be expiring %u sessions", loop);
1009
/* We calculate the new_offset by "peeking" (or in the
1010
* case it's the last entry, "sneaking" ;-). */
1011
if (loop == pos_count) {
1012
/* We are expiring everything! This is easy to do... */
1013
shmcb_set_safe_uint(queue->pos_count, 0);
1014
shmcb_set_safe_uint(cache->pos_count, 0);
1017
/* The Queue is easy to adjust */
1018
shmcb_set_safe_uint(queue->pos_count,
1019
shmcb_get_safe_uint(queue->pos_count) - loop);
1020
shmcb_set_safe_uint(queue->first_pos, new_pos);
1021
/* peek to the start of the next session */
1022
idx = shmcb_get_index(queue, new_pos);
1023
/* We can use shmcb_cyclic_space because we've guaranteed
1024
* we don't fit the ambiguous full/empty case. */
1025
shmcb_set_safe_uint(cache->pos_count,
1026
shmcb_get_safe_uint(cache->pos_count) -
1027
shmcb_cyclic_space(header->cache_data_size,
1028
shmcb_get_safe_uint(cache->first_pos),
1029
shmcb_get_safe_uint(&(idx->offset))));
1030
shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
1032
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1033
"we now have %u sessions",
1034
shmcb_get_safe_uint(queue->pos_count));
1036
header->num_expiries += loop;
533
new_idx_pos = SHMCB_CYCLIC_INCREMENT(new_idx_pos, 1, header->index_num);
538
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
539
"will be expiring %u sessions", loop);
540
if (loop == subcache->idx_used) {
541
/* We're expiring everything, piece of cake */
542
subcache->idx_used = 0;
543
subcache->data_used = 0;
545
/* There remain other indexes, so we can use idx to adjust 'data' */
546
unsigned int diff = SHMCB_CYCLIC_SPACE(subcache->data_pos,
548
header->subcache_data_size);
549
/* Adjust the indexes */
550
subcache->idx_used -= loop;
551
subcache->idx_pos = new_idx_pos;
552
/* Adjust the data area */
553
subcache->data_used -= diff;
554
subcache->data_pos = idx->data_pos;
556
header->stat_expiries += loop;
557
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
558
"we now have %u sessions", subcache->idx_used);
1040
/* Inserts a new encoded session into a queue/cache pair - expiring
1041
* (early or otherwise) any leading sessions as necessary to ensure
1042
* there is room. An error return (FALSE) should only happen in the
1043
* event of surreal values being passed on, or ridiculously small
1044
* cache sizes. NB: For tracing purposes, this function is also given
1045
* the server_rec to allow "ssl_log()". */
1046
static BOOL shmcb_insert_encoded_session(
1047
server_rec *s, SHMCBQueue * queue,
1049
unsigned char *encoded,
1050
unsigned int encoded_len,
1051
unsigned char *session_id,
561
static BOOL shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
562
SHMCBSubcache *subcache,
563
UCHAR *data, unsigned int data_len,
564
UCHAR *id, time_t expiry)
1054
SHMCBHeader *header;
1055
SHMCBIndex *idx = NULL;
1056
unsigned int gap, new_pos, loop, new_offset;
1059
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1060
"entering shmcb_insert_encoded_session, "
1061
"*queue->pos_count = %u",
1062
shmcb_get_safe_uint(queue->pos_count));
1064
/* If there's entries to expire, ditch them first thing. */
1065
shmcb_expire_division(s, queue, cache);
1066
header = cache->header;
1067
gap = header->cache_data_size - shmcb_get_safe_uint(cache->pos_count);
1068
if (gap < encoded_len) {
1069
new_pos = shmcb_get_safe_uint(queue->first_pos);
1071
need = (int) encoded_len - (int) gap;
1072
while ((need > 0) && (loop + 1 < shmcb_get_safe_uint(queue->pos_count))) {
1073
new_pos = shmcb_cyclic_increment(header->index_num, new_pos, 1);
1075
idx = shmcb_get_index(queue, new_pos);
1076
need = (int) encoded_len - (int) gap -
1077
shmcb_cyclic_space(header->cache_data_size,
1078
shmcb_get_safe_uint(cache->first_pos),
1079
shmcb_get_safe_uint(&(idx->offset)));
1082
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1083
"about to scroll %u sessions from %u",
1084
loop, shmcb_get_safe_uint(queue->pos_count));
1085
/* We are removing "loop" items from the cache. */
1086
shmcb_set_safe_uint(cache->pos_count,
1087
shmcb_get_safe_uint(cache->pos_count) -
1088
shmcb_cyclic_space(header->cache_data_size,
1089
shmcb_get_safe_uint(cache->first_pos),
1090
shmcb_get_safe_uint(&(idx->offset))));
1091
shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
1092
shmcb_set_safe_uint(queue->pos_count, shmcb_get_safe_uint(queue->pos_count) - loop);
1093
shmcb_set_safe_uint(queue->first_pos, new_pos);
1094
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1095
"now only have %u sessions",
1096
shmcb_get_safe_uint(queue->pos_count));
1097
/* Update the stats!!! */
1098
header->num_scrolled += loop;
1102
/* probably unecessary checks, but I'll leave them until this code
1104
if (shmcb_get_safe_uint(cache->pos_count) + encoded_len >
1105
header->cache_data_size) {
1106
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
1107
"shmcb_insert_encoded_session internal error");
1110
if (shmcb_get_safe_uint(queue->pos_count) == header->index_num) {
1111
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
1112
"shmcb_insert_encoded_session internal error");
1115
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1116
"we have %u bytes and %u indexes free - enough",
1117
header->cache_data_size -
1118
shmcb_get_safe_uint(cache->pos_count), header->index_num -
1119
shmcb_get_safe_uint(queue->pos_count));
566
unsigned int new_offset, new_idx;
569
/* Sanity check the input */
570
if ((data_len > header->subcache_data_size) || (data_len > SSL_SESSION_MAX_DER)) {
571
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
572
"inserting session larger (%d) than subcache data area (%d)",
573
data_len, header->subcache_data_size);
577
/* If there are entries to expire, ditch them first. */
578
shmcb_subcache_expire(s, header, subcache);
580
/* Loop until there is enough space to insert */
581
if (header->subcache_data_size - subcache->data_used < data_len
582
|| subcache->idx_used == header->index_num) {
583
unsigned int loop = 0;
585
idx = SHMCB_INDEX(subcache, subcache->idx_pos);
586
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
587
"about to force-expire, subcache: idx_used=%d, "
588
"data_used=%d", subcache->idx_used, subcache->data_used);
592
/* Adjust the indexes by one */
593
subcache->idx_pos = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, 1,
595
subcache->idx_used--;
596
if (!subcache->idx_used) {
597
/* There's nothing left */
598
subcache->data_used = 0;
601
/* Adjust the data */
602
idx2 = SHMCB_INDEX(subcache, subcache->idx_pos);
603
subcache->data_used -= SHMCB_CYCLIC_SPACE(idx->data_pos, idx2->data_pos,
604
header->subcache_data_size);
605
subcache->data_pos = idx2->data_pos;
607
header->stat_scrolled++;
611
} while (header->subcache_data_size - subcache->data_used < data_len);
613
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
614
"finished force-expire, subcache: idx_used=%d, "
615
"data_used=%d", subcache->idx_used, subcache->data_used);
1122
618
/* HERE WE ASSUME THAT THE NEW SESSION SHOULD GO ON THE END! I'M NOT
1123
619
* CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
1129
625
* would make this stuff *MUCH* more efficient. Mind you, it's very
1130
626
* efficient right now because I'm ignoring this problem!!!
1133
/* Increment to the first unused byte */
1134
new_offset = shmcb_cyclic_increment(header->cache_data_size,
1135
shmcb_get_safe_uint(cache->first_pos),
1136
shmcb_get_safe_uint(cache->pos_count));
1137
/* Copy the DER-encoded session into place */
1138
shmcb_cyclic_ntoc_memcpy(header->cache_data_size, cache->data,
1139
new_offset, encoded, encoded_len);
1140
/* Get the new index that this session is stored in. */
1141
new_pos = shmcb_cyclic_increment(header->index_num,
1142
shmcb_get_safe_uint(queue->first_pos),
1143
shmcb_get_safe_uint(queue->pos_count));
1144
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1145
"storing in index %u, at offset %u",
1146
new_pos, new_offset);
1147
idx = shmcb_get_index(queue, new_pos);
1149
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
1150
"shmcb_insert_encoded_session internal error");
1153
shmcb_safe_clear(idx, sizeof(SHMCBIndex));
1154
shmcb_set_safe_time(&(idx->expires), expiry_time);
1155
shmcb_set_safe_uint(&(idx->offset), new_offset);
1157
/* idx->removed = (unsigned char)0; */ /* Not needed given the memset above. */
1158
idx->s_id2 = session_id[1];
1159
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1160
"session_id[0]=%u, idx->s_id2=%u",
1161
session_id[0], session_id[1]);
1163
/* All that remains is to adjust the cache's and queue's "pos_count"s. */
1164
shmcb_set_safe_uint(cache->pos_count,
1165
shmcb_get_safe_uint(cache->pos_count) + encoded_len);
1166
shmcb_set_safe_uint(queue->pos_count,
1167
shmcb_get_safe_uint(queue->pos_count) + 1);
1169
/* And just for good debugging measure ... */
1170
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1171
"leaving now with %u bytes in the cache and %u indexes",
1172
shmcb_get_safe_uint(cache->pos_count),
1173
shmcb_get_safe_uint(queue->pos_count));
1174
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1175
"leaving shmcb_insert_encoded_session");
628
/* Insert the data */
629
new_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
630
header->subcache_data_size);
631
shmcb_cyclic_ntoc_memcpy(header->subcache_data_size,
632
SHMCB_DATA(header, subcache), new_offset,
634
subcache->data_used += data_len;
635
/* Insert the index */
636
new_idx = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, subcache->idx_used,
638
idx = SHMCB_INDEX(subcache, new_idx);
639
idx->expires = expiry;
640
idx->data_pos = new_offset;
641
idx->data_used = data_len;
644
subcache->idx_used++;
645
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
646
"insert happened at idx=%d, data=%d", new_idx, new_offset);
647
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
648
"finished insert, subcache: idx_pos/idx_used=%d/%d, "
649
"data_pos/data_used=%d/%d",
650
subcache->idx_pos, subcache->idx_used,
651
subcache->data_pos, subcache->data_used);
1179
/* Performs a lookup into a queue/cache pair for a
1180
* session_id. If found, the session is deserialised
1181
* and returned, otherwise NULL. */
1182
static SSL_SESSION *shmcb_lookup_session_id(
1183
server_rec *s, SHMCBQueue *queue,
1184
SHMCBCache *cache, UCHAR *id,
655
static SSL_SESSION *shmcb_subcache_retrieve(server_rec *s, SHMCBHeader *header,
656
SHMCBSubcache *subcache, UCHAR *id,
1187
unsigned char tempasn[SSL_SESSION_MAX_DER];
1189
SHMCBHeader *header;
1190
SSL_SESSION *pSession = NULL;
1191
unsigned int curr_pos, loop, count;
1192
MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr;
1195
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1196
"entering shmcb_lookup_session_id");
1198
/* If there are entries to expire, ditch them first thing. */
1199
shmcb_expire_division(s, queue, cache);
1201
curr_pos = shmcb_get_safe_uint(queue->first_pos);
1202
count = shmcb_get_safe_uint(queue->pos_count);
1203
header = queue->header;
1204
for (loop = 0; loop < count; loop++) {
1205
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1206
"loop=%u, count=%u, curr_pos=%u",
1207
loop, count, curr_pos);
1208
idx = shmcb_get_index(queue, curr_pos);
1209
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1210
"idx->s_id2=%u, id[1]=%u, offset=%u",
1211
idx->s_id2, id[1], shmcb_get_safe_uint(&(idx->offset)));
1212
/* Only look into the session further if;
1213
* (a) the second byte of the session_id matches,
1214
* (b) the "removed" flag isn't set,
1215
* (c) the session hasn't expired yet.
1216
* We do (c) like this so that it saves us having to
1217
* do natural expiries ... naturally expired sessions
1218
* scroll off the front anyway when the cache is full and
1219
* "rotating", the only real issue that remains is the
1220
* removal or disabling of forcibly killed sessions. */
1221
if ((idx->s_id2 == id[1]) && !idx->removed &&
1222
(shmcb_get_safe_time(&(idx->expires)) > now)) {
1223
unsigned int session_id_length;
1224
unsigned char *session_id;
660
unsigned int loop = 0;
662
/* If there are entries to expire, ditch them first. */
663
shmcb_subcache_expire(s, header, subcache);
664
pos = subcache->idx_pos;
666
while (loop < subcache->idx_used) {
667
SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
669
/* Only consider 'idx' if;
670
* (a) the s_id2 byte matches
671
* (b) the "removed" flag isn't set.
673
if ((idx->s_id2 == id[1]) && !idx->removed) {
674
SSL_SESSION *pSession;
676
unsigned int s_idlen;
677
unsigned char tempasn[SSL_SESSION_MAX_DER];
678
MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr = tempasn;
1226
680
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1227
"at index %u, found possible session match",
1229
shmcb_cyclic_cton_memcpy(header->cache_data_size,
1230
tempasn, cache->data,
1231
shmcb_get_safe_uint(&(idx->offset)),
1232
SSL_SESSION_MAX_DER);
1234
pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
1235
session_id_length = SSL_SESSION_get_session_id_length(pSession);
1236
session_id = SSL_SESSION_get_session_id(pSession);
1238
if (pSession == NULL) {
681
"possible match at idx=%d, data=%d", pos, idx->data_pos);
683
shmcb_cyclic_cton_memcpy(header->subcache_data_size,
684
tempasn, SHMCB_DATA(header, subcache),
685
idx->data_pos, idx->data_used);
686
/* Decode the session */
687
pSession = d2i_SSL_SESSION(NULL, &ptr, idx->data_used);
1239
689
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
1240
"scach2_lookup_session_id internal error");
690
"shmcb_subcache_retrieve internal error");
1243
if ((session_id_length == idlen) &&
1244
(memcmp(session_id, id, idlen) == 0)) {
693
s_id = SSL_SESSION_get_session_id(pSession);
694
s_idlen = SSL_SESSION_get_session_id_length(pSession);
695
if (s_idlen == idlen && memcmp(s_id, id, idlen) == 0) {
696
/* Found the matching session */
1245
697
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
698
"shmcb_subcache_retrieve returning matching session");
1247
699
return pSession;
1249
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1251
701
SSL_SESSION_free(pSession);
1254
curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
705
pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
1256
708
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1257
"no matching sessions were found");
709
"shmcb_subcache_retrieve found no match");
1261
static BOOL shmcb_remove_session_id(
1262
server_rec *s, SHMCBQueue *queue,
1263
SHMCBCache *cache, UCHAR *id, unsigned int idlen)
713
static BOOL shmcb_subcache_remove(server_rec *s, SHMCBHeader *header,
714
SHMCBSubcache *subcache,
715
UCHAR *id, unsigned int idlen)
1265
unsigned char tempasn[SSL_SESSION_MAX_DER];
1266
SSL_SESSION *pSession = NULL;
1268
SHMCBHeader *header;
1269
unsigned int curr_pos, loop, count;
1270
MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr;
718
unsigned int loop = 0;
1271
719
BOOL to_return = FALSE;
1273
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1274
"entering shmcb_remove_session_id");
1276
/* If there's entries to expire, ditch them first thing. */
1277
/* shmcb_expire_division(s, queue, cache); */
1279
/* Regarding the above ... hmmm ... I know my expiry code is slightly
1280
* "faster" than all this remove stuff ... but if the higher level
1281
* code calls a "remove" operation (and this *only* seems to happen
1282
* when it has spotted an expired session before we had a chance to)
1283
* then it should get credit for a remove (stats-wise). Also, in the
1284
* off-chance that the server *requests* a renegotiate and wants to
1285
* wipe the session clean we should give that priority over our own
1286
* routine expiry handling. So I've moved the expiry check to *after*
1287
* this general remove stuff. */
1288
curr_pos = shmcb_get_safe_uint(queue->first_pos);
1289
count = shmcb_get_safe_uint(queue->pos_count);
1290
header = cache->header;
1291
for (loop = 0; loop < count; loop++) {
1292
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1293
"loop=%u, count=%u, curr_pos=%u",
1294
loop, count, curr_pos);
1295
idx = shmcb_get_index(queue, curr_pos);
1296
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1297
"idx->s_id2=%u, id[1]=%u", idx->s_id2,
1299
/* Only look into the session further if the second byte of the
1300
* session_id matches. */
1301
if (idx->s_id2 == id[1]) {
1302
unsigned int session_id_length;
1303
unsigned char *session_id;
721
/* Unlike the others, we don't do an expire-run first. This is to keep
722
* consistent statistics where a "remove" operation may actually be the
723
* higher layer spotting an expiry issue prior to us. Our caller is
724
* handling stats, so a failure return would be inconsistent if the
725
* intended session was in fact removed by an expiry run. */
727
pos = subcache->idx_pos;
728
while (!to_return && (loop < subcache->idx_used)) {
729
SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
730
/* Only consider 'idx' if the s_id2 byte matches and it's not already
731
* removed - easiest way to avoid costly ASN decodings. */
732
if ((idx->s_id2 == id[1]) && !idx->removed) {
733
SSL_SESSION *pSession;
735
unsigned int s_idlen;
736
unsigned char tempasn[SSL_SESSION_MAX_DER];
737
MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr = tempasn;
1305
739
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1306
"at index %u, found possible "
1307
"session match", curr_pos);
1308
shmcb_cyclic_cton_memcpy(header->cache_data_size,
1309
tempasn, cache->data,
1310
shmcb_get_safe_uint(&(idx->offset)),
1311
SSL_SESSION_MAX_DER);
1313
pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
1314
if (pSession == NULL) {
740
"possible match at idx=%d, data=%d", pos, idx->data_pos);
742
shmcb_cyclic_cton_memcpy(header->subcache_data_size,
743
tempasn, SHMCB_DATA(header, subcache),
744
idx->data_pos, idx->data_used);
745
/* Decode the session */
746
pSession = d2i_SSL_SESSION(NULL, &ptr, idx->data_used);
1315
748
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
1316
"shmcb_remove_session_id, internal error");
749
"shmcb_subcache_remove internal error");
1319
session_id_length = SSL_SESSION_get_session_id_length(pSession);
1320
session_id = SSL_SESSION_get_session_id(pSession);
1322
if ((session_id_length == idlen)
1323
&& (memcmp(id, session_id, idlen) == 0)) {
1324
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1326
/* Scrub out this session "quietly" */
1327
idx->removed = (unsigned char) 1;
1328
SSL_SESSION_free(pSession);
752
s_id = SSL_SESSION_get_session_id(pSession);
753
s_idlen = SSL_SESSION_get_session_id_length(pSession);
754
if (s_idlen == idlen && memcmp(s_id, id, idlen) == 0) {
755
/* Found the matching session, remove it quietly. */
1329
757
to_return = TRUE;
758
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
759
"shmcb_subcache_remove removing matching session");
1332
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1334
761
SSL_SESSION_free(pSession);
1337
curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
765
pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
1339
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1340
"no matching sessions were found");
1342
/* If there's entries to expire, ditch them now. */
1343
shmcb_expire_division(s, queue, cache);
1345
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1346
"leaving shmcb_remove_session_id");
1347
768
return to_return;