/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afsconfig.h>
#ifdef KERNEL
#include "../afs/param.h"
#else
#include <afs/param.h>
#endif
#ifdef AFS_SUN59_ENV
#include <sys/time_impl.h>
#endif

RCSID("$Header: /afs/sipb.mit.edu/project/openafs/debian/cvs/openafs/src/rx/rx_event.c,v 1.1.1.7 2002/08/02 04:36:20 hartmans Exp $");
#ifdef KERNEL
#ifndef UKERNEL
#include "../afs/afs_osi.h"
#else /* !UKERNEL */
#include "../afs/sysincludes.h"
#include "../afs/afsincludes.h"
#endif /* !UKERNEL */
#include "../rx/rx_clock.h"
#include "../rx/rx_queue.h"
#include "../rx/rx_event.h"
#include "../rx/rx_kernel.h"
#include "../rx/rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
#include "../afs/lock.h"
#endif /* RX_ENABLE_LOCKS */
#include "../rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
#include "../sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
#include "../h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
#include "../h/user.h"
#endif
extern void *osi_Alloc();
#endif /* AFS_SGI_ENV */
#else /* KERNEL */
#include <stdio.h>
#include "rx_clock.h"
#include "rx_queue.h"
#include "rx_event.h"
#include "rx_user.h"
#ifdef AFS_PTHREAD_ENV
#include <rx/rx_pthread.h>
#else
#include "rx_lwp.h"
#endif
#ifdef RX_ENABLE_LOCKS
#include "rx_kmutex.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx_globals.h"
#endif /* KERNEL */

/* All event processing is relative to the apparent current time given by clock_GetTime */
/* This should be static, but event_test wants to look at the free list... */
struct rx_queue rxevent_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
struct rx_queue rxepoch_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
static struct rx_queue rxepoch_queue;	/* list of waiting epochs */
static int rxevent_allocUnit = 10;	/* Allocation unit (number of event records to allocate at one time) */
static int rxepoch_allocUnit = 10;	/* Allocation unit (number of epoch records to allocate at one time) */
int rxevent_nFree;		/* Number of free event records */
int rxevent_nPosted;		/* Current number of posted events */
int rxepoch_nFree;		/* Number of free epoch records */
static int (*rxevent_ScheduledEarlierEvent)(); /* Proc to call when an event is scheduled that is earlier than all other events */
struct xfreelist {
    void *mem;
    int size;
    struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;

struct clock rxevent_nextRaiseEvents;	/* Time of next call to raise events */
int rxevent_raiseScheduled;	/* true if raise events is scheduled */

#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_EVENT;
#endif /* RX_LOCKS_DB */
#define RX_ENABLE_LOCKS 1
afs_kmutex_t rxevent_lock;
#endif /* RX_ENABLE_LOCKS */

#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxevent_initialized
 */

#include <assert.h>
pthread_mutex_t rx_event_mutex;
#define LOCK_EV_INIT assert(pthread_mutex_lock(&rx_event_mutex)==0);
#define UNLOCK_EV_INIT assert(pthread_mutex_unlock(&rx_event_mutex)==0);
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */

/* Pass in the number of events to allocate at a time */
int rxevent_initialized = 0;
void
rxevent_Init(nEvents, scheduler)
    int nEvents;
    void (*scheduler)();
{
    LOCK_EV_INIT
    if (rxevent_initialized) {
        UNLOCK_EV_INIT
        return;
    }
    MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
    clock_Init();
    if (nEvents) rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxepoch_free);
    queue_Init(&rxepoch_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxepoch_nFree = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    rxevent_initialized = 1;
    clock_Zero(&rxevent_nextRaiseEvents);
    rxevent_raiseScheduled = 0;
    UNLOCK_EV_INIT
}
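
/* Usage sketch (illustrative, not part of the original file): the caller
 * initializes the event package once at startup, passing the allocation
 * unit and a callback that wakes the listener when a newly posted event
 * becomes the earliest one, e.g.:
 *
 *	rxevent_Init(20, rxi_ReScheduleEvents);
 */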

/* Create and initialize new epoch structure */
struct rxepoch *rxepoch_Allocate(struct clock *when)
{
    struct rxepoch *ep;
    int i;

    /* If we are short on free epoch entries, create a block of new ones
     * and add them to the free queue */
    if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ep = (struct rxepoch *) rxi_Alloc(sizeof(struct rxepoch));
        queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
        ep = (struct rxepoch *)
             osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
        xfreemallocs->mem = (void *)ep;
        xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxepoch_allocUnit; i++)
            queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
    }
    ep = queue_First(&rxepoch_free, rxepoch);
    queue_Remove(ep);
    rxepoch_nFree--;
    ep->epochSec = when->sec;
    queue_Init(&ep->events);
    return ep;
}
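
/* Note (added commentary): epochs bucket events by whole seconds. Two
 * events posted for, say, 100.250000 and 100.750000 share the epoch with
 * epochSec == 100, and only their usec fields order them within
 * ep->events; rxevent_Post relies on this when it compares usec alone. */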

/* Add the indicated event (function, arg) at the specified clock time.  The
 * "when" argument specifies when "func" should be called, in clock (clock.h)
 * units. */

struct rxevent *rxevent_Post(struct clock *when, void (*func)(),
        void *arg, void *arg1)
{
    register struct rxevent *ev, *evqe, *evqpr;
    register struct rxepoch *ep, *epqe, *epqpr;
    int isEarliest = 0;

    MUTEX_ENTER(&rxevent_lock);
    AFS_ASSERT_RXGLOCK();
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %x, %x)\n",
                (int) now.sec, (int) now.usec, (int) when->sec,
                (int) when->usec, (unsigned int) func, (unsigned int) arg);
    }
#endif

    /* Get a pointer to the epoch for this event, if none is found then
     * create a new epoch and insert it into the sorted list */
    for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
        if (when->sec == epqe->epochSec) {
            /* already have a structure for this epoch */
            ep = epqe;
            if (ep == queue_First(&rxepoch_queue, rxepoch))
                isEarliest = 1;
            break;
        } else if (when->sec > epqe->epochSec) {
            /* Create a new epoch and insert after qe */
            ep = rxepoch_Allocate(when);
            queue_InsertAfter(epqe, ep);
            break;
        }
    }
    if (ep == NULL) {
        /* Create a new epoch and place it at the head of the list */
        ep = rxepoch_Allocate(when);
        queue_Prepend(&rxepoch_queue, ep);
        isEarliest = 1;
    }

    /* If we're short on free event entries, create a block of new ones and add
     * them to the free queue */
    if (queue_IsEmpty(&rxevent_free)) {
        register int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ev = (struct rxevent *) rxi_Alloc(sizeof(struct rxevent));
        queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
        ev = (struct rxevent *) osi_Alloc(sizeof(struct rxevent) * rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
        xfreemallocs->mem = (void *)ev;
        xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxevent_allocUnit; i++)
            queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }

    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    rxevent_nPosted += 1;	/* Rather than ++, to shut high-C up
				 * regarding never-set variables */

    /* Insert the event into the sorted list of events for this epoch */
    for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
        if (when->usec >= evqe->eventTime.usec) {
            /* Insert event after evqe */
            queue_InsertAfter(evqe, ev);
            MUTEX_EXIT(&rxevent_lock);
            return ev;
        }
    }
    /* Insert event at head of current epoch */
    queue_Prepend(&ep->events, ev);
    if (isEarliest && rxevent_ScheduledEarlierEvent &&
        (!rxevent_raiseScheduled ||
         clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
        rxevent_raiseScheduled = 1;
        clock_Zero(&rxevent_nextRaiseEvents);
        MUTEX_EXIT(&rxevent_lock);
        /* Notify our external scheduler */
        (*rxevent_ScheduledEarlierEvent)();
        MUTEX_ENTER(&rxevent_lock);
    }
    MUTEX_EXIT(&rxevent_lock);
    return ev;
}
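
/* Usage sketch (illustrative, not part of the original file): schedule a
 * handler two seconds from now. Handlers run with the signature
 * func(event, arg, arg1), as invoked from rxevent_RaiseEvents below; the
 * names rxi_MyTimeout and call->myEvent are hypothetical:
 *
 *	struct clock when;
 *	clock_GetTime(&when);
 *	when.sec += 2;
 *	call->myEvent = rxevent_Post(&when, rxi_MyTimeout, (void *)call, 0);
 */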

/* Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue!  If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * rxevent_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
void rxevent_Cancel_1(ev, call, type)
    register struct rxevent *ev;
    register struct rx_call *call;
    int type;
#else /* RX_REFCOUNT_CHECK */
void rxevent_Cancel_1(ev, call)
    register struct rxevent *ev;
    register struct rx_call *call;
#endif /* RX_REFCOUNT_CHECK */
#else /* RX_ENABLE_LOCKS */
void rxevent_Cancel_1(ev)
    register struct rxevent *ev;
#endif /* RX_ENABLE_LOCKS */
{
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %x, %x)\n",
                (int) now.sec, (int) now.usec, (int) ev->eventTime.sec,
                (int) ev->eventTime.usec, (unsigned int) ev->func,
                (unsigned int) ev->arg);
    }
#endif
    /* Append it to the free list (rather than prepending) to keep the free
     * list hot so nothing pages out
     */
    AFS_ASSERT_RXGLOCK();
    MUTEX_ENTER(&rxevent_lock);
    if (!ev) {
        MUTEX_EXIT(&rxevent_lock);
        return;
    }
#ifdef RX_ENABLE_LOCKS
    /* It's possible we're currently processing this event. */
    if (queue_IsOnQueue(ev)) {
        queue_MoveAppend(&rxevent_free, ev);
        rxevent_nPosted--;
        rxevent_nFree++;
        if (call) {
            call->refCount--;
#ifdef RX_REFCOUNT_CHECK
            call->refCDebug[type]--;
            if (call->refCDebug[type] < 0) {
                rxevent_Cancel_type = type;
                osi_Panic("rxevent_Cancel: call refCount < 0");
            }
#endif /* RX_REFCOUNT_CHECK */
        }
    }
#else /* RX_ENABLE_LOCKS */
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rxevent_lock);
}
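
/* Usage sketch (illustrative, not part of the original file): callers
 * normally go through the rxevent_Cancel macro from rx_event.h, which
 * checks for a null event and clears the caller's pointer; under
 * RX_ENABLE_LOCKS it also passes the call and a refcount type so the
 * reference taken when the event was posted can be released:
 *
 *	rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
 */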

/* Process all epochs that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).  The
 * relative time to the next epoch is returned in the output parameter next
 * and the function returns 1.  If there is no next epoch, the function
 * returns 0.
 */
int rxevent_RaiseEvents(next)
    struct clock *next;
{
    register struct rxepoch *ep;
    register struct rxevent *ev;
    volatile struct clock now;

    MUTEX_ENTER(&rxevent_lock);

    AFS_ASSERT_RXGLOCK();

    /* Events are sorted by time, so only scan until an event is found that has
     * not yet timed out */

    clock_Zero(&now);
    while (queue_IsNotEmpty(&rxepoch_queue)) {
        ep = queue_First(&rxepoch_queue, rxepoch);
        if (queue_IsEmpty(&ep->events)) {
            queue_Remove(ep);
            queue_Append(&rxepoch_free, ep);
            rxepoch_nFree++;
            continue;
        }
        do {
            ev = queue_First(&ep->events, rxevent);
            if (clock_Lt(&now, &ev->eventTime)) {
                clock_GetTime(&now);
                if (clock_Lt(&now, &ev->eventTime)) {
                    *next = rxevent_nextRaiseEvents = ev->eventTime;
                    rxevent_raiseScheduled = 1;
                    clock_Sub(next, &now);
                    MUTEX_EXIT(&rxevent_lock);
                    return 1;
                }
            }
            queue_Remove(ev);
            rxevent_nPosted--;
            MUTEX_EXIT(&rxevent_lock);
            ev->func(ev, ev->arg, ev->arg1);
            MUTEX_ENTER(&rxevent_lock);
            queue_Append(&rxevent_free, ev);
            rxevent_nFree++;
        } while (queue_IsNotEmpty(&ep->events));
    }
#ifdef RXDEBUG
    if (rx_Log_event) fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n",
                              (int) now.sec, (int) now.usec);
#endif
    rxevent_raiseScheduled = 0;
    MUTEX_EXIT(&rxevent_lock);
    return 0;
}
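
/* Usage sketch (illustrative, not part of the original file): a driver loop
 * such as the listener calls rxevent_RaiseEvents repeatedly and sleeps until
 * the returned deadline when one is reported; wait_for_packet_or_timeout and
 * wait_for_packet are hypothetical stand-ins for the environment's wait
 * primitive:
 *
 *	struct clock next;
 *	for (;;) {
 *	    if (rxevent_RaiseEvents(&next))
 *		wait_for_packet_or_timeout(&next);
 *	    else
 *		wait_for_packet();
 *	}
 */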

void shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    LOCK_EV_INIT
    if (!rxevent_initialized) {
        UNLOCK_EV_INIT
        return;
    }
    rxevent_initialized = 0;
    UNLOCK_EV_INIT
    MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    xp = xfreemallocs;
    while (xp) {
        nxp = xp->next;
        osi_Free((char *)xp->mem, xp->size);
        osi_Free((char *)xp, sizeof(struct xfreelist));
        xp = nxp;
    }
    xfreemallocs = (struct xfreelist *) 0;
#endif
}