1
/* Copyright (c) 2005 PrimeBase Technologies GmbH
5
* This program is free software; you can redistribute it and/or modify
6
* it under the terms of the GNU General Public License as published by
7
* the Free Software Foundation; either version 2 of the License, or
8
* (at your option) any later version.
10
* This program is distributed in the hope that it will be useful,
11
* but WITHOUT ANY WARRANTY; without even the implied warranty of
12
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
* GNU General Public License for more details.
15
* You should have received a copy of the GNU General Public License
16
* along with this program; if not, write to the Free Software
17
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
* 2008-01-24 Paul McCullagh
30
#include "locklist_xt.h"
31
#include "pthread_xt.h"
39
#ifdef XT_ATOMIC_SOLARIS_LIB
43
void xt_log_atomic_error_and_abort(c_char *func, c_char *file, u_int line);
46
* -----------------------------------------------------------------------
51
* This macro is to remind me where it was safe
54
#define xt_lck_slock xt_spinlock_lock
56
/* I call these operations flushed because the result
57
* is written atomically.
58
* But the operations themselves are not atomic!
60
inline void xt_atomic_inc1(volatile xtWord1 *mptr)
62
#ifdef XT_ATOMIC_WIN32_X86
64
__asm MOV DL, BYTE PTR [ECX]
66
__asm XCHG DL, BYTE PTR [ECX]
67
#elif defined(XT_ATOMIC_GNUC_X86)
70
asm volatile ("movb %1,%0" : "=r" (val) : "m" (*mptr) : "memory");
72
asm volatile ("xchgb %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
73
#elif defined(XT_ATOMIC_SOLARIS_LIB)
77
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
81
inline xtWord1 xt_atomic_dec1(volatile xtWord1 *mptr)
85
#ifdef XT_ATOMIC_WIN32_X86
87
__asm MOV DL, BYTE PTR [ECX]
90
__asm XCHG DL, BYTE PTR [ECX]
91
#elif defined(XT_ATOMIC_GNUC_X86)
94
asm volatile ("movb %1, %0" : "=r" (val) : "m" (*mptr) : "memory");
96
asm volatile ("xchgb %1,%0" : "=r" (val2) : "m" (*mptr), "0" (val) : "memory");
97
/* Should work, but compiler makes a mistake?
98
* asm volatile ("xchgb %1, %0" : : "r" (val), "m" (*mptr) : "memory");
100
#elif defined(XT_ATOMIC_SOLARIS_LIB)
101
val = atomic_dec_8_nv(mptr);
104
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
109
inline void xt_atomic_inc2(volatile xtWord2 *mptr)
111
#ifdef XT_ATOMIC_WIN32_X86
113
__asm LOCK INC WORD PTR [ECX]
114
#elif defined(XT_ATOMIC_GNUC_X86)
115
asm volatile ("lock; incw %0" : : "m" (*mptr) : "memory");
116
#elif defined(XT_ATOMIC_GCC_OPS)
117
__sync_fetch_and_add(mptr, 1);
118
#elif defined(XT_ATOMIC_SOLARIS_LIB)
119
atomic_inc_16_nv(mptr);
122
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
126
inline void xt_atomic_dec2(volatile xtWord2 *mptr)
128
#ifdef XT_ATOMIC_WIN32_X86
130
__asm LOCK DEC WORD PTR [ECX]
131
#elif defined(XT_ATOMIC_GNUC_X86)
132
asm volatile ("lock; decw %0" : : "m" (*mptr) : "memory");
133
#elif defined(XT_ATOMIC_GCC_OPS)
134
__sync_fetch_and_sub(mptr, 1);
135
#elif defined(XT_ATOMIC_SOLARIS_LIB)
136
atomic_dec_16_nv(mptr);
139
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
143
/* Atomic test and set 2 byte word! */
144
inline xtWord2 xt_atomic_tas2(volatile xtWord2 *mptr, xtWord2 val)
146
#ifdef XT_ATOMIC_WIN32_X86
149
__asm XCHG DX, WORD PTR [ECX]
151
#elif defined(XT_ATOMIC_GNUC_X86)
152
asm volatile ("xchgw %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
153
#elif defined(XT_ATOMIC_SOLARIS_LIB)
154
val = atomic_swap_16(mptr, val);
161
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
166
inline void xt_atomic_set4(volatile xtWord4 *mptr, xtWord4 val)
168
#ifdef XT_ATOMIC_WIN32_X86
171
__asm XCHG EDX, DWORD PTR [ECX]
172
//__asm MOV DWORD PTR [ECX], EDX
173
#elif defined(XT_ATOMIC_GNUC_X86)
174
asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
175
//asm volatile ("movl %0,%1" : "=r" (val) : "m" (*mptr) : "memory");
176
#elif defined(XT_ATOMIC_SOLARIS_LIB)
177
atomic_swap_32(mptr, val);
180
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
184
inline xtWord4 xt_atomic_tas4(volatile xtWord4 *mptr, xtWord4 val)
186
#ifdef XT_ATOMIC_WIN32_X86
189
__asm XCHG EDX, DWORD PTR [ECX]
191
#elif defined(XT_ATOMIC_GNUC_X86)
193
asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
194
#elif defined(XT_ATOMIC_SOLARIS_LIB)
195
val = atomic_swap_32(mptr, val);
198
xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
204
* -----------------------------------------------------------------------
205
* DIFFERENT TYPES OF LOCKS
208
typedef struct XTSpinLock {
209
volatile xtWord4 spl_lock;
211
xt_mutex_type spl_mutex;
214
struct XTThread *spl_locker;
216
#ifdef XT_THREAD_LOCK_INFO
217
XTThreadLockInfoRec spl_lock_info;
218
const char *spl_name;
220
} XTSpinLockRec, *XTSpinLockPtr;
222
#ifdef XT_THREAD_LOCK_INFO
223
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
224
void xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp, const char *name);
226
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b)
227
void xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp);
229
void xt_spinlock_free(struct XTThread *self, XTSpinLockPtr sp);
230
xtBool xt_spinlock_spin(XTSpinLockPtr spl);
232
void xt_spinlock_set_thread(XTSpinLockPtr spl);
235
/* Code for test and set is derived from code by Larry Zhou and
236
* Google: http://code.google.com/p/google-perftools
238
inline xtWord4 xt_spinlock_set(XTSpinLockPtr spl)
241
volatile xtWord4 *lck;
243
lck = &spl->spl_lock;
244
#ifdef XT_ATOMIC_WIN32_X86
247
__asm XCHG EDX, DWORD PTR [ECX]
249
#elif defined(XT_ATOMIC_GNUC_X86)
251
asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
252
#elif defined(XT_ATOMIC_SOLARIS_LIB)
253
prv = atomic_swap_32(lck, 1);
255
/* The default implementation just uses a mutex, and
257
xt_lock_mutex_ns(&spl->spl_mutex);
258
/* We have the lock */
264
xt_spinlock_set_thread(spl);
269
inline xtWord4 xt_spinlock_reset(XTSpinLockPtr spl)
272
volatile xtWord4 *lck;
275
spl->spl_locker = NULL;
277
lck = &spl->spl_lock;
278
#ifdef XT_ATOMIC_WIN32_X86
281
__asm XCHG EDX, DWORD PTR [ECX]
283
#elif defined(XT_ATOMIC_GNUC_X86)
285
asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
286
#elif defined(XT_ATOMIC_SOLARIS_LIB)
287
prv = atomic_swap_32(lck, 0);
290
xt_unlock_mutex_ns(&spl->spl_mutex);
297
* Return FALSE, and register an error on failure.
299
inline xtBool xt_spinlock_lock(XTSpinLockPtr spl)
301
if (!xt_spinlock_set(spl)) {
302
#ifdef XT_THREAD_LOCK_INFO
303
xt_thread_lock_info_add_owner(&spl->spl_lock_info);
307
#ifdef XT_THREAD_LOCK_INFO
308
xtBool spin_result = xt_spinlock_spin(spl);
310
xt_thread_lock_info_add_owner(&spl->spl_lock_info);
313
return xt_spinlock_spin(spl);
317
inline void xt_spinlock_unlock(XTSpinLockPtr spl)
319
xt_spinlock_reset(spl);
320
#ifdef XT_THREAD_LOCK_INFO
321
xt_thread_lock_info_release_owner(&spl->spl_lock_info);
325
#define XT_SXS_SLOCK_COUNT 2
327
typedef struct XTSpinXSLock {
328
volatile xtWord2 sxs_xlocked;
329
volatile xtWord2 sxs_rlock_count;
330
volatile xtWord2 sxs_wait_count; /* The number of readers waiting for the xlocker. */
332
xtThreadID sxs_locker;
334
#ifdef XT_THREAD_LOCK_INFO
335
XTThreadLockInfoRec sxs_lock_info;
336
const char *sxs_name;
338
} XTSpinXSLockRec, *XTSpinXSLockPtr;
340
#ifdef XT_THREAD_LOCK_INFO
341
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
342
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs, const char *name);
344
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b)
345
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs);
347
void xt_spinxslock_free(struct XTThread *self, XTSpinXSLockPtr sxs);
348
xtBool xt_spinxslock_xlock(XTSpinXSLockPtr sxs, xtBool try_lock, xtThreadID thd_id);
349
xtBool xt_spinxslock_slock(XTSpinXSLockPtr sxs);
350
xtBool xt_spinxslock_unlock(XTSpinXSLockPtr sxs, xtBool xlocked);
352
typedef struct XTMutexXSLock {
353
xt_mutex_type xsm_lock;
354
xt_cond_type xsm_xcond;
355
xt_cond_type xsm_rcond;
356
volatile xtThreadID xsm_xlocker;
357
volatile xtWord2 xsm_xwait_count;
358
volatile xtWord2 xsm_rlock_count;
359
volatile xtWord2 xsm_rwait_count; /* The number of readers waiting for the xlocker. */
361
xtThreadID xsm_locker;
363
#ifdef XT_THREAD_LOCK_INFO
364
XTThreadLockInfoRec xsm_lock_info;
365
const char *xsm_name;
367
} XTMutexXSLockRec, *XTMutexXSLockPtr;
369
#ifdef XT_THREAD_LOCK_INFO
370
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
371
void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm, const char *name);
373
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b)
374
void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm);
377
void xt_xsmutex_free(struct XTThread *self, XTMutexXSLockPtr xsm);
378
xtBool xt_xsmutex_xlock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
379
xtBool xt_xsmutex_slock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
380
xtBool xt_xsmutex_unlock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
382
void xt_unit_test_read_write_locks(struct XTThread *self);
383
void xt_unit_test_mutex_locks(struct XTThread *self);
384
void xt_unit_test_create_threads(struct XTThread *self);
387
* -----------------------------------------------------------------------
394
* These are permanent row locks. They are set on rows for 2 reasons:
396
* 1. To lock a row that is being updated. The row is locked
397
* when it is read, until the point that it is updated. If the row
398
* is not updated, the lock is removed.
399
* This prevents an update coming between which will cause an error
400
* on the first thread.
402
* 2. The locks are used to implement SELECT FOR UPDATE.
406
* A lock that is set in order to perform an update is a temporary lock.
407
* This lock will be removed once the update of the record is done.
408
* The objective is to prevent some other thread from changing the
409
* record between the time the record is read and updated. This is to
410
* prevent unnecessary "Record was updated" errors.
412
* A permanent lock is set by a SELECT FOR UPDATE. These locks are
413
* held until the end of the transaction.
415
* However, a SELECT FOR UPDATE will pop its lock stack before
416
* waiting for a transaction that has updated a record.
417
* This is to prevent the deadlock that can occur because a
418
* SELECT FOR UPDATE locks groups of records (I mean in general the
419
* locks used are group locks).
421
* This means a SELECT FOR UPDATE can get ahead of an UPDATE as far as
422
* locking is concerned. Example:
424
* Record 1,2 and 3 are in group A.
426
* T1: UPDATES record 2.
427
* T2: SELECT FOR UPDATE record 1, which locks group A.
428
* T2: SELECT FOR UPDATE record 2, which must wait for T1.
429
* T1: UPDATES record 3, which must wait because of group lock A.
431
* To avoid deadlock, T2 releases its group lock A before waiting for
432
* record 2. It then regains the lock after waiting for record 2.
434
* (NOTE: Locks are no longer released. Please check this comment:
435
* {RELEASING-LOCKS} in lock_xt.cc. )
437
* However, releasing the group A lock means first releasing all locks gained
438
* after the group A lock.
440
* For example: a thread locks groups: A, B and C. To release group B
441
* lock the thread must release C as well. Afterwards, it must gain
442
* B and C again, in that order. This is to ensure that the lock
443
* order is NOT changed!
446
/* Row lock types and the lock-error indicator: */
#define XT_LOCK_ERR		-1				/* Indicates a lock error. */
#define XT_TEMP_LOCK	1				/* A temporary lock */
#define XT_PERM_LOCK	2				/* A permanent lock */
451
/* A list of row locks held by a thread/transaction.
 * Extends the basic list type with a method to release all locks at once.
 */
typedef struct XTRowLockList : public XTBasicList {
	/* Remove every lock in this list from the given database's tables. */
	void	xt_remove_all_locks(struct XTDatabase *db, struct XTThread *thread);
} XTRowLockListRec, *XTRowLockListPtr;
455
#define XT_USE_LIST_BASED_ROW_LOCKS
457
#ifdef XT_USE_LIST_BASED_ROW_LOCKS
459
* This method stores each lock, and avoids conflicts.
460
* But it is a bit more expensive in time.
464
#define XT_TEMP_LOCK_BYTES 10
465
#define XT_ROW_LOCK_GROUP_COUNT 5
467
#define XT_TEMP_LOCK_BYTES 0xFFFF
468
#define XT_ROW_LOCK_GROUP_COUNT 23
471
typedef struct XTLockWait {
472
/* Information about the lock to be acquired: */
473
struct XTThread *lw_thread;
474
struct XTOpenTable *lw_ot;
477
/* This is the lock currently held, and the transaction ID: */
481
/* This is information about the updating transaction: */
482
xtBool lw_row_updated;
483
xtXactID lw_updating_xn_id;
485
/* Pointers for the lock list: */
486
struct XTLockWait *lw_next;
487
struct XTLockWait *lw_prev;
488
} XTLockWaitRec, *XTLockWaitPtr;
490
typedef struct XTLockItem {
491
xtRowID li_row_id; /* The row list is sorted in this value. */
492
xtWord2 li_count; /* The number of consecutive rows locked. FFFF means a temporary lock. */
493
xtWord2 li_thread_id; /* The thread that holds this lock. */
494
} XTLockItemRec, *XTLockItemPtr;
496
typedef struct XTLockGroup {
497
XTSpinLockRec lg_lock; /* A lock for the list. */
498
XTLockWaitPtr lg_wait_queue; /* A queue of threads waiting for a lock in this group. */
499
XTLockWaitPtr lg_wait_queue_end; /* The end of the thread queue. */
500
size_t lg_list_size; /* The size of the list. */
501
size_t lg_list_in_use; /* Number of slots on the list in use. */
502
XTLockItemPtr lg_list; /* List of locks. */
503
} XTLockGroupRec, *XTLockGroupPtr;
507
typedef struct XTRowLocks {
508
XTLockGroupRec rl_groups[XT_ROW_LOCK_GROUP_COUNT];
510
void xt_cancel_temp_lock(XTLockWaitPtr lw);
511
xtBool xt_set_temp_lock(struct XTOpenTable *ot, XTLockWaitPtr lw, XTRowLockListPtr lock_list);
512
void xt_remove_temp_lock(struct XTOpenTable *ot, xtBool updated);
513
xtBool xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);
515
xtBool rl_lock_row(XTLockGroupPtr group, XTLockWaitPtr lw, XTRowLockListPtr lock_list, int *result, struct XTThread *thread);
516
void rl_grant_locks(XTLockGroupPtr group, struct XTThread *thread);
517
#ifdef DEBUG_LOCK_QUEUE
518
void rl_check(XTLockWaitPtr lw);
520
} XTRowLocksRec, *XTRowLocksPtr;
522
#define XT_USE_TABLE_REF
524
typedef struct XTPermRowLock {
525
#ifdef XT_USE_TABLE_REF
526
struct XTTable *pr_table;
530
xtWord1 pr_group[XT_ROW_LOCK_GROUP_COUNT];
531
} XTPermRowLockRec, *XTPermRowLockPtr;
533
#else // XT_ROW_LOCK_GROUP_COUNT
535
/* Hash based row locking. This method allows conflicts, even
536
* when there is none.
538
typedef struct XTRowLocks {
539
xtWord1 tab_lock_perm[XT_ROW_LOCK_COUNT]; /* Byte set to 1 for permanent locks. */
540
struct XTXactData *tab_row_locks[XT_ROW_LOCK_COUNT]; /* The transactions that have locked the specific rows. */
542
int xt_set_temp_lock(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id, XTRowLockListPtr lock_list);
543
void xt_remove_temp_lock(struct XTOpenTable *ot);
544
xtBool xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);
545
int xt_is_locked(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id);
546
} XTRowLocksRec, *XTRowLocksPtr;
548
typedef struct XTPermRowLock {
551
} XTPermRowLockRec, *XTPermRowLockPtr;
553
#endif // XT_ROW_LOCK_GROUP_COUNT
555
xtBool xt_init_row_locks(XTRowLocksPtr rl);
556
void xt_exit_row_locks(XTRowLocksPtr rl);
558
xtBool xt_init_row_lock_list(XTRowLockListPtr rl);
559
void xt_exit_row_lock_list(XTRowLockListPtr rl);
562
/* NOTE(review): presumably lock-request states (want = requested,
 * have = granted) -- confirm against the implementation in lock_xt.cc. */
#define XT_WANT_LOCK	1
#define XT_HAVE_LOCK	2