/* Copyright (c) 2005 PrimeBase Technologies GmbH
 *
 * PrimeBase XT
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * 2008-01-24	Paul McCullagh
 *
 * Row lock functions.
 *
 * H&G2JCtL
 */
#ifndef __xt_lock_h__
#define __xt_lock_h__

#include "xt_defs.h"
#include "util_xt.h"
#include "locklist_xt.h"
#include "pthread_xt.h"

struct XTThread;
struct XTDatabase;
struct XTOpenTable;
struct XTXactData;
struct XTTable;

#ifdef XT_ATOMIC_SOLARIS_LIB
#include <atomic.h>
#endif

void xt_log_atomic_error_and_abort(c_char *func, c_char *file, u_int line);

/*
 * -----------------------------------------------------------------------
 * ATOMIC OPERATIONS
 */

/*
 * This macro is to remind me where it was safe
 * to use a read lock!
 */
#define xt_lck_slock		xt_spinlock_lock

/* I call these operations "flushed" because the result
 * is written through to memory atomically,
 * but the operations themselves are not atomic!
 */
inline void xt_atomic_inc1(volatile xtWord1 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm MOV DL, BYTE PTR [ECX]
	__asm INC DL
	__asm XCHG DL, BYTE PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	xtWord1 val;

	asm volatile ("movb %1,%0" : "=r" (val) : "m" (*mptr) : "memory");
	val++;
	asm volatile ("xchgb %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_inc_8(mptr);
#else
	(*mptr)++;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

inline xtWord1 xt_atomic_dec1(volatile xtWord1 *mptr)
{
	xtWord1 val;

#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm MOV DL, BYTE PTR [ECX]
	__asm DEC DL
	__asm MOV val, DL
	__asm XCHG DL, BYTE PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	xtWord1 val2;

	asm volatile ("movb %1, %0" : "=r" (val) : "m" (*mptr) : "memory");
	val--;
	asm volatile ("xchgb %1,%0" : "=r" (val2) : "m" (*mptr), "0" (val) : "memory");
	/* Should work, but the compiler makes a mistake?
	 * asm volatile ("xchgb %1, %0" : : "r" (val), "m" (*mptr) : "memory");
	 */
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_dec_8_nv(mptr);
#else
	val = --(*mptr);
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

inline void xt_atomic_inc2(volatile xtWord2 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm LOCK INC WORD PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("lock; incw %0" : : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_GCC_OPS)
	__sync_fetch_and_add(mptr, 1);
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_inc_16_nv(mptr);
#else
	(*mptr)++;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

inline void xt_atomic_dec2(volatile xtWord2 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm LOCK DEC WORD PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("lock; decw %0" : : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_GCC_OPS)
	__sync_fetch_and_sub(mptr, 1);
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_dec_16_nv(mptr);
#else
	--(*mptr);
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

/* Atomic test-and-set of a 2-byte word! */
inline xtWord2 xt_atomic_tas2(volatile xtWord2 *mptr, xtWord2 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm MOV DX, val
	__asm XCHG DX, WORD PTR [ECX]
	__asm MOV val, DX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgw %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_swap_16(mptr, val);
#else
	/* Yikes! Not atomic! */
	xtWord2 nval = val;

	val = *mptr;
	*mptr = nval;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

inline void xt_atomic_set4(volatile xtWord4 *mptr, xtWord4 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm MOV EDX, val
	__asm XCHG EDX, DWORD PTR [ECX]
	//__asm MOV DWORD PTR [ECX], EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
	//asm volatile ("movl %0,%1" : "=r" (val) : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_swap_32(mptr, val);
#else
	*mptr = val;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

inline xtWord4 xt_atomic_tas4(volatile xtWord4 *mptr, xtWord4 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, mptr
	__asm MOV EDX, val
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV val, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_swap_32(mptr, val);
#else
	*mptr = val;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}
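
/* Usage sketch: how these primitives can be used to hand a flag
 * between threads. "done_flag" is a hypothetical variable, used here
 * only for illustration:
 *
 *	static volatile xtWord4 done_flag = 0;
 *
 *	// Publisher thread: write the flag through to memory:
 *	xt_atomic_set4(&done_flag, 1);
 *
 *	// Consumer thread: atomically read-and-clear, so that only one
 *	// consumer observes the transition from 1 to 0:
 *	if (xt_atomic_tas4(&done_flag, 0) == 1) {
 *		// ... exactly one thread gets here per set ...
 *	}
 */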

/*
 * -----------------------------------------------------------------------
 * DIFFERENT TYPES OF LOCKS
 */

typedef struct XTSpinLock {
	volatile xtWord4	spl_lock;
#ifdef XT_NO_ATOMICS
	xt_mutex_type		spl_mutex;
#endif
#ifdef DEBUG
	struct XTThread		*spl_locker;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	spl_lock_info;
	const char			*spl_name;
#endif
} XTSpinLockRec, *XTSpinLockPtr;
|
221 |
||
222 |
#ifdef XT_THREAD_LOCK_INFO
|
|
223 |
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
|
|
224 |
void xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp, const char *name); |
|
225 |
#else
|
|
226 |
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b)
|
|
227 |
void xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp); |
|
228 |
#endif
|
|
229 |
void xt_spinlock_free(struct XTThread *self, XTSpinLockPtr sp); |
|
230 |
xtBool xt_spinlock_spin(XTSpinLockPtr spl); |
|
231 |
#ifdef DEBUG
|
|
232 |
void xt_spinlock_set_thread(XTSpinLockPtr spl); |
|
233 |
#endif
|
|
234 |
||
255
by paul-mccullagh
Implemented spinlocks |
235 |

/* Code for test and set is derived from code by Larry Zhou and
 * Google: http://code.google.com/p/google-perftools
 */
inline xtWord4 xt_spinlock_set(XTSpinLockPtr spl)
{
	xtWord4				prv;
	volatile xtWord4	*lck;

	lck = &spl->spl_lock;
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, lck
	__asm MOV EDX, 1
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV prv, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	prv = 1;
	asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	prv = atomic_swap_32(lck, 1);
#else
	/* The default implementation just uses a mutex, and
	 * does not spin! */
	xt_lock_mutex_ns(&spl->spl_mutex);
	/* We have the lock */
	*lck = 1;
	prv = 0;
#endif
#ifdef DEBUG
	if (!prv)
		xt_spinlock_set_thread(spl);
#endif
	return prv;
}

inline xtWord4 xt_spinlock_reset(XTSpinLockPtr spl)
{
	xtWord4				prv;
	volatile xtWord4	*lck;

#ifdef DEBUG
	spl->spl_locker = NULL;
#endif
	lck = &spl->spl_lock;
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV ECX, lck
	__asm MOV EDX, 0
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV prv, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	prv = 0;
	asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	prv = atomic_swap_32(lck, 0);
#else
	*lck = 0;
	xt_unlock_mutex_ns(&spl->spl_mutex);
	prv = 1;
#endif
	return prv;
}

/*
 * Return FALSE, and register an error on failure.
 */
inline xtBool xt_spinlock_lock(XTSpinLockPtr spl)
{
	if (!xt_spinlock_set(spl)) {
#ifdef XT_THREAD_LOCK_INFO
		xt_thread_lock_info_add_owner(&spl->spl_lock_info);
#endif
		return OK;
	}
#ifdef XT_THREAD_LOCK_INFO
	xtBool spin_result = xt_spinlock_spin(spl);

	if (spin_result)
		xt_thread_lock_info_add_owner(&spl->spl_lock_info);
	return spin_result;
#else
	return xt_spinlock_spin(spl);
#endif
}

inline void xt_spinlock_unlock(XTSpinLockPtr spl)
{
	xt_spinlock_reset(spl);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_release_owner(&spl->spl_lock_info);
#endif
}
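
/* Usage sketch: a spinlock protects a very short critical section.
 * "my_lock" and "counter" are hypothetical; error handling is elided:
 *
 *	XTSpinLockRec	my_lock;
 *	u_int			counter;
 *
 *	xt_spinlock_init_with_autoname(self, &my_lock);
 *	...
 *	if (xt_spinlock_lock(&my_lock)) {	// Returns OK (TRUE) on success
 *		counter++;						// Keep this section short!
 *		xt_spinlock_unlock(&my_lock);
 *	}
 *	...
 *	xt_spinlock_free(self, &my_lock);
 */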

/* Possibilities are shift 2 = align 4, or shift 3 = align 8 */
#define XT_XS_LOCK_SHIFT	2
#define XT_XS_LOCK_ALIGN	(1 << XT_XS_LOCK_SHIFT)

/* This lock is fast for reads but slow for writes.
 * Use this lock in situations where you have 99% reads,
 * and then some potentially long writes.
 */
typedef struct XTRWMutex {
#ifdef DEBUG
	struct XTThread		*xs_lock_thread;
	u_int				xs_inited;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	xs_lock_info;
	const char			*xs_name;
#endif
	xt_mutex_type		xs_lock;
	xt_cond_type		xs_cond;
	volatile xtWord4	xs_state;
	volatile xtThreadID	xs_xlocker;
	union {
#if XT_XS_LOCK_ALIGN == 4
		volatile xtWord4	*xs_rlock_align;
#else
		volatile xtWord8	*xs_rlock_align;
#endif
		volatile xtWord1	*xs_rlock;
	} x;
} XTRWMutexRec, *XTRWMutexPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_rwmutex_init_with_autoname(a,b) xt_rwmutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_rwmutex_init(struct XTThread *self, XTRWMutexPtr xsl, const char *name);
#else
#define xt_rwmutex_init_with_autoname(a,b) xt_rwmutex_init(a,b)
void xt_rwmutex_init(struct XTThread *self, XTRWMutexPtr xsl);
#endif
void xt_rwmutex_free(struct XTThread *self, XTRWMutexPtr xsl);
xtBool xt_rwmutex_xlock(XTRWMutexPtr xsl, xtThreadID thd_id);
xtBool xt_rwmutex_slock(XTRWMutexPtr xsl, xtThreadID thd_id);
xtBool xt_rwmutex_unlock(XTRWMutexPtr xsl, xtThreadID thd_id);
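
/* Usage sketch: read-mostly locking with XTRWMutex. "cache_lock" is
 * hypothetical; thd_id is the calling thread's ID:
 *
 *	XTRWMutexRec	cache_lock;
 *
 *	xt_rwmutex_init_with_autoname(self, &cache_lock);
 *
 *	// Common path, many concurrent readers:
 *	if (xt_rwmutex_slock(&cache_lock, thd_id)) {
 *		// ... read shared data ...
 *		xt_rwmutex_unlock(&cache_lock, thd_id);
 *	}
 *
 *	// Rare path, one exclusive writer:
 *	if (xt_rwmutex_xlock(&cache_lock, thd_id)) {
 *		// ... modify shared data ...
 *		xt_rwmutex_unlock(&cache_lock, thd_id);
 *	}
 */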

#define XT_FAST_LOCK_MAX_WAIT	100

typedef struct XTFastLock {
	XTSpinLockRec		fal_spinlock;
	struct XTThread		*fal_locker;

	XTSpinLockRec		fal_wait_lock;
	u_int				fal_wait_count;
	u_int				fal_wait_wakeup;
	u_int				fal_wait_alloc;
	struct XTThread		*fal_wait_list[XT_FAST_LOCK_MAX_WAIT];
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	fal_lock_info;
	const char			*fal_name;
#endif
} XTFastLockRec, *XTFastLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_fastlock_init_with_autoname(a,b) xt_fastlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_fastlock_init(struct XTThread *self, XTFastLockPtr spl, const char *name);
#else
#define xt_fastlock_init_with_autoname(a,b) xt_fastlock_init(a,b)
void xt_fastlock_init(struct XTThread *self, XTFastLockPtr spl);
#endif
void xt_fastlock_free(struct XTThread *self, XTFastLockPtr spl);
void xt_fastlock_wakeup(XTFastLockPtr spl);
xtBool xt_fastlock_spin(XTFastLockPtr spl, struct XTThread *thread);

inline xtBool xt_fastlock_lock(XTFastLockPtr fal, struct XTThread *thread)
{
	if (!xt_spinlock_set(&fal->fal_spinlock)) {
		fal->fal_locker = thread;
#ifdef XT_THREAD_LOCK_INFO
		xt_thread_lock_info_add_owner(&fal->fal_lock_info);
#endif
		return OK;
	}
#ifdef XT_THREAD_LOCK_INFO
	xtBool spin_result = xt_fastlock_spin(fal, thread);

	if (spin_result)
		xt_thread_lock_info_add_owner(&fal->fal_lock_info);
	return spin_result;
#else
	return xt_fastlock_spin(fal, thread);
#endif
}

inline void xt_fastlock_unlock(XTFastLockPtr fal, struct XTThread *XT_UNUSED(thread))
{
	if (fal->fal_wait_count)
		xt_fastlock_wakeup(fal);
	else {
		fal->fal_locker = NULL;
		xt_spinlock_reset(&fal->fal_spinlock);
	}
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_release_owner(&fal->fal_lock_info);
#endif
}

#define XT_SXS_SLOCK_COUNT		2

typedef struct XTSpinXSLock {
	volatile xtWord2	sxs_xlocked;
	volatile xtWord2	sxs_xwaiter;
	volatile xtWord2	sxs_rlock_count;
	volatile xtWord2	sxs_wait_count;			/* The number of readers waiting for the xlocker. */
#ifdef DEBUG
	xtThreadID			sxs_locker;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	sxs_lock_info;
	const char			*sxs_name;
#endif
} XTSpinXSLockRec, *XTSpinXSLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs, const char *name);
#else
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b)
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs);
#endif
void xt_spinxslock_free(struct XTThread *self, XTSpinXSLockPtr sxs);
xtBool xt_spinxslock_xlock(XTSpinXSLockPtr sxs, xtBool try_lock, xtThreadID thd_id);
xtBool xt_spinxslock_slock(XTSpinXSLockPtr sxs);
xtBool xt_spinxslock_unlock(XTSpinXSLockPtr sxs, xtBool xlocked);
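
/* Usage sketch: the X/S spin lock distinguishes shared and exclusive
 * acquisition, and the unlock call must be told which kind it is
 * releasing. "sxs" and "thd_id" are hypothetical here:
 *
 *	XTSpinXSLockRec	sxs;
 *
 *	// Shared (read) access:
 *	if (xt_spinxslock_slock(&sxs))
 *		xt_spinxslock_unlock(&sxs, FALSE);	// xlocked == FALSE
 *
 *	// Exclusive (write) access; try_lock == FALSE means wait:
 *	if (xt_spinxslock_xlock(&sxs, FALSE, thd_id))
 *		xt_spinxslock_unlock(&sxs, TRUE);	// xlocked == TRUE
 */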

typedef struct XTXSMutexLock {
	xt_mutex_type		xsm_lock;
	xt_cond_type		xsm_cond;
	xt_cond_type		xsm_cond_2;
	volatile xtThreadID	xsm_xlocker;
	volatile xtWord2	xsm_rlock_count;
	volatile xtWord2	xsm_wait_count;			/* The number of readers waiting for the xlocker. */
#ifdef DEBUG
	xtThreadID			xsm_locker;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	xsm_lock_info;
	const char			*xsm_name;
#endif
} XTXSMutexRec, *XTXSMutexLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_xsmutex_init(struct XTThread *self, XTXSMutexLockPtr xsm, const char *name);
#else
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b)
void xt_xsmutex_init(struct XTThread *self, XTXSMutexLockPtr xsm);
#endif

void xt_xsmutex_free(struct XTThread *self, XTXSMutexLockPtr xsm);
xtBool xt_xsmutex_xlock(XTXSMutexLockPtr xsm, xtThreadID thd_id);
xtBool xt_xsmutex_slock(XTXSMutexLockPtr xsm, xtThreadID thd_id);
xtBool xt_xsmutex_unlock(XTXSMutexLockPtr xsm, xtThreadID thd_id);

typedef struct XTAtomicRWLock {
	volatile xtWord2	arw_reader_count;
	volatile xtWord2	arw_xlock_set;

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	arw_lock_info;
	const char			*arw_name;
#endif
#ifdef DEBUG
	xtThreadID			arw_locker;
#endif
} XTAtomicRWLockRec, *XTAtomicRWLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_atomicrwlock_init_with_autoname(a,b) xt_atomicrwlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_atomicrwlock_init(struct XTThread *self, XTAtomicRWLockPtr xsl, const char *name);
#else
#define xt_atomicrwlock_init_with_autoname(a,b) xt_atomicrwlock_init(a,b)
void xt_atomicrwlock_init(struct XTThread *self, XTAtomicRWLockPtr xsl);
#endif
void xt_atomicrwlock_free(struct XTThread *self, XTAtomicRWLockPtr xsl);
xtBool xt_atomicrwlock_xlock(XTAtomicRWLockPtr xsl, xtBool try_lock, xtThreadID thr_id);
xtBool xt_atomicrwlock_slock(XTAtomicRWLockPtr xsl);
xtBool xt_atomicrwlock_unlock(XTAtomicRWLockPtr xsl, xtBool xlocked);

typedef struct XTSkewRWLock {
	volatile xtWord2	srw_reader_count;
	volatile xtWord2	srw_xlock_set;

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	srw_lock_info;
	const char			*srw_name;
#endif
#ifdef DEBUG
	xtThreadID			srw_locker;
#endif
} XTSkewRWLockRec, *XTSkewRWLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_skewrwlock_init_with_autoname(a,b) xt_skewrwlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_skewrwlock_init(struct XTThread *self, XTSkewRWLockPtr xsl, const char *name);
#else
#define xt_skewrwlock_init_with_autoname(a,b) xt_skewrwlock_init(a,b)
void xt_skewrwlock_init(struct XTThread *self, XTSkewRWLockPtr xsl);
#endif
void xt_skewrwlock_free(struct XTThread *self, XTSkewRWLockPtr xsl);
xtBool xt_skewrwlock_xlock(XTSkewRWLockPtr xsl, xtBool try_lock, xtThreadID thr_id);
xtBool xt_skewrwlock_slock(XTSkewRWLockPtr xsl);
xtBool xt_skewrwlock_unlock(XTSkewRWLockPtr xsl, xtBool xlocked);

void xt_unit_test_read_write_locks(struct XTThread *self);
void xt_unit_test_mutex_locks(struct XTThread *self);
void xt_unit_test_create_threads(struct XTThread *self);

/*
 * -----------------------------------------------------------------------
 * ROW LOCKS
 */

/*
 * [(9)]
 *
 * These are permanent row locks. They are set on rows for 2 reasons:
 *
 * 1. To lock a row that is being updated. The row is locked
 * when it is read, until the point that it is updated. If the row
 * is not updated, the lock is removed.
 * This prevents an update from coming in between, which would cause
 * an error on the first thread.
 *
 * 2. The locks are used to implement SELECT FOR UPDATE.
 */

/*
 * A lock that is set in order to perform an update is a temporary lock.
 * This lock will be removed once the update of the record is done.
 * The objective is to prevent some other thread from changing the
 * record between the time the record is read and updated. This is to
 * prevent unnecessary "Record was updated" errors.
 *
 * A permanent lock is set by a SELECT FOR UPDATE. These locks are
 * held until the end of the transaction.
 *
 * However, a SELECT FOR UPDATE will pop its lock stack before
 * waiting for a transaction that has updated a record.
 * This is to prevent the deadlock that can occur because a
 * SELECT FOR UPDATE locks groups of records (I mean, in general, the
 * locks used are group locks).
 *
 * This means a SELECT FOR UPDATE can get ahead of an UPDATE as far as
 * locking is concerned. Example:
 *
 * Records 1, 2 and 3 are in group A.
 *
 * T1: UPDATES record 2.
 * T2: SELECT FOR UPDATE record 1, which locks group A.
 * T2: SELECT FOR UPDATE record 2, which must wait for T1.
 * T1: UPDATES record 3, which must wait because of group lock A.
 *
 * To avoid deadlock, T2 releases its group lock A before waiting for
 * record 2. It then regains the lock after waiting for record 2.
 *
 * (NOTE: Locks are no longer released. Please check this comment:
 * {RELEASING-LOCKS} in lock_xt.cc.)
 *
 * However, releasing the group A lock means first releasing all locks
 * gained after the group A lock.
 *
 * For example: a thread locks groups A, B and C. To release the group B
 * lock the thread must release C as well. Afterwards, it must gain
 * B and C again, in that order. This is to ensure that the lock
 * order is NOT changed!
 */
#define XT_LOCK_ERR		-1
#define XT_NO_LOCK		0
#define XT_TEMP_LOCK	1	/* A temporary lock */
#define XT_PERM_LOCK	2	/* A permanent lock */

typedef struct XTRowLockList : public XTBasicList {
	void	xt_remove_all_locks(struct XTDatabase *db, struct XTThread *thread);
} XTRowLockListRec, *XTRowLockListPtr;

#define XT_USE_LIST_BASED_ROW_LOCKS

#ifdef XT_USE_LIST_BASED_ROW_LOCKS
/*
 * This method stores each lock, and avoids false conflicts.
 * But it is a bit more expensive in time.
 */

#ifdef DEBUG
#define XT_TEMP_LOCK_BYTES			10
#define XT_ROW_LOCK_GROUP_COUNT		5
#else
#define XT_TEMP_LOCK_BYTES			0xFFFF
#define XT_ROW_LOCK_GROUP_COUNT		23
#endif

typedef struct XTLockWait {
	/* Information about the lock to be acquired: */
	struct XTThread		*lw_thread;
	struct XTOpenTable	*lw_ot;
	xtRowID				lw_row_id;

	/* This is the lock currently held, and the transaction ID: */
	int					lw_curr_lock;
	xtXactID			lw_xn_id;

	/* This is information about the updating transaction: */
	xtBool				lw_row_updated;
	xtXactID			lw_updating_xn_id;

	/* Pointers for the lock list: */
	struct XTLockWait	*lw_next;
	struct XTLockWait	*lw_prev;
} XTLockWaitRec, *XTLockWaitPtr;

typedef struct XTLockItem {
	xtRowID				li_row_id;		/* The row list is sorted on this value. */
	xtWord2				li_count;		/* The number of consecutive rows locked. FFFF means a temporary lock. */
	xtWord2				li_thread_id;	/* The thread that holds this lock. */
} XTLockItemRec, *XTLockItemPtr;

typedef struct XTLockGroup {
	XTSpinLockRec		lg_lock;			/* A lock for the list. */
	XTLockWaitPtr		lg_wait_queue;		/* A queue of threads waiting for a lock in this group. */
	XTLockWaitPtr		lg_wait_queue_end;	/* The end of the thread queue. */
	size_t				lg_list_size;		/* The size of the list. */
	size_t				lg_list_in_use;		/* Number of slots on the list in use. */
	XTLockItemPtr		lg_list;			/* List of locks. */
} XTLockGroupRec, *XTLockGroupPtr;

struct XTLockWait;

typedef struct XTRowLocks {
	XTLockGroupRec	rl_groups[XT_ROW_LOCK_GROUP_COUNT];

	void	xt_cancel_temp_lock(XTLockWaitPtr lw);
	xtBool	xt_set_temp_lock(struct XTOpenTable *ot, XTLockWaitPtr lw, XTRowLockListPtr lock_list);
	void	xt_remove_temp_lock(struct XTOpenTable *ot, xtBool updated);
	xtBool	xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);

	xtBool	rl_lock_row(XTLockGroupPtr group, XTLockWaitPtr lw, XTRowLockListPtr lock_list, int *result);
	void	rl_grant_locks(XTLockGroupPtr group, struct XTThread *thread);
#ifdef DEBUG_LOCK_QUEUE
	void	rl_check(XTLockWaitPtr lw);
#endif
} XTRowLocksRec, *XTRowLocksPtr;
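
/* Usage sketch: how rows map to lock groups. The exact mapping is in
 * lock_xt.cc; the idea is that every row ID hashes to one of the
 * XT_ROW_LOCK_GROUP_COUNT groups, which is why two different rows can
 * conflict on the same group lock:
 *
 *	XTLockGroupPtr group;
 *
 *	group = &row_locks->rl_groups[row_id % XT_ROW_LOCK_GROUP_COUNT];
 *	// All rows with the same hash value share this group's spinlock,
 *	// wait queue and lock list.
 */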

#define XT_USE_TABLE_REF

typedef struct XTPermRowLock {
#ifdef XT_USE_TABLE_REF
	struct XTTable	*pr_table;
#else
	xtTableID		pr_tab_id;
#endif
	xtWord1			pr_group[XT_ROW_LOCK_GROUP_COUNT];
} XTPermRowLockRec, *XTPermRowLockPtr;

#else // XT_USE_LIST_BASED_ROW_LOCKS

/* Hash based row locking. This method allows conflicts, even
 * when there are none.
 */
typedef struct XTRowLocks {
	xtWord1				tab_lock_perm[XT_ROW_LOCK_COUNT];		/* Byte set to 1 for permanent locks. */
	struct XTXactData	*tab_row_locks[XT_ROW_LOCK_COUNT];		/* The transactions that have locked the specific rows. */

	int		xt_set_temp_lock(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id, XTRowLockListPtr lock_list);
	void	xt_remove_temp_lock(struct XTOpenTable *ot);
	xtBool	xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);
	int		xt_is_locked(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id);
} XTRowLocksRec, *XTRowLocksPtr;

typedef struct XTPermRowLock {
	xtTableID	pr_tab_id;
	xtWord4		pr_group;
} XTPermRowLockRec, *XTPermRowLockPtr;

#endif // XT_USE_LIST_BASED_ROW_LOCKS

xtBool xt_init_row_locks(XTRowLocksPtr rl);
void xt_exit_row_locks(XTRowLocksPtr rl);

xtBool xt_init_row_lock_list(XTRowLockListPtr rl);
void xt_exit_row_lock_list(XTRowLockListPtr rl);

#define XT_NO_LOCK		0
#define XT_WANT_LOCK	1
#define XT_HAVE_LOCK	2
#define XT_WAITING		3

/*
 * -----------------------------------------------------------------------
 * RECURSIVE MUTEX (allows lockers to lock again)
 */

typedef struct XTRecursiveMutex {
	struct XTThread		*rm_locker;
	u_int				rm_lock_count;
	xt_mutex_type		rm_mutex;

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	rm_lock_info;
	const char			*rm_name;
#endif
} XTRecursiveMutexRec, *XTRecursiveMutexPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_recursivemutex_init_with_autoname(a,b) xt_recursivemutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_recursivemutex_init(struct XTThread *self, XTRecursiveMutexPtr rm, const char *name);
#else
#define xt_recursivemutex_init_with_autoname(a,b) xt_recursivemutex_init(a,b)
void xt_recursivemutex_init(struct XTThread *self, XTRecursiveMutexPtr rm);
#endif
void xt_recursivemutex_free(XTRecursiveMutexPtr rm);
void xt_recursivemutex_lock(struct XTThread *self, XTRecursiveMutexPtr rm);
void xt_recursivemutex_unlock(struct XTThread *self, XTRecursiveMutexPtr rm);
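
/* Usage sketch: a recursive mutex may be re-locked by the thread that
 * already holds it; each lock call must be paired with an unlock.
 * "rec_lock" and "do_work" are hypothetical:
 *
 *	XTRecursiveMutexRec	rec_lock;
 *
 *	void do_work(struct XTThread *self, int depth) {
 *		xt_recursivemutex_lock(self, &rec_lock);	// Re-entry is safe
 *		if (depth > 0)
 *			do_work(self, depth - 1);
 *		xt_recursivemutex_unlock(self, &rec_lock);	// One unlock per lock
 *	}
 */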

typedef struct XTRecurRWLock {
	struct XTThread		*rrw_locker;
	u_int				rrw_lock_count;
	xt_rwlock_type		rrw_lock;

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec	rrw_lock_info;
	const char			*rrw_name;
#endif
} XTRecurRWLockRec, *XTRecurRWLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_recurrwlock_init_with_autoname(a,b) xt_recurrwlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name);
#else
#define xt_recurrwlock_init_with_autoname(a,b) xt_recurrwlock_init(a,b)
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw);
#endif
void xt_recurrwlock_free(XTRecurRWLockPtr rrw);
void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw);
void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw);

#endif /* __xt_lock_h__ */