/*****************************************************************************

Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/**************************************************//**
The read-write lock (for thread synchronization)

Created 9/11/1995 Heikki Tuuri
*******************************************************/
#include "sync0rw.h"
#ifdef UNIV_NONINL
#include "sync0rw.ic"
#endif

#include "os0thread.h"
#include "mem0mem.h"
#include "srv0srv.h"
#include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */
/*
	IMPLEMENTATION OF THE RW_LOCK
	=============================
The status of a rw_lock is held in lock_word. The initial value of lock_word is
X_LOCK_DECR. lock_word is decremented by 1 for each s-lock and by X_LOCK_DECR
for each x-lock. This describes the lock state for each value of lock_word:

lock_word == X_LOCK_DECR:	Unlocked.
0 < lock_word < X_LOCK_DECR:	Read locked, no waiting writers.
				(X_LOCK_DECR - lock_word) is the
				number of readers that hold the lock.
lock_word == 0:			Write locked
-X_LOCK_DECR < lock_word < 0:	Read locked, with a waiting writer.
				(-lock_word) is the number of readers
				that hold the lock.
lock_word <= -X_LOCK_DECR:	Recursively write locked. lock_word has been
				decremented by X_LOCK_DECR once for each lock,
				so the number of locks is:
				((-lock_word) / X_LOCK_DECR) + 1
When lock_word <= -X_LOCK_DECR, we also know that lock_word % X_LOCK_DECR == 0:
other values of lock_word are invalid.

The lock_word is always read and updated atomically and consistently, so that
it always represents the state of the lock, and the state of the lock changes
with a single atomic operation. This lock_word holds all of the information
that a thread needs in order to determine if it is eligible to gain the lock
or if it must spin or sleep. The one exception to this is that writer_thread
must be verified before recursive write locks: to solve this scenario, we make
writer_thread readable by all threads, but only writeable by the x-lock holder.

The other members of the lock obey the following rules to remain consistent:

recursive:	This and the writer_thread field together control the
		behaviour of recursive x-locking.
		lock->recursive must be FALSE in following states:
			1) The writer_thread contains garbage i.e.: the
			lock has just been initialized.
			2) The lock is not x-held and there is no
			x-waiter waiting on WAIT_EX event.
			3) The lock is x-held or there is an x-waiter
			waiting on WAIT_EX event but the 'pass' value
			is non-zero.
		lock->recursive is TRUE iff:
			1) The lock is x-held or there is an x-waiter
			waiting on WAIT_EX event and the 'pass' value
			is zero.
		This flag must be set after the writer_thread field
		has been updated with a memory ordering barrier.
		It is unset before the lock_word has been incremented.
writer_thread:	Is used only in recursive x-locking. Can only be safely
		read iff lock->recursive flag is TRUE.
		This field is uninitialized at lock creation time and
		is updated atomically when x-lock is acquired or when
		move_ownership is called. A thread is only allowed to
		set the value of this field to it's thread_id i.e.: a
		thread cannot set writer_thread to some other thread's
		id.
waiters:	May be set to 1 anytime, but to avoid unnecessary wake-up
		signals, it should only be set to 1 when there are threads
		waiting on event. Must be 1 when a writer starts waiting to
		ensure the current x-locking thread sends a wake-up signal
		during unlock. May only be reset to 0 immediately before a
		a wake-up signal is sent to event. On most platforms, a
		memory barrier is required after waiters is set, and before
		verifying lock_word is still held, to ensure some unlocker
		really does see the flags new value.
event:		Threads wait on event for read or writer lock when another
		thread has an x-lock or an x-lock reservation (wait_ex). A
		thread may only wait on event after performing the following
		actions in order:
		   (1) Record the counter value of event (with os_event_reset).
		   (2) Set waiters to 1.
		   (3) Verify lock_word <= 0.
		(1) must come before (2) to ensure signal is not missed.
		(2) must come before (3) to ensure a signal is sent.
		These restrictions force the above ordering.
		Immediately before sending the wake-up signal, we should:
		   (1) Verify lock_word == X_LOCK_DECR (unlocked)
		   (2) Reset waiters to 0.
wait_ex_event:	A thread may only wait on the wait_ex_event after it has
		performed the following actions in order:
		   (1) Decrement lock_word by X_LOCK_DECR.
		   (2) Record counter value of wait_ex_event (os_event_reset,
		       called from sync_array_reserve_cell).
		   (3) Verify that lock_word < 0.
		(1) must come first to ensures no other threads become reader
		or next writer, and notifies unlocker that signal must be sent.
		(2) must come before (3) to ensure the signal is not missed.
		These restrictions force the above ordering.
		Immediately before sending the wake-up signal, we should:
		   Verify lock_word == 0 (waiting thread holds x_lock)
*/
137
/** number of spin waits on rw-latches,
138
resulted during shared (read) locks */
139
UNIV_INTERN ib_int64_t rw_s_spin_wait_count = 0;
140
/** number of spin loop rounds on rw-latches,
141
resulted during shared (read) locks */
142
UNIV_INTERN ib_int64_t rw_s_spin_round_count = 0;
144
/** number of OS waits on rw-latches,
145
resulted during shared (read) locks */
146
UNIV_INTERN ib_int64_t rw_s_os_wait_count = 0;
148
/** number of unlocks (that unlock shared locks),
149
set only when UNIV_SYNC_PERF_STAT is defined */
150
UNIV_INTERN ib_int64_t rw_s_exit_count = 0;
152
/** number of spin waits on rw-latches,
153
resulted during exclusive (write) locks */
154
UNIV_INTERN ib_int64_t rw_x_spin_wait_count = 0;
155
/** number of spin loop rounds on rw-latches,
156
resulted during exclusive (write) locks */
157
UNIV_INTERN ib_int64_t rw_x_spin_round_count = 0;
159
/** number of OS waits on rw-latches,
160
resulted during exclusive (write) locks */
161
UNIV_INTERN ib_int64_t rw_x_os_wait_count = 0;
163
/** number of unlocks (that unlock exclusive locks),
164
set only when UNIV_SYNC_PERF_STAT is defined */
165
UNIV_INTERN ib_int64_t rw_x_exit_count = 0;
167
/* The global list of rw-locks */
168
UNIV_INTERN rw_lock_list_t rw_lock_list;
169
UNIV_INTERN mutex_t rw_lock_list_mutex;
171
#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */

UNIV_INTERN mutex_t		rw_lock_debug_mutex;

/* If deadlock detection does not get immediately the mutex,
it may wait for this event */
UNIV_INTERN os_event_t		rw_lock_debug_event;
/* This is set to TRUE, if there may be waiters for the event */
UNIV_INTERN ibool		rw_lock_debug_waiters;

/******************************************************************//**
Creates a debug info struct. */
static
rw_lock_debug_t*
rw_lock_debug_create(void);
/*======================*/
/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
	rw_lock_debug_t* info);

/******************************************************************//**
Creates a debug info struct.
@return	own: debug info struct */
static
rw_lock_debug_t*
rw_lock_debug_create(void)
/*======================*/
{
	return((rw_lock_debug_t*) mem_alloc(sizeof(rw_lock_debug_t)));
}

/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
	rw_lock_debug_t* info)
{
	mem_free(info);
}
#endif /* UNIV_SYNC_DEBUG */
220
/******************************************************************//**
221
Creates, or rather, initializes an rw-lock object in a specified memory
222
location (which must be appropriately aligned). The rw-lock is initialized
223
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
224
is necessary only if the memory block containing it is freed. */
229
rw_lock_t* lock, /*!< in: pointer to memory */
231
# ifdef UNIV_SYNC_DEBUG
232
ulint level, /*!< in: level */
233
# endif /* UNIV_SYNC_DEBUG */
234
const char* cmutex_name, /*!< in: mutex name */
235
#endif /* UNIV_DEBUG */
236
const char* cfile_name, /*!< in: file name where created */
237
ulint cline) /*!< in: file line where created */
239
/* If this is the very first time a synchronization object is
240
created, then the following call initializes the sync system. */
242
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
243
mutex_create(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);
245
lock->mutex.cfile_name = cfile_name;
246
lock->mutex.cline = cline;
248
ut_d(lock->mutex.cmutex_name = cmutex_name);
249
ut_d(lock->mutex.mutex_type = 1);
250
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
252
UT_NOT_USED(cmutex_name);
254
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
256
lock->lock_word = X_LOCK_DECR;
259
/* We set this value to signify that lock->writer_thread
260
contains garbage at initialization and cannot be used for
261
recursive x-locking. */
262
lock->recursive = FALSE;
264
#ifdef UNIV_SYNC_DEBUG
265
UT_LIST_INIT(lock->debug_list);
268
#endif /* UNIV_SYNC_DEBUG */
270
lock->magic_n = RW_LOCK_MAGIC_N;
272
lock->cfile_name = cfile_name;
273
lock->cline = (unsigned int) cline;
275
lock->count_os_wait = 0;
276
lock->last_s_file_name = "not yet reserved";
277
lock->last_x_file_name = "not yet reserved";
278
lock->last_s_line = 0;
279
lock->last_x_line = 0;
280
lock->event = os_event_create(NULL);
281
lock->wait_ex_event = os_event_create(NULL);
283
mutex_enter(&rw_lock_list_mutex);
285
if (UT_LIST_GET_LEN(rw_lock_list) > 0) {
286
ut_a(UT_LIST_GET_FIRST(rw_lock_list)->magic_n
290
UT_LIST_ADD_FIRST(list, rw_lock_list, lock);
292
mutex_exit(&rw_lock_list_mutex);
295
/******************************************************************//**
296
Calling this function is obligatory only if the memory buffer containing
297
the rw-lock is freed. Removes an rw-lock object from the global list. The
298
rw-lock is checked to be in the non-locked state. */
303
rw_lock_t* lock) /*!< in: rw-lock */
305
ut_ad(rw_lock_validate(lock));
306
ut_a(lock->lock_word == X_LOCK_DECR);
310
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
311
mutex_free(rw_lock_get_mutex(lock));
312
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
314
mutex_enter(&rw_lock_list_mutex);
315
os_event_free(lock->event);
317
os_event_free(lock->wait_ex_event);
319
if (UT_LIST_GET_PREV(list, lock)) {
320
ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
322
if (UT_LIST_GET_NEXT(list, lock)) {
323
ut_a(UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);
326
UT_LIST_REMOVE(list, rw_lock_list, lock);
328
mutex_exit(&rw_lock_list_mutex);
332
#ifdef UNIV_DEBUG
/******************************************************************//**
Checks that the rw-lock has been initialized and that there are no
simultaneous shared and exclusive locks.
@return	TRUE */
UNIV_INTERN
ibool
rw_lock_validate(
/*=============*/
	rw_lock_t*	lock)	/*!< in: rw-lock */
{
	ut_a(lock);

	{
		/* Dirty reads: this is a debug consistency check only. */
		ulint	waiters	= rw_lock_get_waiters(lock);
		lint	lock_word = lock->lock_word;

		ut_a(lock->magic_n == RW_LOCK_MAGIC_N);
		ut_a(waiters == 0 || waiters == 1);
		/* Any recursively x-locked state must be an exact
		multiple of X_LOCK_DECR. */
		ut_a(lock_word > -X_LOCK_DECR
		     ||(-lock_word) % X_LOCK_DECR == 0);
	}

	return(TRUE);
}
#endif /* UNIV_DEBUG */
355
/******************************************************************//**
356
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
357
locked in exclusive mode, or there is an exclusive lock request waiting,
358
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
359
for the lock, before suspending the thread. */
364
rw_lock_t* lock, /*!< in: pointer to rw-lock */
365
ulint pass, /*!< in: pass value; != 0, if the lock
366
will be passed to another thread to unlock */
367
const char* file_name, /*!< in: file name where lock requested */
368
ulint line) /*!< in: line where requested */
370
ulint index; /* index of the reserved wait cell */
371
ulint i = 0; /* spin round count */
373
ut_ad(rw_lock_validate(lock));
375
rw_s_spin_wait_count++; /*!< Count calls to this function */
378
/* Spin waiting for the writer field to become free */
379
while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
380
if (srv_spin_wait_delay) {
381
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
387
if (i == SYNC_SPIN_ROUNDS) {
391
if (srv_print_latch_waits) {
393
"Thread %lu spin wait rw-s-lock at %p"
394
" cfile %s cline %lu rnds %lu\n",
395
(ulong) os_thread_pf(os_thread_get_curr_id()),
397
lock->cfile_name, (ulong) lock->cline, (ulong) i);
400
/* We try once again to obtain the lock */
401
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
402
rw_s_spin_round_count += i;
404
return; /* Success */
407
if (i < SYNC_SPIN_ROUNDS) {
411
rw_s_spin_round_count += i;
413
sync_array_reserve_cell(sync_primary_wait_array,
414
lock, RW_LOCK_SHARED,
418
/* Set waiters before checking lock_word to ensure wake-up
419
signal is sent. This may lead to some unnecessary signals. */
420
rw_lock_set_waiter_flag(lock);
422
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
423
sync_array_free_cell(sync_primary_wait_array, index);
424
return; /* Success */
427
if (srv_print_latch_waits) {
429
"Thread %lu OS wait rw-s-lock at %p"
430
" cfile %s cline %lu\n",
431
os_thread_pf(os_thread_get_curr_id()),
432
(void*) lock, lock->cfile_name,
433
(ulong) lock->cline);
436
/* these stats may not be accurate */
437
lock->count_os_wait++;
438
rw_s_os_wait_count++;
440
sync_array_wait_event(sync_primary_wait_array, index);
447
/******************************************************************//**
448
This function is used in the insert buffer to move the ownership of an
449
x-latch on a buffer frame to the current thread. The x-latch was set by
450
the buffer read operation and it protected the buffer frame while the
451
read was done. The ownership is moved because we want that the current
452
thread is able to acquire a second x-latch which is stored in an mtr.
453
This, in turn, is needed to pass the debug checks of index page
457
rw_lock_x_lock_move_ownership(
458
/*==========================*/
459
rw_lock_t* lock) /*!< in: lock which was x-locked in the
462
ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
464
rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
467
/******************************************************************//**
468
Function for the next writer to call. Waits for readers to exit.
469
The caller must have already decremented lock_word by X_LOCK_DECR. */
474
rw_lock_t* lock, /*!< in: pointer to rw-lock */
475
#ifdef UNIV_SYNC_DEBUG
476
ulint pass, /*!< in: pass value; != 0, if the lock will
477
be passed to another thread to unlock */
479
const char* file_name,/*!< in: file name where lock requested */
480
ulint line) /*!< in: line where requested */
485
ut_ad(lock->lock_word <= 0);
487
while (lock->lock_word < 0) {
488
if (srv_spin_wait_delay) {
489
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
491
if(i < SYNC_SPIN_ROUNDS) {
496
/* If there is still a reader, then go to sleep.*/
497
rw_x_spin_round_count += i;
499
sync_array_reserve_cell(sync_primary_wait_array,
504
/* Check lock_word to ensure wake-up isn't missed.*/
505
if(lock->lock_word < 0) {
507
/* these stats may not be accurate */
508
lock->count_os_wait++;
509
rw_x_os_wait_count++;
511
/* Add debug info as it is needed to detect possible
512
deadlock. We must add info for WAIT_EX thread for
513
deadlock detection to work properly. */
514
#ifdef UNIV_SYNC_DEBUG
515
rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
519
sync_array_wait_event(sync_primary_wait_array,
521
#ifdef UNIV_SYNC_DEBUG
522
rw_lock_remove_debug_info(lock, pass,
525
/* It is possible to wake when lock_word < 0.
526
We must pass the while-loop check to proceed.*/
528
sync_array_free_cell(sync_primary_wait_array,
532
rw_x_spin_round_count += i;
535
/******************************************************************//**
536
Low-level function for acquiring an exclusive lock.
537
@return RW_LOCK_NOT_LOCKED if did not succeed, RW_LOCK_EX if success. */
542
rw_lock_t* lock, /*!< in: pointer to rw-lock */
543
ulint pass, /*!< in: pass value; != 0, if the lock will
544
be passed to another thread to unlock */
545
const char* file_name,/*!< in: file name where lock requested */
546
ulint line) /*!< in: line where requested */
548
os_thread_id_t curr_thread = os_thread_get_curr_id();
550
if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
552
/* lock->recursive also tells us if the writer_thread
553
field is stale or active. As we are going to write
554
our own thread id in that field it must be that the
555
current writer_thread value is not active. */
556
ut_a(!lock->recursive);
558
/* Decrement occurred: we are writer or next-writer. */
559
rw_lock_set_writer_id_and_recursion_flag(lock,
560
pass ? FALSE : TRUE);
562
rw_lock_x_lock_wait(lock,
563
#ifdef UNIV_SYNC_DEBUG
569
/* Decrement failed: relock or failed lock */
570
if (!pass && lock->recursive
571
&& os_thread_eq(lock->writer_thread, curr_thread)) {
573
lock->lock_word -= X_LOCK_DECR;
575
/* Another thread locked before us */
579
#ifdef UNIV_SYNC_DEBUG
580
rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
583
lock->last_x_file_name = file_name;
584
lock->last_x_line = (unsigned int) line;
589
/******************************************************************//**
590
NOTE! Use the corresponding macro, not directly this function! Lock an
591
rw-lock in exclusive mode for the current thread. If the rw-lock is locked
592
in shared or exclusive mode, or there is an exclusive lock request waiting,
593
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
594
for the lock before suspending the thread. If the same thread has an x-lock
595
on the rw-lock, locking succeed, with the following exception: if pass != 0,
596
only a single x-lock may be taken on the lock. NOTE: If the same thread has
597
an s-lock, locking does not succeed! */
602
rw_lock_t* lock, /*!< in: pointer to rw-lock */
603
ulint pass, /*!< in: pass value; != 0, if the lock will
604
be passed to another thread to unlock */
605
const char* file_name,/*!< in: file name where lock requested */
606
ulint line) /*!< in: line where requested */
608
ulint index; /*!< index of the reserved wait cell */
609
ulint i; /*!< spin round count */
610
ibool spinning = FALSE;
612
ut_ad(rw_lock_validate(lock));
618
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
619
rw_x_spin_round_count += i;
621
return; /* Locking succeeded */
627
rw_x_spin_wait_count++;
630
/* Spin waiting for the lock_word to become free */
631
while (i < SYNC_SPIN_ROUNDS
632
&& lock->lock_word <= 0) {
633
if (srv_spin_wait_delay) {
634
ut_delay(ut_rnd_interval(0,
635
srv_spin_wait_delay));
640
if (i == SYNC_SPIN_ROUNDS) {
647
rw_x_spin_round_count += i;
649
if (srv_print_latch_waits) {
651
"Thread %lu spin wait rw-x-lock at %p"
652
" cfile %s cline %lu rnds %lu\n",
653
os_thread_pf(os_thread_get_curr_id()), (void*) lock,
654
lock->cfile_name, (ulong) lock->cline, (ulong) i);
657
sync_array_reserve_cell(sync_primary_wait_array,
663
/* Waiters must be set before checking lock_word, to ensure signal
664
is sent. This could lead to a few unnecessary wake-up signals. */
665
rw_lock_set_waiter_flag(lock);
667
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
668
sync_array_free_cell(sync_primary_wait_array, index);
669
return; /* Locking succeeded */
672
if (srv_print_latch_waits) {
674
"Thread %lu OS wait for rw-x-lock at %p"
675
" cfile %s cline %lu\n",
676
os_thread_pf(os_thread_get_curr_id()), (void*) lock,
677
lock->cfile_name, (ulong) lock->cline);
680
/* these stats may not be accurate */
681
lock->count_os_wait++;
682
rw_x_os_wait_count++;
684
sync_array_wait_event(sync_primary_wait_array, index);
690
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
because the debug mutex is also acquired in sync0arr while holding the OS
mutex protecting the sync array, and the ordinary mutex_enter might
recursively call routines in sync0arr, leading to a deadlock on the OS
mutex. */
UNIV_INTERN
void
rw_lock_debug_mutex_enter(void)
/*==========================*/
{
loop:
	if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
		return;
	}

	/* Record the event counter before announcing ourselves as a
	waiter, so the wake-up signal cannot be missed. */
	os_event_reset(rw_lock_debug_event);

	rw_lock_debug_waiters = TRUE;

	if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
		return;
	}

	os_event_wait(rw_lock_debug_event);

	goto loop;
}
720
/******************************************************************//**
721
Releases the debug mutex. */
724
rw_lock_debug_mutex_exit(void)
725
/*==========================*/
727
mutex_exit(&rw_lock_debug_mutex);
729
if (rw_lock_debug_waiters) {
730
rw_lock_debug_waiters = FALSE;
731
os_event_set(rw_lock_debug_event);
735
/******************************************************************//**
736
Inserts the debug information for an rw-lock. */
739
rw_lock_add_debug_info(
740
/*===================*/
741
rw_lock_t* lock, /*!< in: rw-lock */
742
ulint pass, /*!< in: pass value */
743
ulint lock_type, /*!< in: lock type */
744
const char* file_name, /*!< in: file where requested */
745
ulint line) /*!< in: line where requested */
747
rw_lock_debug_t* info;
752
info = rw_lock_debug_create();
754
rw_lock_debug_mutex_enter();
756
info->file_name = file_name;
758
info->lock_type = lock_type;
759
info->thread_id = os_thread_get_curr_id();
762
UT_LIST_ADD_FIRST(list, lock->debug_list, info);
764
rw_lock_debug_mutex_exit();
766
if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
767
sync_thread_add_level(lock, lock->level);
771
/******************************************************************//**
772
Removes a debug information struct for an rw-lock. */
775
rw_lock_remove_debug_info(
776
/*======================*/
777
rw_lock_t* lock, /*!< in: rw-lock */
778
ulint pass, /*!< in: pass value */
779
ulint lock_type) /*!< in: lock type */
781
rw_lock_debug_t* info;
785
if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
786
sync_thread_reset_level(lock);
789
rw_lock_debug_mutex_enter();
791
info = UT_LIST_GET_FIRST(lock->debug_list);
793
while (info != NULL) {
794
if ((pass == info->pass)
796
|| os_thread_eq(info->thread_id,
797
os_thread_get_curr_id()))
798
&& (info->lock_type == lock_type)) {
801
UT_LIST_REMOVE(list, lock->debug_list, info);
802
rw_lock_debug_mutex_exit();
804
rw_lock_debug_free(info);
809
info = UT_LIST_GET_NEXT(list, info);
814
#endif /* UNIV_SYNC_DEBUG */
816
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0.
@return	TRUE if locked */
UNIV_INTERN
ibool
rw_lock_own(
/*========*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		lock_type)	/*!< in: lock type: RW_LOCK_SHARED,
					RW_LOCK_EX */
{
	rw_lock_debug_t*	info;

	ut_ad(lock);
	ut_ad(rw_lock_validate(lock));

	rw_lock_debug_mutex_enter();

	info = UT_LIST_GET_FIRST(lock->debug_list);

	while (info != NULL) {

		if (os_thread_eq(info->thread_id, os_thread_get_curr_id())
		    && (info->pass == 0)
		    && (info->lock_type == lock_type)) {

			rw_lock_debug_mutex_exit();
			/* Found! */

			return(TRUE);
		}

		info = UT_LIST_GET_NEXT(list, info);
	}
	rw_lock_debug_mutex_exit();

	return(FALSE);
}
#endif /* UNIV_SYNC_DEBUG */
858
/******************************************************************//**
859
Checks if somebody has locked the rw-lock in the specified mode.
860
@return TRUE if locked */
865
rw_lock_t* lock, /*!< in: rw-lock */
866
ulint lock_type) /*!< in: lock type: RW_LOCK_SHARED,
872
ut_ad(rw_lock_validate(lock));
874
if (lock_type == RW_LOCK_SHARED) {
875
if (rw_lock_get_reader_count(lock) > 0) {
878
} else if (lock_type == RW_LOCK_EX) {
879
if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
889
#ifdef UNIV_SYNC_DEBUG
/***************************************************************//**
Prints debug info of currently locked rw-locks. */
UNIV_INTERN
void
rw_lock_list_print_info(
/*====================*/
	FILE*	file)		/*!< in: file where to print */
{
	rw_lock_t*	lock;
	ulint		count		= 0;
	rw_lock_debug_t* info;

	mutex_enter(&rw_lock_list_mutex);

	fputs("-------------\n"
	      "RW-LATCH INFO\n"
	      "-------------\n", file);

	lock = UT_LIST_GET_FIRST(rw_lock_list);

	while (lock != NULL) {

		count++;

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
		mutex_enter(&(lock->mutex));
#endif
		if (lock->lock_word != X_LOCK_DECR) {

			fprintf(file, "RW-LOCK: %p ", (void*) lock);

			if (rw_lock_get_waiters(lock)) {
				fputs(" Waiters for the lock exist\n", file);
			} else {
				putc('\n', file);
			}

			info = UT_LIST_GET_FIRST(lock->debug_list);
			while (info != NULL) {
				rw_lock_debug_print(info);
				info = UT_LIST_GET_NEXT(list, info);
			}
		}
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
		mutex_exit(&(lock->mutex));
#endif

		lock = UT_LIST_GET_NEXT(list, lock);
	}

	fprintf(file, "Total number of rw-locks %ld\n", count);
	mutex_exit(&rw_lock_list_mutex);
}
944
/***************************************************************//**
945
Prints debug info of an rw-lock. */
950
rw_lock_t* lock) /*!< in: rw-lock */
952
rw_lock_debug_t* info;
957
"RW-LATCH: %p ", (void*) lock);
959
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
960
/* We used to acquire lock->mutex here, but it would cause a
961
recursive call to sync_thread_add_level() if UNIV_SYNC_DEBUG
962
is defined. Since this function is only invoked from
963
sync_thread_levels_g(), let us choose the smaller evil:
964
performing dirty reads instead of causing bogus deadlocks or
965
assertion failures. */
967
if (lock->lock_word != X_LOCK_DECR) {
969
if (rw_lock_get_waiters(lock)) {
970
fputs(" Waiters for the lock exist\n", stderr);
975
info = UT_LIST_GET_FIRST(lock->debug_list);
976
while (info != NULL) {
977
rw_lock_debug_print(info);
978
info = UT_LIST_GET_NEXT(list, info);
983
/*********************************************************************//**
984
Prints info of a debug struct. */
989
rw_lock_debug_t* info) /*!< in: debug struct */
993
rwt = info->lock_type;
995
fprintf(stderr, "Locked: thread %ld file %s line %ld ",
996
(ulong) os_thread_pf(info->thread_id), info->file_name,
998
if (rwt == RW_LOCK_SHARED) {
999
fputs("S-LOCK", stderr);
1000
} else if (rwt == RW_LOCK_EX) {
1001
fputs("X-LOCK", stderr);
1002
} else if (rwt == RW_LOCK_WAIT_EX) {
1003
fputs("WAIT X-LOCK", stderr);
1007
if (info->pass != 0) {
1008
fprintf(stderr, " pass value %lu", (ulong) info->pass);
1013
/***************************************************************//**
1014
Returns the number of currently locked rw-locks. Works only in the debug
1016
@return number of locked rw-locks */
1019
rw_lock_n_locked(void)
1020
/*==================*/
1025
mutex_enter(&rw_lock_list_mutex);
1027
lock = UT_LIST_GET_FIRST(rw_lock_list);
1029
while (lock != NULL) {
1031
if (lock->lock_word != X_LOCK_DECR) {
1035
lock = UT_LIST_GET_NEXT(list, lock);
1038
mutex_exit(&rw_lock_list_mutex);
1042
#endif /* UNIV_SYNC_DEBUG */