/*****************************************************************************

Copyright (c) 2007, 2009, Innobase Oy. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/
19
/**************************************************//**
INFORMATION SCHEMA innodb_trx, innodb_locks and
innodb_lock_waits tables fetch code.

The code below fetches information needed to fill those
3 dynamic tables and uploads it into a "transactions
table cache" for later retrieval.

Created July 17, 2007 Vasil Dimov
*******************************************************/
31
#include <mysql/plugin.h>
33
#include "mysql_addons.h"
37
#include "dict0dict.h"
38
#include "ha0storage.h"
39
#include "ha_prototypes.h"
40
#include "hash0hash.h"
41
#include "lock0iter.h"
42
#include "lock0lock.h"
44
#include "page0page.h"
49
#include "sync0sync.h"
50
#include "sync0types.h"
57
/** Initial number of rows in the table cache */
58
#define TABLE_CACHE_INITIAL_ROWSNUM 1024
60
/** @brief The maximum number of chunks to allocate for a table cache.

The rows of a table cache are stored in a set of chunks. When a new
row is added a new chunk is allocated if necessary. Assuming that the
first one is 1024 rows (TABLE_CACHE_INITIAL_ROWSNUM) and each
subsequent is N/2 where N is the number of rows we have allocated till
now, then 39th chunk would accommodate 1677416425 rows and all chunks
would accommodate 3354832851 rows. */
68
#define MEM_CHUNKS_IN_TABLE_CACHE 39
70
/** The following are some testing auxiliary macros. Do not enable them
in a production environment. */
75
/** If this is enabled then lock folds will always be different
resulting in equal rows being put in a different cells of the hash
table. Checking for duplicates will be flawed because different
fold will be calculated when a row is searched in the hash table. */
79
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
83
/** This effectively kills the search-for-duplicate-before-adding-a-row
84
function, but searching in the hash is still performed. It will always
85
be assumed that lock is not present and insertion will be performed in
87
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
91
/** This aggressively repeats adding each row many times. Depending on
92
the above settings this may be noop or may result in lots of rows being
94
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
98
/** Very similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T but hash
99
table search is not performed at all. */
100
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
104
/** Do not insert each row into the hash table, duplicates may appear
105
if this is enabled, also if this is enabled searching into the hash is
106
noop because it will be empty. */
107
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
111
/** Memory limit passed to ha_storage_put_memlim().
@param cache hash storage
@return maximum allowed allocation size */
114
#define MAX_ALLOWED_FOR_STORAGE(cache) \
116
- (cache)->mem_allocd)
118
/** Memory limit in table_cache_create_empty_row().
@param cache hash storage
@return maximum allowed allocation size */
121
#define MAX_ALLOWED_FOR_ALLOC(cache) \
123
- (cache)->mem_allocd \
124
- ha_storage_get_size((cache)->storage))
126
/** Memory for each table in the intermediate buffer is allocated in
separate chunks. These chunks are considered to be concatenated to
represent one flat array of rows. */
129
typedef struct i_s_mem_chunk_struct {
130
ulint offset; /*!< offset, in number of rows */
131
ulint rows_allocd; /*!< the size of this chunk, in number
133
void* base; /*!< start of the chunk */
136
/** This represents one table's cache. */
137
typedef struct i_s_table_cache_struct {
138
ulint rows_used; /*!< number of used rows */
139
ulint rows_allocd; /*!< number of allocated rows */
140
ulint row_size; /*!< size of a single row */
141
i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< array of
142
memory chunks that stores the
146
/** This structure describes the intermediate buffer */
147
struct trx_i_s_cache_struct {
148
rw_lock_t rw_lock; /*!< read-write lock protecting
149
the rest of this structure */
150
ullint last_read; /*!< last time the cache was read;
151
measured in microseconds since
153
mutex_t last_read_mutex;/*!< mutex protecting the
154
last_read member - it is updated
155
inside a shared lock of the
157
i_s_table_cache_t innodb_trx; /*!< innodb_trx table */
158
i_s_table_cache_t innodb_locks; /*!< innodb_locks table */
159
i_s_table_cache_t innodb_lock_waits;/*!< innodb_lock_waits table */
160
/** the hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes */
161
#define LOCKS_HASH_CELLS_NUM 10000
162
hash_table_t* locks_hash; /*!< hash table used to eliminate
163
duplicate entries in the
164
innodb_locks table */
165
/** Initial size of the cache storage */
166
#define CACHE_STORAGE_INITIAL_SIZE 1024
167
/** Number of hash cells in the cache storage */
168
#define CACHE_STORAGE_HASH_CELLS 2048
169
ha_storage_t* storage; /*!< storage for external volatile
170
data that can possibly not be
171
available later, when we release
173
ulint mem_allocd; /*!< the amount of memory
174
allocated with mem_alloc*() */
175
ibool is_truncated; /*!< this is TRUE if the memory
176
limit was hit and thus the data
177
in the cache is truncated */
180
/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
183
static trx_i_s_cache_t trx_i_s_cache_static;
184
/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
187
UNIV_INTERN trx_i_s_cache_t* trx_i_s_cache = &trx_i_s_cache_static;
189
/*******************************************************************//**
190
For a record lock that is in waiting state retrieves the only bit that
191
is set, for a table lock returns ULINT_UNDEFINED.
192
@return record number within the heap */
195
wait_lock_get_heap_no(
196
/*==================*/
197
const lock_t* lock) /*!< in: lock */
201
switch (lock_get_type(lock)) {
203
ret = lock_rec_find_set_bit(lock);
204
ut_a(ret != ULINT_UNDEFINED);
207
ret = ULINT_UNDEFINED;
216
/*******************************************************************//**
217
Initializes the members of a table cache. */
222
i_s_table_cache_t* table_cache, /*!< out: table cache */
223
size_t row_size) /*!< in: the size of a
228
table_cache->rows_used = 0;
229
table_cache->rows_allocd = 0;
230
table_cache->row_size = row_size;
232
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
234
/* the memory is actually allocated in
235
table_cache_create_empty_row() */
236
table_cache->chunks[i].base = NULL;
240
/*******************************************************************//**
241
Frees a table cache. */
246
i_s_table_cache_t* table_cache) /*!< in/out: table cache */
250
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
252
/* the memory is actually allocated in
253
table_cache_create_empty_row() */
254
if (table_cache->chunks[i].base) {
255
mem_free(table_cache->chunks[i].base);
256
table_cache->chunks[i].base = NULL;
261
/*******************************************************************//**
262
Returns an empty row from a table cache. The row is allocated if no more
263
empty rows are available. The number of used rows is incremented.
264
If the memory limit is hit then NULL is returned and nothing is
266
@return empty row, or NULL if out of memory */
269
table_cache_create_empty_row(
270
/*=========================*/
271
i_s_table_cache_t* table_cache, /*!< in/out: table cache */
272
trx_i_s_cache_t* cache) /*!< in/out: cache to record
279
ut_a(table_cache->rows_used <= table_cache->rows_allocd);
281
if (table_cache->rows_used == table_cache->rows_allocd) {
283
/* rows_used == rows_allocd means that new chunk needs
284
to be allocated: either no more empty rows in the
285
last allocated chunk or nothing has been allocated yet
286
(rows_num == rows_allocd == 0); */
288
i_s_mem_chunk_t* chunk;
294
/* find the first not allocated chunk */
295
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
297
if (table_cache->chunks[i].base == NULL) {
303
/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
304
have been allocated :-X */
305
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
307
/* allocate the chunk we just found */
311
/* first chunk, nothing is allocated yet */
312
req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
315
/* Memory is increased by the formula
316
new = old + old / 2; We are trying not to be
317
aggressive here (= using the common new = old * 2)
318
because the allocated memory will not be freed
319
until InnoDB exit (it is reused). So it is better
320
to once allocate the memory in more steps, but
321
have less unused/wasted memory than to use less
322
steps in allocation (which is done once in a
323
lifetime) but end up with lots of unused/wasted
325
req_rows = table_cache->rows_allocd / 2;
327
req_bytes = req_rows * table_cache->row_size;
329
if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {
334
chunk = &table_cache->chunks[i];
336
chunk->base = mem_alloc2(req_bytes, &got_bytes);
338
got_rows = got_bytes / table_cache->row_size;
340
cache->mem_allocd += got_bytes;
343
printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
345
"req rows=%lu, got rows=%lu\n",
346
i, req_bytes, got_bytes,
347
table_cache->row_size,
351
chunk->rows_allocd = got_rows;
353
table_cache->rows_allocd += got_rows;
355
/* adjust the offset of the next chunk */
356
if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
358
table_cache->chunks[i + 1].offset
359
= chunk->offset + chunk->rows_allocd;
362
/* return the first empty row in the newly allocated
370
/* there is an empty row, no need to allocate new
373
/* find the first chunk that contains allocated but
375
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
377
if (table_cache->chunks[i].offset
378
+ table_cache->chunks[i].rows_allocd
379
> table_cache->rows_used) {
385
/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
387
table_cache->rows_used != table_cache->rows_allocd means
388
exactly the opposite - there are allocated but
389
empty/unused rows :-X */
390
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
392
chunk_start = (char*) table_cache->chunks[i].base;
393
offset = table_cache->rows_used
394
- table_cache->chunks[i].offset;
396
row = chunk_start + offset * table_cache->row_size;
399
table_cache->rows_used++;
404
/*******************************************************************//**
405
Fills i_s_trx_row_t object.
406
If memory can not be allocated then FALSE is returned.
407
@return FALSE if allocation fails */
412
i_s_trx_row_t* row, /*!< out: result object
414
const trx_t* trx, /*!< in: transaction to
416
const i_s_locks_row_t* requested_lock_row,/*!< in: pointer to the
418
innodb_locks if trx is
419
waiting or NULL if trx
421
trx_i_s_cache_t* cache) /*!< in/out: cache into
422
which to copy volatile
425
row->trx_id = trx_get_id(trx);
426
row->trx_started = (ib_time_t) trx->start_time;
427
row->trx_state = trx_get_que_state_str(trx);
429
if (trx->wait_lock != NULL) {
431
ut_a(requested_lock_row != NULL);
433
row->requested_lock_row = requested_lock_row;
434
row->trx_wait_started = (ib_time_t) trx->wait_started;
437
ut_a(requested_lock_row == NULL);
439
row->requested_lock_row = NULL;
440
row->trx_wait_started = 0;
443
row->trx_weight = (ullint) ut_conv_dulint_to_longlong(TRX_WEIGHT(trx));
445
if (trx->mysql_thd != NULL) {
446
row->trx_mysql_thread_id
447
= thd_get_thread_id(trx->mysql_thd);
449
/* For internal transactions e.g., purge and transactions
450
being recovered at startup there is no associated MySQL
451
thread data structure. */
452
row->trx_mysql_thread_id = 0;
455
if (trx->mysql_query_str != NULL && *trx->mysql_query_str != NULL) {
457
if (strlen(*trx->mysql_query_str)
458
> TRX_I_S_TRX_QUERY_MAX_LEN) {
460
char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
462
memcpy(query, *trx->mysql_query_str,
463
TRX_I_S_TRX_QUERY_MAX_LEN);
464
query[TRX_I_S_TRX_QUERY_MAX_LEN] = '\0';
466
row->trx_query = ha_storage_put_memlim(
467
cache->storage, query,
468
TRX_I_S_TRX_QUERY_MAX_LEN + 1,
469
MAX_ALLOWED_FOR_STORAGE(cache));
472
row->trx_query = ha_storage_put_str_memlim(
473
cache->storage, *trx->mysql_query_str,
474
MAX_ALLOWED_FOR_STORAGE(cache));
477
if (row->trx_query == NULL) {
483
row->trx_query = NULL;
489
/*******************************************************************//**
490
Format the nth field of "rec" and put it in "buf". The result is always
491
NUL-terminated. Returns the number of bytes that were written to "buf"
492
(including the terminating NUL).
493
@return end of the result */
498
char* buf, /*!< out: buffer */
499
ulint buf_size,/*!< in: buffer size in bytes */
500
ulint n, /*!< in: number of field */
501
const dict_index_t* index, /*!< in: index */
502
const rec_t* rec, /*!< in: record */
503
const ulint* offsets)/*!< in: record offsets, returned
504
by rec_get_offsets() */
508
dict_field_t* dict_field;
511
ut_ad(rec_offs_validate(rec, NULL, offsets));
521
/* we must append ", " before the actual data */
529
memcpy(buf, ", ", 3);
536
/* now buf_size >= 1 */
538
data = rec_get_nth_field(rec, offsets, n, &data_len);
540
dict_field = dict_index_get_nth_field(index, n);
542
ret += row_raw_format((const char*) data, data_len,
543
dict_field, buf, buf_size);
548
/*******************************************************************//**
549
Fills the "lock_data" member of i_s_locks_row_t object.
550
If memory can not be allocated then FALSE is returned.
551
@return FALSE if allocation fails */
556
const char** lock_data,/*!< out: "lock_data" to fill */
557
const lock_t* lock, /*!< in: lock used to find the data */
558
ulint heap_no,/*!< in: rec num used to find the data */
559
trx_i_s_cache_t* cache) /*!< in/out: cache where to store
564
const buf_block_t* block;
568
ut_a(lock_get_type(lock) == LOCK_REC);
572
block = buf_page_try_get(lock_rec_get_space_id(lock),
573
lock_rec_get_page_no(lock),
585
page = (const page_t*) buf_block_get_frame(block);
587
rec = page_find_rec_with_heap_no(page, heap_no);
589
if (page_rec_is_infimum(rec)) {
591
*lock_data = ha_storage_put_str_memlim(
592
cache->storage, "infimum pseudo-record",
593
MAX_ALLOWED_FOR_STORAGE(cache));
594
} else if (page_rec_is_supremum(rec)) {
596
*lock_data = ha_storage_put_str_memlim(
597
cache->storage, "supremum pseudo-record",
598
MAX_ALLOWED_FOR_STORAGE(cache));
601
const dict_index_t* index;
604
ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
606
char buf[TRX_I_S_LOCK_DATA_MAX_LEN];
610
rec_offs_init(offsets_onstack);
611
offsets = offsets_onstack;
613
index = lock_rec_get_index(lock);
615
n_fields = dict_index_get_n_unique(index);
620
offsets = rec_get_offsets(rec, index, offsets, n_fields,
623
/* format and store the data */
626
for (i = 0; i < n_fields; i++) {
628
buf_used += put_nth_field(
629
buf + buf_used, sizeof(buf) - buf_used,
630
i, index, rec, offsets) - 1;
633
*lock_data = (const char*) ha_storage_put_memlim(
634
cache->storage, buf, buf_used + 1,
635
MAX_ALLOWED_FOR_STORAGE(cache));
637
if (UNIV_UNLIKELY(heap != NULL)) {
639
/* this means that rec_get_offsets() has created a new
640
heap and has stored offsets in it; check that this is
641
really the case and free the heap */
642
ut_a(offsets != offsets_onstack);
649
if (*lock_data == NULL) {
657
/*******************************************************************//**
658
Fills i_s_locks_row_t object. Returns its first argument.
659
If memory can not be allocated then FALSE is returned.
660
@return FALSE if allocation fails */
665
i_s_locks_row_t* row, /*!< out: result object that's filled */
666
const lock_t* lock, /*!< in: lock to get data from */
667
ulint heap_no,/*!< in: lock's record number
668
or ULINT_UNDEFINED if the lock
670
trx_i_s_cache_t* cache) /*!< in/out: cache into which to copy
673
row->lock_trx_id = lock_get_trx_id(lock);
674
row->lock_mode = lock_get_mode_str(lock);
675
row->lock_type = lock_get_type_str(lock);
677
row->lock_table = ha_storage_put_str_memlim(
678
cache->storage, lock_get_table_name(lock),
679
MAX_ALLOWED_FOR_STORAGE(cache));
681
/* memory could not be allocated */
682
if (row->lock_table == NULL) {
687
switch (lock_get_type(lock)) {
689
row->lock_index = ha_storage_put_str_memlim(
690
cache->storage, lock_rec_get_index_name(lock),
691
MAX_ALLOWED_FOR_STORAGE(cache));
693
/* memory could not be allocated */
694
if (row->lock_index == NULL) {
699
row->lock_space = lock_rec_get_space_id(lock);
700
row->lock_page = lock_rec_get_page_no(lock);
701
row->lock_rec = heap_no;
703
if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {
705
/* memory could not be allocated */
711
row->lock_index = NULL;
713
row->lock_space = ULINT_UNDEFINED;
714
row->lock_page = ULINT_UNDEFINED;
715
row->lock_rec = ULINT_UNDEFINED;
717
row->lock_data = NULL;
724
row->lock_table_id = lock_get_table_id(lock);
726
row->hash_chain.value = row;
731
/*******************************************************************//**
732
Fills i_s_lock_waits_row_t object. Returns its first argument.
733
@return result object that's filled */
735
i_s_lock_waits_row_t*
738
i_s_lock_waits_row_t* row, /*!< out: result object
740
const i_s_locks_row_t* requested_lock_row,/*!< in: pointer to the
741
relevant requested lock
742
row in innodb_locks */
743
const i_s_locks_row_t* blocking_lock_row)/*!< in: pointer to the
744
relevant blocking lock
745
row in innodb_locks */
747
row->requested_lock_row = requested_lock_row;
748
row->blocking_lock_row = blocking_lock_row;
753
/*******************************************************************//**
754
Calculates a hash fold for a lock. For a record lock the fold is
755
calculated from 4 elements, which uniquely identify a lock at a given
756
point in time: transaction id, space id, page number, record number.
757
For a table lock the fold is table's id.
763
const lock_t* lock, /*!< in: lock object to fold */
764
ulint heap_no)/*!< in: lock's record number
765
or ULINT_UNDEFINED if the lock
768
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
769
static ulint fold = 0;
775
switch (lock_get_type(lock)) {
777
ut_a(heap_no != ULINT_UNDEFINED);
779
ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
780
lock_rec_get_space_id(lock));
782
ret = ut_fold_ulint_pair(ret,
783
lock_rec_get_page_no(lock));
785
ret = ut_fold_ulint_pair(ret, heap_no);
789
/* this check is actually not necessary for continuing
790
correct operation, but something must have gone wrong if
792
ut_a(heap_no == ULINT_UNDEFINED);
794
ret = (ulint) lock_get_table_id(lock);
805
/*******************************************************************//**
806
Checks whether i_s_locks_row_t object represents a lock_t object.
807
@return TRUE if they match */
812
const i_s_locks_row_t* row, /*!< in: innodb_locks row */
813
const lock_t* lock, /*!< in: lock object */
814
ulint heap_no)/*!< in: lock's record number
815
or ULINT_UNDEFINED if the lock
818
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
821
switch (lock_get_type(lock)) {
823
ut_a(heap_no != ULINT_UNDEFINED);
825
return(row->lock_trx_id == lock_get_trx_id(lock)
826
&& row->lock_space == lock_rec_get_space_id(lock)
827
&& row->lock_page == lock_rec_get_page_no(lock)
828
&& row->lock_rec == heap_no);
831
/* this check is actually not necessary for continuing
832
correct operation, but something must have gone wrong if
834
ut_a(heap_no == ULINT_UNDEFINED);
836
return(row->lock_trx_id == lock_get_trx_id(lock)
837
&& row->lock_table_id == lock_get_table_id(lock));
846
/*******************************************************************//**
847
Searches for a row in the innodb_locks cache that has a specified id.
848
This happens in O(1) time since a hash table is used. Returns pointer to
849
the row or NULL if none is found.
850
@return row or NULL */
855
trx_i_s_cache_t* cache, /*!< in: cache */
856
const lock_t* lock, /*!< in: lock to search for */
857
ulint heap_no)/*!< in: lock's record number
858
or ULINT_UNDEFINED if the lock
861
i_s_hash_chain_t* hash_chain;
864
/* hash_chain->"next" */
869
fold_lock(lock, heap_no),
870
/* the type of the next variable */
872
/* auxiliary variable */
874
/* assertion on every traversed item */
876
/* this determines if we have found the lock */
877
locks_row_eq_lock(hash_chain->value, lock, heap_no));
879
if (hash_chain == NULL) {
885
return(hash_chain->value);
888
/*******************************************************************//**
889
Adds new element to the locks cache, enlarging it if necessary.
890
Returns a pointer to the added row. If the row is already present then
891
no row is added and a pointer to the existing row is returned.
892
If row can not be allocated then NULL is returned.
898
trx_i_s_cache_t* cache, /*!< in/out: cache */
899
const lock_t* lock, /*!< in: the element to add */
900
ulint heap_no)/*!< in: lock's record number
901
or ULINT_UNDEFINED if the lock
904
i_s_locks_row_t* dst_row;
906
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
908
for (i = 0; i < 10000; i++) {
910
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
911
/* quit if this lock is already present */
912
dst_row = search_innodb_locks(cache, lock, heap_no);
913
if (dst_row != NULL) {
919
dst_row = (i_s_locks_row_t*)
920
table_cache_create_empty_row(&cache->innodb_locks, cache);
922
/* memory could not be allocated */
923
if (dst_row == NULL) {
928
if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
930
/* memory could not be allocated */
931
cache->innodb_locks.rows_used--;
935
#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
937
/* the type used in the hash chain */
939
/* hash_chain->"next" */
944
fold_lock(lock, heap_no),
945
/* add this data to the hash */
946
&dst_row->hash_chain);
948
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
955
/*******************************************************************//**
956
Adds new pair of locks to the lock waits cache.
957
If memory can not be allocated then FALSE is returned.
958
@return FALSE if allocation fails */
961
add_lock_wait_to_cache(
962
/*===================*/
963
trx_i_s_cache_t* cache, /*!< in/out: cache */
964
const i_s_locks_row_t* requested_lock_row,/*!< in: pointer to the
965
relevant requested lock
966
row in innodb_locks */
967
const i_s_locks_row_t* blocking_lock_row)/*!< in: pointer to the
968
relevant blocking lock
969
row in innodb_locks */
971
i_s_lock_waits_row_t* dst_row;
973
dst_row = (i_s_lock_waits_row_t*)
974
table_cache_create_empty_row(&cache->innodb_lock_waits,
977
/* memory could not be allocated */
978
if (dst_row == NULL) {
983
fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
988
/*******************************************************************//**
989
Adds transaction's relevant (important) locks to cache.
990
If the transaction is waiting, then the wait lock is added to
991
innodb_locks and a pointer to the added row is returned in
992
requested_lock_row, otherwise requested_lock_row is set to NULL.
993
If rows can not be allocated then FALSE is returned and the value of
994
requested_lock_row is undefined.
995
@return FALSE if allocation fails */
998
add_trx_relevant_locks_to_cache(
999
/*============================*/
1000
trx_i_s_cache_t* cache, /*!< in/out: cache */
1001
const trx_t* trx, /*!< in: transaction */
1002
i_s_locks_row_t** requested_lock_row)/*!< out: pointer to the
1003
requested lock row, or NULL or
1006
ut_ad(mutex_own(&kernel_mutex));
1008
/* If transaction is waiting we add the wait lock and all locks
1009
from another transactions that are blocking the wait lock. */
1010
if (trx->que_state == TRX_QUE_LOCK_WAIT) {
1012
const lock_t* curr_lock;
1013
ulint wait_lock_heap_no;
1014
i_s_locks_row_t* blocking_lock_row;
1015
lock_queue_iterator_t iter;
1017
ut_a(trx->wait_lock != NULL);
1020
= wait_lock_get_heap_no(trx->wait_lock);
1022
/* add the requested lock */
1024
= add_lock_to_cache(cache, trx->wait_lock,
1027
/* memory could not be allocated */
1028
if (*requested_lock_row == NULL) {
1033
/* then iterate over the locks before the wait lock and
1034
add the ones that are blocking it */
1036
lock_queue_iterator_reset(&iter, trx->wait_lock,
1039
curr_lock = lock_queue_iterator_get_prev(&iter);
1040
while (curr_lock != NULL) {
1042
if (lock_has_to_wait(trx->wait_lock,
1045
/* add the lock that is
1046
blocking trx->wait_lock */
1048
= add_lock_to_cache(
1050
/* heap_no is the same
1051
for the wait and waited
1055
/* memory could not be allocated */
1056
if (blocking_lock_row == NULL) {
1061
/* add the relation between both locks
1062
to innodb_lock_waits */
1063
if (!add_lock_wait_to_cache(
1064
cache, *requested_lock_row,
1065
blocking_lock_row)) {
1067
/* memory could not be allocated */
1072
curr_lock = lock_queue_iterator_get_prev(&iter);
1076
*requested_lock_row = NULL;
1082
/** The minimum time that a cache must not be updated after it has been
read for the last time; measured in microseconds. We use this technique
to ensure that SELECTs which join several INFORMATION SCHEMA tables read
the same version of the cache. */
1086
#define CACHE_MIN_IDLE_TIME_US 100000 /* 0.1 sec */
1088
/*******************************************************************//**
1089
Checks if the cache can safely be updated.
1090
@return TRUE if can be updated */
1093
can_cache_be_updated(
1094
/*=================*/
1095
trx_i_s_cache_t* cache) /*!< in: cache */
1099
/* Here we read cache->last_read without acquiring its mutex
1100
because last_read is only updated when a shared rw lock on the
1101
whole cache is being held (see trx_i_s_cache_end_read()) and
1102
we are currently holding an exclusive rw lock on the cache.
1103
So it is not possible for last_read to be updated while we are
1106
#ifdef UNIV_SYNC_DEBUG
1107
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1110
now = ut_time_us(NULL);
1111
if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
1119
/*******************************************************************//**
1120
Declare a cache empty, preparing it to be filled up. Not all resources
1121
are freed because they can be reused. */
1124
trx_i_s_cache_clear(
1125
/*================*/
1126
trx_i_s_cache_t* cache) /*!< out: cache to clear */
1128
cache->innodb_trx.rows_used = 0;
1129
cache->innodb_locks.rows_used = 0;
1130
cache->innodb_lock_waits.rows_used = 0;
1132
hash_table_clear(cache->locks_hash);
1134
ha_storage_empty(&cache->storage);
1137
/*******************************************************************//**
1138
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
1139
table cache buffer. Cache must be locked for write. */
1142
fetch_data_into_cache(
1143
/*==================*/
1144
trx_i_s_cache_t* cache) /*!< in/out: cache */
1147
i_s_trx_row_t* trx_row;
1148
i_s_locks_row_t* requested_lock_row;
1150
ut_ad(mutex_own(&kernel_mutex));
1152
trx_i_s_cache_clear(cache);
1154
/* We iterate over the list of all transactions and add each one
1155
to innodb_trx's cache. We also add all locks that are relevant
1156
to each transaction into innodb_locks' and innodb_lock_waits'
1159
for (trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
1161
trx = UT_LIST_GET_NEXT(trx_list, trx)) {
1163
if (!add_trx_relevant_locks_to_cache(cache, trx,
1164
&requested_lock_row)) {
1166
cache->is_truncated = TRUE;
1170
trx_row = (i_s_trx_row_t*)
1171
table_cache_create_empty_row(&cache->innodb_trx,
1174
/* memory could not be allocated */
1175
if (trx_row == NULL) {
1177
cache->is_truncated = TRUE;
1181
if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
1183
/* memory could not be allocated */
1184
cache->innodb_trx.rows_used--;
1185
cache->is_truncated = TRUE;
1190
cache->is_truncated = FALSE;
1193
/*******************************************************************//**
1194
Update the transactions cache if it has not been read for some time.
1195
Called from handler/i_s.cc.
1196
@return 0 - fetched, 1 - not */
1199
trx_i_s_possibly_fetch_data_into_cache(
1200
/*===================================*/
1201
trx_i_s_cache_t* cache) /*!< in/out: cache */
1203
if (!can_cache_be_updated(cache)) {
1208
/* We need to read trx_sys and record/table lock queues */
1209
mutex_enter(&kernel_mutex);
1211
fetch_data_into_cache(cache);
1213
mutex_exit(&kernel_mutex);
1218
/*******************************************************************//**
1219
Returns TRUE if the data in the cache is truncated due to the memory
1220
limit posed by TRX_I_S_MEM_LIMIT.
1221
@return TRUE if truncated */
1224
trx_i_s_cache_is_truncated(
1225
/*=======================*/
1226
trx_i_s_cache_t* cache) /*!< in: cache */
1228
return(cache->is_truncated);
1231
/*******************************************************************//**
1232
Initialize INFORMATION SCHEMA trx related cache. */
1237
trx_i_s_cache_t* cache) /*!< out: cache to init */
1239
/* The latching is done in the following order:
1240
acquire trx_i_s_cache_t::rw_lock, X
1241
acquire kernel_mutex
1242
release kernel_mutex
1243
release trx_i_s_cache_t::rw_lock
1244
acquire trx_i_s_cache_t::rw_lock, S
1245
acquire trx_i_s_cache_t::last_read_mutex
1246
release trx_i_s_cache_t::last_read_mutex
1247
release trx_i_s_cache_t::rw_lock */
1249
rw_lock_create(&cache->rw_lock, SYNC_TRX_I_S_RWLOCK);
1251
cache->last_read = 0;
1253
mutex_create(&cache->last_read_mutex, SYNC_TRX_I_S_LAST_READ);
1255
table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
1256
table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
1257
table_cache_init(&cache->innodb_lock_waits,
1258
sizeof(i_s_lock_waits_row_t));
1260
cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);
1262
cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
1263
CACHE_STORAGE_HASH_CELLS);
1265
cache->mem_allocd = 0;
1267
cache->is_truncated = FALSE;
1270
/*******************************************************************//**
1271
Free the INFORMATION SCHEMA trx related cache. */
1276
trx_i_s_cache_t* cache) /*!< in, own: cache to free */
1278
hash_table_free(cache->locks_hash);
1279
ha_storage_free(cache->storage);
1280
table_cache_free(&cache->innodb_trx);
1281
table_cache_free(&cache->innodb_locks);
1282
table_cache_free(&cache->innodb_lock_waits);
1283
memset(cache, 0, sizeof *cache);
1286
/*******************************************************************//**
1287
Issue a shared/read lock on the tables cache. */
1290
trx_i_s_cache_start_read(
1291
/*=====================*/
1292
trx_i_s_cache_t* cache) /*!< in: cache */
1294
rw_lock_s_lock(&cache->rw_lock);
1297
/*******************************************************************//**
1298
Release a shared/read lock on the tables cache. */
1301
trx_i_s_cache_end_read(
1302
/*===================*/
1303
trx_i_s_cache_t* cache) /*!< in: cache */
1307
#ifdef UNIV_SYNC_DEBUG
1308
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
1311
/* update cache last read time */
1312
now = ut_time_us(NULL);
1313
mutex_enter(&cache->last_read_mutex);
1314
cache->last_read = now;
1315
mutex_exit(&cache->last_read_mutex);
1317
rw_lock_s_unlock(&cache->rw_lock);
1320
/*******************************************************************//**
1321
Issue an exclusive/write lock on the tables cache. */
1324
trx_i_s_cache_start_write(
1325
/*======================*/
1326
trx_i_s_cache_t* cache) /*!< in: cache */
1328
rw_lock_x_lock(&cache->rw_lock);
1331
/*******************************************************************//**
1332
Release an exclusive/write lock on the tables cache. */
1335
trx_i_s_cache_end_write(
1336
/*====================*/
1337
trx_i_s_cache_t* cache) /*!< in: cache */
1339
#ifdef UNIV_SYNC_DEBUG
1340
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1343
rw_lock_x_unlock(&cache->rw_lock);
1346
/*******************************************************************//**
1347
Selects a INFORMATION SCHEMA table cache from the whole cache.
1348
@return table cache */
1353
trx_i_s_cache_t* cache, /*!< in: whole cache */
1354
enum i_s_table table) /*!< in: which table */
1356
i_s_table_cache_t* table_cache;
1358
#ifdef UNIV_SYNC_DEBUG
1359
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
1360
|| rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1364
case I_S_INNODB_TRX:
1365
table_cache = &cache->innodb_trx;
1367
case I_S_INNODB_LOCKS:
1368
table_cache = &cache->innodb_locks;
1370
case I_S_INNODB_LOCK_WAITS:
1371
table_cache = &cache->innodb_lock_waits;
1377
return(table_cache);
1380
/*******************************************************************//**
1381
Retrieves the number of used rows in the cache for a given
1382
INFORMATION SCHEMA table.
1383
@return number of rows */
1386
trx_i_s_cache_get_rows_used(
1387
/*========================*/
1388
trx_i_s_cache_t* cache, /*!< in: cache */
1389
enum i_s_table table) /*!< in: which table */
1391
i_s_table_cache_t* table_cache;
1393
table_cache = cache_select_table(cache, table);
1395
return(table_cache->rows_used);
1398
/*******************************************************************//**
1399
Retrieves the nth row (zero-based) in the cache for a given
1400
INFORMATION SCHEMA table.
1404
trx_i_s_cache_get_nth_row(
1405
/*======================*/
1406
trx_i_s_cache_t* cache, /*!< in: cache */
1407
enum i_s_table table, /*!< in: which table */
1408
ulint n) /*!< in: row number */
1410
i_s_table_cache_t* table_cache;
1414
table_cache = cache_select_table(cache, table);
1416
ut_a(n < table_cache->rows_used);
1420
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
1422
if (table_cache->chunks[i].offset
1423
+ table_cache->chunks[i].rows_allocd > n) {
1425
row = (char*) table_cache->chunks[i].base
1426
+ (n - table_cache->chunks[i].offset)
1427
* table_cache->row_size;
1437
/*******************************************************************//**
1438
Crafts a lock id string from a i_s_locks_row_t object. Returns its
1439
second argument. This function aborts if there is not enough space in
1440
lock_id. Be sure to provide at least TRX_I_S_LOCK_ID_MAX_LEN + 1 if you
1441
want to be 100% sure that it will not abort.
1442
@return resulting lock id */
1445
trx_i_s_create_lock_id(
1446
/*===================*/
1447
const i_s_locks_row_t* row, /*!< in: innodb_locks row */
1448
char* lock_id,/*!< out: resulting lock_id */
1449
ulint lock_id_size)/*!< in: size of the lock id
1454
/* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */
1456
if (row->lock_space != ULINT_UNDEFINED) {
1458
res_len = ut_snprintf(lock_id, lock_id_size,
1459
TRX_ID_FMT ":%lu:%lu:%lu",
1460
row->lock_trx_id, row->lock_space,
1461
row->lock_page, row->lock_rec);
1464
res_len = ut_snprintf(lock_id, lock_id_size,
1467
row->lock_table_id);
1470
/* the typecast is safe because snprintf(3) never returns
1473
ut_a((ulint) res_len < lock_id_size);