/*****************************************************************************

Copyright (c) 2007, 2009, Innobase Oy. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/
19
/**************************************************//**
INFORMATION SCHEMA innodb_trx, innodb_locks and
innodb_lock_waits tables fetch code.

The code below fetches information needed to fill those
3 dynamic tables and uploads it into a "transactions
table cache" for later retrieval.

Created July 17, 2007 Vasil Dimov
*******************************************************/
31
#if !defined(BUILD_DRIZZLE)
32
# include <mysql/plugin.h>
35
#include "mysql_addons.h"
39
#include "dict0dict.h"
40
#include "ha0storage.h"
41
#include "ha_prototypes.h"
42
#include "hash0hash.h"
43
#include "lock0iter.h"
44
#include "lock0lock.h"
46
#include "page0page.h"
51
#include "sync0sync.h"
52
#include "sync0types.h"
59
/** Initial number of rows in the table cache */
60
#define TABLE_CACHE_INITIAL_ROWSNUM 1024
62
/** @brief The maximum number of chunks to allocate for a table cache.
64
The rows of a table cache are stored in a set of chunks. When a new
65
row is added a new chunk is allocated if necessary. Assuming that the
66
first one is 1024 rows (TABLE_CACHE_INITIAL_ROWSNUM) and each
67
subsequent is N/2 where N is the number of rows we have allocated till
68
now, then 39th chunk would accommodate 1677416425 rows and all chunks
69
would accommodate 3354832851 rows. */
70
#define MEM_CHUNKS_IN_TABLE_CACHE 39
72
/** The following are some testing auxiliary macros. Do not enable them
73
in a production environment. */
77
/** If this is enabled then lock folds will always be different
78
resulting in equal rows being put in a different cells of the hash
79
table. Checking for duplicates will be flawed because different
80
fold will be calculated when a row is searched in the hash table. */
81
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
85
/** This effectively kills the search-for-duplicate-before-adding-a-row
function, but searching in the hash is still performed. It will always
be assumed that lock is not present and insertion will be performed in
the hash table. */
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
93
/** This aggressively repeats adding each row many times. Depending on
the above settings this may be noop or may result in lots of rows being
added. */
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
100
/** Very similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T but hash
101
table search is not performed at all. */
102
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
106
/** Do not insert each row into the hash table, duplicates may appear
107
if this is enabled, also if this is enabled searching into the hash is
108
noop because it will be empty. */
109
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
113
/** Memory limit passed to ha_storage_put_memlim().
114
@param cache hash storage
115
@return maximum allowed allocation size */
116
#define MAX_ALLOWED_FOR_STORAGE(cache) \
118
- (cache)->mem_allocd)
120
/** Memory limit in table_cache_create_empty_row().
121
@param cache hash storage
122
@return maximum allowed allocation size */
123
#define MAX_ALLOWED_FOR_ALLOC(cache) \
125
- (cache)->mem_allocd \
126
- ha_storage_get_size((cache)->storage))
128
/** Memory for each table in the intermediate buffer is allocated in
129
separate chunks. These chunks are considered to be concatenated to
130
represent one flat array of rows. */
131
typedef struct i_s_mem_chunk_struct {
132
ulint offset; /*!< offset, in number of rows */
133
ulint rows_allocd; /*!< the size of this chunk, in number
135
void* base; /*!< start of the chunk */
138
/** This represents one table's cache. */
139
typedef struct i_s_table_cache_struct {
140
ulint rows_used; /*!< number of used rows */
141
ulint rows_allocd; /*!< number of allocated rows */
142
ulint row_size; /*!< size of a single row */
143
i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< array of
144
memory chunks that stores the
148
/** This structure describes the intermediate buffer */
149
struct trx_i_s_cache_struct {
150
rw_lock_t rw_lock; /*!< read-write lock protecting
151
the rest of this structure */
152
ullint last_read; /*!< last time the cache was read;
153
measured in microseconds since
155
mutex_t last_read_mutex;/*!< mutex protecting the
156
last_read member - it is updated
157
inside a shared lock of the
159
i_s_table_cache_t innodb_trx; /*!< innodb_trx table */
160
i_s_table_cache_t innodb_locks; /*!< innodb_locks table */
161
i_s_table_cache_t innodb_lock_waits;/*!< innodb_lock_waits table */
162
/** the hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes */
163
#define LOCKS_HASH_CELLS_NUM 10000
164
hash_table_t* locks_hash; /*!< hash table used to eliminate
165
duplicate entries in the
166
innodb_locks table */
167
/** Initial size of the cache storage */
168
#define CACHE_STORAGE_INITIAL_SIZE 1024
169
/** Number of hash cells in the cache storage */
170
#define CACHE_STORAGE_HASH_CELLS 2048
171
ha_storage_t* storage; /*!< storage for external volatile
172
data that can possibly not be
173
available later, when we release
175
ulint mem_allocd; /*!< the amount of memory
176
allocated with mem_alloc*() */
177
ibool is_truncated; /*!< this is TRUE if the memory
178
limit was hit and thus the data
179
in the cache is truncated */
182
/** This is the intermediate buffer where data needed to fill the
183
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
184
code in handler/i_s.cc. */
185
static trx_i_s_cache_t trx_i_s_cache_static;
186
/** This is the intermediate buffer where data needed to fill the
187
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
188
code in handler/i_s.cc. */
189
UNIV_INTERN trx_i_s_cache_t* trx_i_s_cache = &trx_i_s_cache_static;
191
/*******************************************************************//**
192
For a record lock that is in waiting state retrieves the only bit that
193
is set, for a table lock returns ULINT_UNDEFINED.
194
@return record number within the heap */
197
wait_lock_get_heap_no(
198
/*==================*/
199
const lock_t* lock) /*!< in: lock */
203
switch (lock_get_type(lock)) {
205
ret = lock_rec_find_set_bit(lock);
206
ut_a(ret != ULINT_UNDEFINED);
209
ret = ULINT_UNDEFINED;
218
/*******************************************************************//**
219
Initializes the members of a table cache. */
224
i_s_table_cache_t* table_cache, /*!< out: table cache */
225
size_t row_size) /*!< in: the size of a
230
table_cache->rows_used = 0;
231
table_cache->rows_allocd = 0;
232
table_cache->row_size = row_size;
234
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
236
/* the memory is actually allocated in
237
table_cache_create_empty_row() */
238
table_cache->chunks[i].base = NULL;
242
/*******************************************************************//**
243
Returns an empty row from a table cache. The row is allocated if no more
244
empty rows are available. The number of used rows is incremented.
245
If the memory limit is hit then NULL is returned and nothing is
247
@return empty row, or NULL if out of memory */
250
table_cache_create_empty_row(
251
/*=========================*/
252
i_s_table_cache_t* table_cache, /*!< in/out: table cache */
253
trx_i_s_cache_t* cache) /*!< in/out: cache to record
260
ut_a(table_cache->rows_used <= table_cache->rows_allocd);
262
if (table_cache->rows_used == table_cache->rows_allocd) {
264
/* rows_used == rows_allocd means that new chunk needs
265
to be allocated: either no more empty rows in the
266
last allocated chunk or nothing has been allocated yet
267
(rows_num == rows_allocd == 0); */
269
i_s_mem_chunk_t* chunk;
275
/* find the first not allocated chunk */
276
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
278
if (table_cache->chunks[i].base == NULL) {
284
/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
285
have been allocated :-X */
286
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
288
/* allocate the chunk we just found */
292
/* first chunk, nothing is allocated yet */
293
req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
296
/* Memory is increased by the formula
297
new = old + old / 2; We are trying not to be
298
aggressive here (= using the common new = old * 2)
299
because the allocated memory will not be freed
300
until InnoDB exit (it is reused). So it is better
301
to once allocate the memory in more steps, but
302
have less unused/wasted memory than to use less
303
steps in allocation (which is done once in a
304
lifetime) but end up with lots of unused/wasted
306
req_rows = table_cache->rows_allocd / 2;
308
req_bytes = req_rows * table_cache->row_size;
310
if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {
315
chunk = &table_cache->chunks[i];
317
chunk->base = mem_alloc2(req_bytes, &got_bytes);
319
got_rows = got_bytes / table_cache->row_size;
321
cache->mem_allocd += got_bytes;
324
printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
326
"req rows=%lu, got rows=%lu\n",
327
i, req_bytes, got_bytes,
328
table_cache->row_size,
332
chunk->rows_allocd = got_rows;
334
table_cache->rows_allocd += got_rows;
336
/* adjust the offset of the next chunk */
337
if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
339
table_cache->chunks[i + 1].offset
340
= chunk->offset + chunk->rows_allocd;
343
/* return the first empty row in the newly allocated
351
/* there is an empty row, no need to allocate new
354
/* find the first chunk that contains allocated but
356
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
358
if (table_cache->chunks[i].offset
359
+ table_cache->chunks[i].rows_allocd
360
> table_cache->rows_used) {
366
/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
368
table_cache->rows_used != table_cache->rows_allocd means
369
exactly the opposite - there are allocated but
370
empty/unused rows :-X */
371
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
373
chunk_start = (char*) table_cache->chunks[i].base;
374
offset = table_cache->rows_used
375
- table_cache->chunks[i].offset;
377
row = chunk_start + offset * table_cache->row_size;
380
table_cache->rows_used++;
385
/*******************************************************************//**
386
Fills i_s_trx_row_t object.
387
If memory can not be allocated then FALSE is returned.
388
@return FALSE if allocation fails */
393
i_s_trx_row_t* row, /*!< out: result object
395
const trx_t* trx, /*!< in: transaction to
397
const i_s_locks_row_t* requested_lock_row,/*!< in: pointer to the
399
innodb_locks if trx is
400
waiting or NULL if trx
402
trx_i_s_cache_t* cache) /*!< in/out: cache into
403
which to copy volatile
406
row->trx_id = trx_get_id(trx);
407
row->trx_started = (ib_time_t) trx->start_time;
408
row->trx_state = trx_get_que_state_str(trx);
410
if (trx->wait_lock != NULL) {
412
ut_a(requested_lock_row != NULL);
414
row->requested_lock_row = requested_lock_row;
415
row->trx_wait_started = (ib_time_t) trx->wait_started;
418
ut_a(requested_lock_row == NULL);
420
row->requested_lock_row = NULL;
421
row->trx_wait_started = 0;
424
row->trx_weight = (ullint) ut_conv_dulint_to_longlong(TRX_WEIGHT(trx));
426
if (trx->mysql_thd != NULL) {
427
row->trx_mysql_thread_id
428
#if defined(BUILD_DRIZZLE)
429
= session_get_thread_id(trx->mysql_thd);
431
= thd_get_thread_id(trx->mysql_thd);
434
/* For internal transactions e.g., purge and transactions
435
being recovered at startup there is no associated MySQL
436
thread data structure. */
437
row->trx_mysql_thread_id = 0;
440
if (trx->mysql_query_str != NULL && *trx->mysql_query_str != NULL) {
442
if (strlen(*trx->mysql_query_str)
443
> TRX_I_S_TRX_QUERY_MAX_LEN) {
445
char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
447
memcpy(query, *trx->mysql_query_str,
448
TRX_I_S_TRX_QUERY_MAX_LEN);
449
query[TRX_I_S_TRX_QUERY_MAX_LEN] = '\0';
451
row->trx_query = ha_storage_put_memlim(
452
cache->storage, query,
453
TRX_I_S_TRX_QUERY_MAX_LEN + 1,
454
MAX_ALLOWED_FOR_STORAGE(cache));
457
row->trx_query = ha_storage_put_str_memlim(
458
cache->storage, *trx->mysql_query_str,
459
MAX_ALLOWED_FOR_STORAGE(cache));
462
if (row->trx_query == NULL) {
468
row->trx_query = NULL;
474
/*******************************************************************//**
475
Format the nth field of "rec" and put it in "buf". The result is always
476
NUL-terminated. Returns the number of bytes that were written to "buf"
477
(including the terminating NUL).
478
@return end of the result */
483
char* buf, /*!< out: buffer */
484
ulint buf_size,/*!< in: buffer size in bytes */
485
ulint n, /*!< in: number of field */
486
const dict_index_t* index, /*!< in: index */
487
const rec_t* rec, /*!< in: record */
488
const ulint* offsets)/*!< in: record offsets, returned
489
by rec_get_offsets() */
493
dict_field_t* dict_field;
496
ut_ad(rec_offs_validate(rec, NULL, offsets));
506
/* we must append ", " before the actual data */
514
memcpy(buf, ", ", 3);
521
/* now buf_size >= 1 */
523
data = rec_get_nth_field(rec, offsets, n, &data_len);
525
dict_field = dict_index_get_nth_field(index, n);
527
ret += row_raw_format((const char*) data, data_len,
528
dict_field, buf, buf_size);
533
/*******************************************************************//**
534
Fills the "lock_data" member of i_s_locks_row_t object.
535
If memory can not be allocated then FALSE is returned.
536
@return FALSE if allocation fails */
541
const char** lock_data,/*!< out: "lock_data" to fill */
542
const lock_t* lock, /*!< in: lock used to find the data */
543
ulint heap_no,/*!< in: rec num used to find the data */
544
trx_i_s_cache_t* cache) /*!< in/out: cache where to store
549
const buf_block_t* block;
553
ut_a(lock_get_type(lock) == LOCK_REC);
557
block = buf_page_try_get(lock_rec_get_space_id(lock),
558
lock_rec_get_page_no(lock),
570
page = (const page_t*) buf_block_get_frame(block);
572
rec = page_find_rec_with_heap_no(page, heap_no);
574
if (page_rec_is_infimum(rec)) {
576
*lock_data = ha_storage_put_str_memlim(
577
cache->storage, "infimum pseudo-record",
578
MAX_ALLOWED_FOR_STORAGE(cache));
579
} else if (page_rec_is_supremum(rec)) {
581
*lock_data = ha_storage_put_str_memlim(
582
cache->storage, "supremum pseudo-record",
583
MAX_ALLOWED_FOR_STORAGE(cache));
586
const dict_index_t* index;
589
ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
591
char buf[TRX_I_S_LOCK_DATA_MAX_LEN];
595
rec_offs_init(offsets_onstack);
596
offsets = offsets_onstack;
598
index = lock_rec_get_index(lock);
600
n_fields = dict_index_get_n_unique(index);
605
offsets = rec_get_offsets(rec, index, offsets, n_fields,
608
/* format and store the data */
611
for (i = 0; i < n_fields; i++) {
613
buf_used += put_nth_field(
614
buf + buf_used, sizeof(buf) - buf_used,
615
i, index, rec, offsets) - 1;
618
*lock_data = (const char*) ha_storage_put_memlim(
619
cache->storage, buf, buf_used + 1,
620
MAX_ALLOWED_FOR_STORAGE(cache));
622
if (UNIV_UNLIKELY(heap != NULL)) {
624
/* this means that rec_get_offsets() has created a new
625
heap and has stored offsets in it; check that this is
626
really the case and free the heap */
627
ut_a(offsets != offsets_onstack);
634
if (*lock_data == NULL) {
642
/*******************************************************************//**
643
Fills i_s_locks_row_t object. Returns its first argument.
644
If memory can not be allocated then FALSE is returned.
645
@return FALSE if allocation fails */
650
i_s_locks_row_t* row, /*!< out: result object that's filled */
651
const lock_t* lock, /*!< in: lock to get data from */
652
ulint heap_no,/*!< in: lock's record number
653
or ULINT_UNDEFINED if the lock
655
trx_i_s_cache_t* cache) /*!< in/out: cache into which to copy
658
row->lock_trx_id = lock_get_trx_id(lock);
659
row->lock_mode = lock_get_mode_str(lock);
660
row->lock_type = lock_get_type_str(lock);
662
row->lock_table = ha_storage_put_str_memlim(
663
cache->storage, lock_get_table_name(lock),
664
MAX_ALLOWED_FOR_STORAGE(cache));
666
/* memory could not be allocated */
667
if (row->lock_table == NULL) {
672
switch (lock_get_type(lock)) {
674
row->lock_index = ha_storage_put_str_memlim(
675
cache->storage, lock_rec_get_index_name(lock),
676
MAX_ALLOWED_FOR_STORAGE(cache));
678
/* memory could not be allocated */
679
if (row->lock_index == NULL) {
684
row->lock_space = lock_rec_get_space_id(lock);
685
row->lock_page = lock_rec_get_page_no(lock);
686
row->lock_rec = heap_no;
688
if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {
690
/* memory could not be allocated */
696
row->lock_index = NULL;
698
row->lock_space = ULINT_UNDEFINED;
699
row->lock_page = ULINT_UNDEFINED;
700
row->lock_rec = ULINT_UNDEFINED;
702
row->lock_data = NULL;
709
row->lock_table_id = lock_get_table_id(lock);
711
row->hash_chain.value = row;
716
/*******************************************************************//**
717
Fills i_s_lock_waits_row_t object. Returns its first argument.
718
@return result object that's filled */
720
i_s_lock_waits_row_t*
723
i_s_lock_waits_row_t* row, /*!< out: result object
725
const i_s_locks_row_t* requested_lock_row,/*!< in: pointer to the
726
relevant requested lock
727
row in innodb_locks */
728
const i_s_locks_row_t* blocking_lock_row)/*!< in: pointer to the
729
relevant blocking lock
730
row in innodb_locks */
732
row->requested_lock_row = requested_lock_row;
733
row->blocking_lock_row = blocking_lock_row;
738
/*******************************************************************//**
739
Calculates a hash fold for a lock. For a record lock the fold is
740
calculated from 4 elements, which uniquely identify a lock at a given
741
point in time: transaction id, space id, page number, record number.
742
For a table lock the fold is table's id.
748
const lock_t* lock, /*!< in: lock object to fold */
749
ulint heap_no)/*!< in: lock's record number
750
or ULINT_UNDEFINED if the lock
753
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
754
static ulint fold = 0;
760
switch (lock_get_type(lock)) {
762
ut_a(heap_no != ULINT_UNDEFINED);
764
ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
765
lock_rec_get_space_id(lock));
767
ret = ut_fold_ulint_pair(ret,
768
lock_rec_get_page_no(lock));
770
ret = ut_fold_ulint_pair(ret, heap_no);
774
/* this check is actually not necessary for continuing
775
correct operation, but something must have gone wrong if
777
ut_a(heap_no == ULINT_UNDEFINED);
779
ret = (ulint) lock_get_table_id(lock);
790
/*******************************************************************//**
791
Checks whether i_s_locks_row_t object represents a lock_t object.
792
@return TRUE if they match */
797
const i_s_locks_row_t* row, /*!< in: innodb_locks row */
798
const lock_t* lock, /*!< in: lock object */
799
ulint heap_no)/*!< in: lock's record number
800
or ULINT_UNDEFINED if the lock
803
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
806
switch (lock_get_type(lock)) {
808
ut_a(heap_no != ULINT_UNDEFINED);
810
return(row->lock_trx_id == lock_get_trx_id(lock)
811
&& row->lock_space == lock_rec_get_space_id(lock)
812
&& row->lock_page == lock_rec_get_page_no(lock)
813
&& row->lock_rec == heap_no);
816
/* this check is actually not necessary for continuing
817
correct operation, but something must have gone wrong if
819
ut_a(heap_no == ULINT_UNDEFINED);
821
return(row->lock_trx_id == lock_get_trx_id(lock)
822
&& row->lock_table_id == lock_get_table_id(lock));
831
/*******************************************************************//**
832
Searches for a row in the innodb_locks cache that has a specified id.
833
This happens in O(1) time since a hash table is used. Returns pointer to
834
the row or NULL if none is found.
835
@return row or NULL */
840
trx_i_s_cache_t* cache, /*!< in: cache */
841
const lock_t* lock, /*!< in: lock to search for */
842
ulint heap_no)/*!< in: lock's record number
843
or ULINT_UNDEFINED if the lock
846
i_s_hash_chain_t* hash_chain;
849
/* hash_chain->"next" */
854
fold_lock(lock, heap_no),
855
/* the type of the next variable */
857
/* auxiliary variable */
859
/* assertion on every traversed item */
861
/* this determines if we have found the lock */
862
locks_row_eq_lock(hash_chain->value, lock, heap_no));
864
if (hash_chain == NULL) {
870
return(hash_chain->value);
873
/*******************************************************************//**
874
Adds new element to the locks cache, enlarging it if necessary.
875
Returns a pointer to the added row. If the row is already present then
876
no row is added and a pointer to the existing row is returned.
877
If row can not be allocated then NULL is returned.
883
trx_i_s_cache_t* cache, /*!< in/out: cache */
884
const lock_t* lock, /*!< in: the element to add */
885
ulint heap_no)/*!< in: lock's record number
886
or ULINT_UNDEFINED if the lock
889
i_s_locks_row_t* dst_row;
891
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
893
for (i = 0; i < 10000; i++) {
895
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
896
/* quit if this lock is already present */
897
dst_row = search_innodb_locks(cache, lock, heap_no);
898
if (dst_row != NULL) {
904
dst_row = (i_s_locks_row_t*)
905
table_cache_create_empty_row(&cache->innodb_locks, cache);
907
/* memory could not be allocated */
908
if (dst_row == NULL) {
913
if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
915
/* memory could not be allocated */
916
cache->innodb_locks.rows_used--;
920
#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
922
/* the type used in the hash chain */
924
/* hash_chain->"next" */
929
fold_lock(lock, heap_no),
930
/* add this data to the hash */
931
&dst_row->hash_chain);
933
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
940
/*******************************************************************//**
941
Adds new pair of locks to the lock waits cache.
942
If memory can not be allocated then FALSE is returned.
943
@return FALSE if allocation fails */
946
add_lock_wait_to_cache(
947
/*===================*/
948
trx_i_s_cache_t* cache, /*!< in/out: cache */
949
const i_s_locks_row_t* requested_lock_row,/*!< in: pointer to the
950
relevant requested lock
951
row in innodb_locks */
952
const i_s_locks_row_t* blocking_lock_row)/*!< in: pointer to the
953
relevant blocking lock
954
row in innodb_locks */
956
i_s_lock_waits_row_t* dst_row;
958
dst_row = (i_s_lock_waits_row_t*)
959
table_cache_create_empty_row(&cache->innodb_lock_waits,
962
/* memory could not be allocated */
963
if (dst_row == NULL) {
968
fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
973
/*******************************************************************//**
974
Adds transaction's relevant (important) locks to cache.
975
If the transaction is waiting, then the wait lock is added to
976
innodb_locks and a pointer to the added row is returned in
977
requested_lock_row, otherwise requested_lock_row is set to NULL.
978
If rows can not be allocated then FALSE is returned and the value of
979
requested_lock_row is undefined.
980
@return FALSE if allocation fails */
983
add_trx_relevant_locks_to_cache(
984
/*============================*/
985
trx_i_s_cache_t* cache, /*!< in/out: cache */
986
const trx_t* trx, /*!< in: transaction */
987
i_s_locks_row_t** requested_lock_row)/*!< out: pointer to the
988
requested lock row, or NULL or
991
ut_ad(mutex_own(&kernel_mutex));
993
/* If transaction is waiting we add the wait lock and all locks
994
from another transactions that are blocking the wait lock. */
995
if (trx->que_state == TRX_QUE_LOCK_WAIT) {
997
const lock_t* curr_lock;
998
ulint wait_lock_heap_no;
999
i_s_locks_row_t* blocking_lock_row;
1000
lock_queue_iterator_t iter;
1002
ut_a(trx->wait_lock != NULL);
1005
= wait_lock_get_heap_no(trx->wait_lock);
1007
/* add the requested lock */
1009
= add_lock_to_cache(cache, trx->wait_lock,
1012
/* memory could not be allocated */
1013
if (*requested_lock_row == NULL) {
1018
/* then iterate over the locks before the wait lock and
1019
add the ones that are blocking it */
1021
lock_queue_iterator_reset(&iter, trx->wait_lock,
1024
curr_lock = lock_queue_iterator_get_prev(&iter);
1025
while (curr_lock != NULL) {
1027
if (lock_has_to_wait(trx->wait_lock,
1030
/* add the lock that is
1031
blocking trx->wait_lock */
1033
= add_lock_to_cache(
1035
/* heap_no is the same
1036
for the wait and waited
1040
/* memory could not be allocated */
1041
if (blocking_lock_row == NULL) {
1046
/* add the relation between both locks
1047
to innodb_lock_waits */
1048
if (!add_lock_wait_to_cache(
1049
cache, *requested_lock_row,
1050
blocking_lock_row)) {
1052
/* memory could not be allocated */
1057
curr_lock = lock_queue_iterator_get_prev(&iter);
1061
*requested_lock_row = NULL;
1067
/** The minimum time that a cache must not be updated after it has been
1068
read for the last time; measured in microseconds. We use this technique
1069
to ensure that SELECTs which join several INFORMATION SCHEMA tables read
1070
the same version of the cache. */
1071
#define CACHE_MIN_IDLE_TIME_US 100000 /* 0.1 sec */
1073
/*******************************************************************//**
1074
Checks if the cache can safely be updated.
1075
@return TRUE if can be updated */
1078
can_cache_be_updated(
1079
/*=================*/
1080
trx_i_s_cache_t* cache) /*!< in: cache */
1084
/* Here we read cache->last_read without acquiring its mutex
1085
because last_read is only updated when a shared rw lock on the
1086
whole cache is being held (see trx_i_s_cache_end_read()) and
1087
we are currently holding an exclusive rw lock on the cache.
1088
So it is not possible for last_read to be updated while we are
1091
#ifdef UNIV_SYNC_DEBUG
1092
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1095
now = ut_time_us(NULL);
1096
if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
1104
/*******************************************************************//**
1105
Declare a cache empty, preparing it to be filled up. Not all resources
1106
are freed because they can be reused. */
1109
trx_i_s_cache_clear(
1110
/*================*/
1111
trx_i_s_cache_t* cache) /*!< out: cache to clear */
1113
cache->innodb_trx.rows_used = 0;
1114
cache->innodb_locks.rows_used = 0;
1115
cache->innodb_lock_waits.rows_used = 0;
1117
hash_table_clear(cache->locks_hash);
1119
ha_storage_empty(&cache->storage);
1122
/*******************************************************************//**
1123
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
1124
table cache buffer. Cache must be locked for write. */
1127
fetch_data_into_cache(
1128
/*==================*/
1129
trx_i_s_cache_t* cache) /*!< in/out: cache */
1132
i_s_trx_row_t* trx_row;
1133
i_s_locks_row_t* requested_lock_row;
1135
ut_ad(mutex_own(&kernel_mutex));
1137
trx_i_s_cache_clear(cache);
1139
/* We iterate over the list of all transactions and add each one
1140
to innodb_trx's cache. We also add all locks that are relevant
1141
to each transaction into innodb_locks' and innodb_lock_waits'
1144
for (trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
1146
trx = UT_LIST_GET_NEXT(trx_list, trx)) {
1148
if (!add_trx_relevant_locks_to_cache(cache, trx,
1149
&requested_lock_row)) {
1151
cache->is_truncated = TRUE;
1155
trx_row = (i_s_trx_row_t*)
1156
table_cache_create_empty_row(&cache->innodb_trx,
1159
/* memory could not be allocated */
1160
if (trx_row == NULL) {
1162
cache->is_truncated = TRUE;
1166
if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
1168
/* memory could not be allocated */
1169
cache->innodb_trx.rows_used--;
1170
cache->is_truncated = TRUE;
1175
cache->is_truncated = FALSE;
1178
/*******************************************************************//**
1179
Update the transactions cache if it has not been read for some time.
1180
Called from handler/i_s.cc.
1181
@return 0 - fetched, 1 - not */
1184
trx_i_s_possibly_fetch_data_into_cache(
1185
/*===================================*/
1186
trx_i_s_cache_t* cache) /*!< in/out: cache */
1188
if (!can_cache_be_updated(cache)) {
1193
/* We are going to access trx->query in all transactions */
1194
innobase_mysql_prepare_print_arbitrary_thd();
1196
/* We need to read trx_sys and record/table lock queues */
1197
mutex_enter(&kernel_mutex);
1199
fetch_data_into_cache(cache);
1201
mutex_exit(&kernel_mutex);
1203
innobase_mysql_end_print_arbitrary_thd();
1208
/*******************************************************************//**
1209
Returns TRUE if the data in the cache is truncated due to the memory
1210
limit posed by TRX_I_S_MEM_LIMIT.
1211
@return TRUE if truncated */
1214
trx_i_s_cache_is_truncated(
1215
/*=======================*/
1216
trx_i_s_cache_t* cache) /*!< in: cache */
1218
return(cache->is_truncated);
1221
/*******************************************************************//**
1222
Initialize INFORMATION SCHEMA trx related cache. */
1227
trx_i_s_cache_t* cache) /*!< out: cache to init */
1229
/* The latching is done in the following order:
1230
acquire trx_i_s_cache_t::rw_lock, X
1231
acquire kernel_mutex
1232
release kernel_mutex
1233
release trx_i_s_cache_t::rw_lock
1234
acquire trx_i_s_cache_t::rw_lock, S
1235
acquire trx_i_s_cache_t::last_read_mutex
1236
release trx_i_s_cache_t::last_read_mutex
1237
release trx_i_s_cache_t::rw_lock */
1239
rw_lock_create(&cache->rw_lock, SYNC_TRX_I_S_RWLOCK);
1241
cache->last_read = 0;
1243
mutex_create(&cache->last_read_mutex, SYNC_TRX_I_S_LAST_READ);
1245
table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
1246
table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
1247
table_cache_init(&cache->innodb_lock_waits,
1248
sizeof(i_s_lock_waits_row_t));
1250
cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);
1252
cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
1253
CACHE_STORAGE_HASH_CELLS);
1255
cache->mem_allocd = 0;
1257
cache->is_truncated = FALSE;
1260
/*******************************************************************//**
1261
Issue a shared/read lock on the tables cache. */
1264
trx_i_s_cache_start_read(
1265
/*=====================*/
1266
trx_i_s_cache_t* cache) /*!< in: cache */
1268
rw_lock_s_lock(&cache->rw_lock);
1271
/*******************************************************************//**
1272
Release a shared/read lock on the tables cache. */
1275
trx_i_s_cache_end_read(
1276
/*===================*/
1277
trx_i_s_cache_t* cache) /*!< in: cache */
1281
#ifdef UNIV_SYNC_DEBUG
1282
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
1285
/* update cache last read time */
1286
now = ut_time_us(NULL);
1287
mutex_enter(&cache->last_read_mutex);
1288
cache->last_read = now;
1289
mutex_exit(&cache->last_read_mutex);
1291
rw_lock_s_unlock(&cache->rw_lock);
1294
/*******************************************************************//**
1295
Issue an exclusive/write lock on the tables cache. */
1298
trx_i_s_cache_start_write(
1299
/*======================*/
1300
trx_i_s_cache_t* cache) /*!< in: cache */
1302
rw_lock_x_lock(&cache->rw_lock);
1305
/*******************************************************************//**
1306
Release an exclusive/write lock on the tables cache. */
1309
trx_i_s_cache_end_write(
1310
/*====================*/
1311
trx_i_s_cache_t* cache) /*!< in: cache */
1313
#ifdef UNIV_SYNC_DEBUG
1314
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1317
rw_lock_x_unlock(&cache->rw_lock);
1320
/*******************************************************************//**
1321
Selects a INFORMATION SCHEMA table cache from the whole cache.
1322
@return table cache */
1327
trx_i_s_cache_t* cache, /*!< in: whole cache */
1328
enum i_s_table table) /*!< in: which table */
1330
i_s_table_cache_t* table_cache;
1332
#ifdef UNIV_SYNC_DEBUG
1333
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
1334
|| rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1338
case I_S_INNODB_TRX:
1339
table_cache = &cache->innodb_trx;
1341
case I_S_INNODB_LOCKS:
1342
table_cache = &cache->innodb_locks;
1344
case I_S_INNODB_LOCK_WAITS:
1345
table_cache = &cache->innodb_lock_waits;
1351
return(table_cache);
1354
/*******************************************************************//**
1355
Retrieves the number of used rows in the cache for a given
1356
INFORMATION SCHEMA table.
1357
@return number of rows */
1360
trx_i_s_cache_get_rows_used(
1361
/*========================*/
1362
trx_i_s_cache_t* cache, /*!< in: cache */
1363
enum i_s_table table) /*!< in: which table */
1365
i_s_table_cache_t* table_cache;
1367
table_cache = cache_select_table(cache, table);
1369
return(table_cache->rows_used);
1372
/*******************************************************************//**
1373
Retrieves the nth row (zero-based) in the cache for a given
1374
INFORMATION SCHEMA table.
1378
trx_i_s_cache_get_nth_row(
1379
/*======================*/
1380
trx_i_s_cache_t* cache, /*!< in: cache */
1381
enum i_s_table table, /*!< in: which table */
1382
ulint n) /*!< in: row number */
1384
i_s_table_cache_t* table_cache;
1388
table_cache = cache_select_table(cache, table);
1390
ut_a(n < table_cache->rows_used);
1394
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
1396
if (table_cache->chunks[i].offset
1397
+ table_cache->chunks[i].rows_allocd > n) {
1399
row = (char*) table_cache->chunks[i].base
1400
+ (n - table_cache->chunks[i].offset)
1401
* table_cache->row_size;
1411
/*******************************************************************//**
1412
Crafts a lock id string from a i_s_locks_row_t object. Returns its
1413
second argument. This function aborts if there is not enough space in
1414
lock_id. Be sure to provide at least TRX_I_S_LOCK_ID_MAX_LEN + 1 if you
1415
want to be 100% sure that it will not abort.
1416
@return resulting lock id */
1419
trx_i_s_create_lock_id(
1420
/*===================*/
1421
const i_s_locks_row_t* row, /*!< in: innodb_locks row */
1422
char* lock_id,/*!< out: resulting lock_id */
1423
ulint lock_id_size)/*!< in: size of the lock id
1428
/* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */
1430
if (row->lock_space != ULINT_UNDEFINED) {
1432
res_len = ut_snprintf(lock_id, lock_id_size,
1433
TRX_ID_FMT ":%lu:%lu:%lu",
1434
row->lock_trx_id, row->lock_space,
1435
row->lock_page, row->lock_rec);
1438
res_len = ut_snprintf(lock_id, lock_id_size,
1441
row->lock_table_id);
1444
/* the typecast is safe because snprintf(3) never returns
1447
ut_a((ulint) res_len < lock_id_size);