/*****************************************************************************

Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/********************************************************************//**
The index tree adaptive search

Created 2/17/1996 Heikki Tuuri
*************************************************************************/
#include "btr0sea.h"

#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"
/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
UNIV_INTERN ibool btr_search_enabled = TRUE;

/** Mutex protecting btr_search_enabled */
static mutex_t btr_search_enabled_mutex;

/** A dummy variable to fool the compiler */
UNIV_INTERN ulint btr_search_this_is_zero = 0;
#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
UNIV_INTERN ulint btr_search_n_succ = 0;
/** Number of failed adaptive hash index lookups */
UNIV_INTERN ulint btr_search_n_hash_fail = 0;
#endif /* UNIV_SEARCH_PERF_STAT */
/** padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte btr_sea_pad1[64];

/** The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
index records. */

/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t* btr_search_latch_temp;

/** padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte btr_sea_pad2[64];

/** The adaptive hash index */
UNIV_INTERN btr_search_sys_t* btr_search_sys;
/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT	16

/** The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT		100
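
/* Worked example (added for illustration; not in the original source): with
the defaults above, a page holding 320 user records becomes a candidate for
a hash index build only once block->n_hash_helps > 320 / 16 = 20 hash-assisted
accesses have hit the page AND info->n_hash_potential has reached 100
consecutive potentially successful hash searches on the whole index; see
btr_search_update_block_hash_info() below for where both limits are tested. */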
/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t* index,	/*!< in: index for which to build, or NULL if
				not known */
	buf_block_t* block,	/*!< in: index page, s- or x-latched */
	ulint n_fields,		/*!< in: hash this many full fields */
	ulint n_bytes,		/*!< in: hash this many bytes from the next
				field */
	ibool left_side);	/*!< in: hash for searches from left side? */
/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check makes
it probable that, when we have reserved the btr search system latch and we
need to allocate a new node to the hash table, it will succeed. However, the
check will not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	hash_table_t* table;
	mem_heap_t* heap;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we will not guarantee that there will
	be enough free space in the hash table. */

	if (heap->free_block == NULL) {
		buf_block_t* block = buf_block_alloc(0);

		rw_lock_x_lock(&btr_search_latch);

		if (heap->free_block == NULL) {
			heap->free_block = block;
		} else {
			buf_block_free(block);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}
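
/* Added note: the function above is a double-checked allocation. The frame
is allocated before btr_search_latch is taken, and the re-check under the
x-latch decides whether to keep the frame or give it back, so the latch is
never held across buf_block_alloc(). */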
/*****************************************************************//**
Creates and initializes the adaptive search system at a database start. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
	ulint hash_size)	/*!< in: hash index hash table size */
{
	/* We allocate the search latch from dynamic memory:
	see above at the global variable definition */

	btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));

	rw_lock_create(&btr_search_latch, SYNC_SEARCH_SYS);
	mutex_create(&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);

	btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));

	btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
}
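
/* Minimal usage sketch (added; the actual call site lives in the server
startup code, not in this file, so the exact sizing shown is an assumption):

	btr_search_sys_create(buf_pool_get_curr_size() / sizeof(void*) / 64);

i.e. the hash table is sized proportionally to the buffer pool. */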
/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = FALSE;

	/* Clear all block->is_hashed flags and remove all entries
	from btr_search_sys->hash_index. */
	buf_pool_drop_hash_index();

	/* btr_search_enabled_mutex should guarantee this. */
	ut_ad(!btr_search_enabled);

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
/********************************************************************//**
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = TRUE;

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
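
/* Added note: both btr_search_enable() and btr_search_disable() take
btr_search_enabled_mutex before the x-latch, so concurrent toggles serialize
on the mutex while readers of btr_search_enabled need only btr_search_latch. */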
/*****************************************************************//**
Creates and initializes a search info struct.
@return own: search info struct */
UNIV_INTERN
btr_search_t*
btr_search_info_create(
/*===================*/
	mem_heap_t* heap)	/*!< in: heap where created */
{
	btr_search_t* info;

	info = mem_heap_alloc(heap, sizeof(btr_search_t));

#ifdef UNIV_DEBUG
	info->magic_n = BTR_SEARCH_MAGIC_N;
#endif /* UNIV_DEBUG */

	info->ref_count = 0;
	info->root_guess = NULL;

	info->hash_analysis = 0;
	info->n_hash_potential = 0;

	info->last_hash_succ = FALSE;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ = 0;
	info->n_hash_fail = 0;
	info->n_patt_succ = 0;
	info->n_searches = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

	/* Set some sensible values */
	info->n_fields = 1;
	info->n_bytes = 0;

	info->left_side = TRUE;

	return(info);
}
/*****************************************************************//**
Returns the value of ref_count. The value is protected by
btr_search_latch.
@return ref_count value. */
UNIV_INTERN
ulint
btr_search_info_get_ref_count(
/*==========================*/
	btr_search_t* info)	/*!< in: search info. */
{
	ulint ret;

	ut_ad(info);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);
	ret = info->ref_count;
	rw_lock_s_unlock(&btr_search_latch);

	return(ret);
}
/*********************************************************************//**
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
	btr_search_t* info,	/*!< in/out: search info */
	btr_cur_t* cursor)	/*!< in: cursor which was just positioned */
{
	dict_index_t* index;
	ulint n_unique;
	int cmp;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = cursor->index;

	if (dict_index_is_ibuf(index)) {
		/* So many deletes are performed on an insert buffer tree
		that we do not consider a hash index useful on it: */

		return;
	}

	n_unique = dict_index_get_n_unique_in_tree(index);

	if (info->n_hash_potential == 0) {

		goto set_new_recomm;
	}

	/* Test if the search would have succeeded using the recommended
	hash prefix */

	if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
		info->n_hash_potential++;

		return;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->low_match, cursor->low_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto set_new_recomm;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->up_match, cursor->up_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto increment_potential;
	}

	return;

set_new_recomm:
	/* We have to set a new recommendation; skip the hash analysis
	for a while to avoid unnecessary CPU time usage when there is no
	chance for success */

	info->hash_analysis = 0;

	cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
			  cursor->low_match, cursor->low_bytes);
	if (cmp == 0) {
		info->n_hash_potential = 0;

		/* For extra safety, we set some sensible values here */

		info->n_fields = 1;
		info->n_bytes = 0;

		info->left_side = TRUE;

	} else if (cmp > 0) {
		info->n_hash_potential = 1;

		if (cursor->up_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match < cursor->up_match) {

			info->n_fields = cursor->low_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->low_match;
			info->n_bytes = cursor->low_bytes + 1;
		}

		info->left_side = TRUE;
	} else {
		info->n_hash_potential = 1;

		if (cursor->low_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;
		} else if (cursor->low_match > cursor->up_match) {

			info->n_fields = cursor->up_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->up_match;
			info->n_bytes = cursor->up_bytes + 1;
		}

		info->left_side = FALSE;
	}
}
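
/* Worked example (added): with n_unique = 5, cursor->low_match = 2 and
cursor->up_match = 4, cmp > 0 above, so the new recommendation becomes
n_fields = low_match + 1 = 3, n_bytes = 0 and left_side = TRUE: hash the
first three complete fields, keeping the hash pointer on the leftmost record
of each group of records with an equal prefix. */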
/*********************************************************************//**
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent.
@return TRUE if building a (new) hash index on the block is recommended */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
	btr_search_t* info,	/*!< in: search info */
	buf_block_t* block,	/*!< in: buffer block */
	btr_cur_t* cursor __attribute__((unused)))
				/*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
	      || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	info->last_hash_succ = FALSE;

	ut_a(buf_block_state_valid(block));
	ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

	if ((block->n_hash_helps > 0)
	    && (info->n_hash_potential > 0)
	    && (block->n_fields == info->n_fields)
	    && (block->n_bytes == info->n_bytes)
	    && (block->left_side == info->left_side)) {

		if ((block->is_hashed)
		    && (block->curr_n_fields == info->n_fields)
		    && (block->curr_n_bytes == info->n_bytes)
		    && (block->curr_left_side == info->left_side)) {

			/* The search would presumably have succeeded using
			the hash index */

			info->last_hash_succ = TRUE;
		}

		block->n_hash_helps++;
	} else {
		block->n_hash_helps = 1;
		block->n_fields = info->n_fields;
		block->n_bytes = info->n_bytes;
		block->left_side = info->left_side;
	}

#ifdef UNIV_DEBUG
	if (cursor->index->table->does_not_fit_in_memory) {
		block->n_hash_helps = 0;
	}
#endif /* UNIV_DEBUG */

	if ((block->n_hash_helps > page_get_n_recs(block->frame)
	     / BTR_SEARCH_PAGE_BUILD_LIMIT)
	    && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

		if ((!block->is_hashed)
		    || (block->n_hash_helps
			> 2 * page_get_n_recs(block->frame))
		    || (block->n_fields != block->curr_n_fields)
		    || (block->n_bytes != block->curr_n_bytes)
		    || (block->left_side != block->curr_left_side)) {

			/* Build a new hash index on the page */

			return(TRUE);
		}
	}

	return(FALSE);
}
/*********************************************************************//**
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
	btr_search_t* info,	/*!< in: search info */
	buf_block_t* block,	/*!< in: buffer block where cursor positioned */
	btr_cur_t* cursor)	/*!< in: cursor */
{
	ulint fold;
	rec_t* rec;
	dulint index_id;

	ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(page_align(btr_cur_get_rec(cursor))
	      == buf_block_get_frame(block));

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	if ((info->n_hash_potential > 0)
	    && (block->curr_n_fields == info->n_fields)
	    && (block->curr_n_bytes == info->n_bytes)
	    && (block->curr_left_side == info->left_side)) {
		mem_heap_t* heap = NULL;
		ulint offsets_[REC_OFFS_NORMAL_SIZE];
		rec_offs_init(offsets_);

		rec = btr_cur_get_rec(cursor);

		if (!page_rec_is_user_rec(rec)) {

			return;
		}

		index_id = cursor->index->id;
		fold = rec_fold(rec,
				rec_get_offsets(rec, cursor->index, offsets_,
						ULINT_UNDEFINED, &heap),
				block->curr_n_fields,
				block->curr_n_bytes, index_id);
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
#ifdef UNIV_SYNC_DEBUG
		ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

		ha_insert_for_fold(btr_search_sys->hash_index, fold,
				   block, rec);
	}
}
/*********************************************************************//**
Updates the search info. */
static
void
btr_search_info_update_slow(
/*========================*/
	btr_search_t* info,	/*!< in/out: search info */
	btr_cur_t* cursor)	/*!< in: cursor which was just positioned */
{
	buf_block_t* block;
	ibool build_index;
	ulint* params;
	ulint* params2;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	block = btr_cur_get_block(cursor);

	/* NOTE that the following two function calls do NOT protect
	info or block->n_fields etc. with any semaphore, to save CPU time!
	We cannot assume the fields are consistent when we return from
	those calls. */

	btr_search_info_update_hash(info, cursor);

	build_index = btr_search_update_block_hash_info(info, block, cursor);

	if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

		btr_search_check_free_space_in_heap();
	}

	if (cursor->flag == BTR_CUR_HASH_FAIL) {
		/* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
		btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

		rw_lock_x_lock(&btr_search_latch);

		btr_search_update_hash_ref(info, block, cursor);

		rw_lock_x_unlock(&btr_search_latch);
	}

	if (build_index) {
		/* Note that since we did not protect block->n_fields etc.
		with any semaphore, the values can be inconsistent. We have
		to check inside the function call that they make sense. We
		also malloc an array and store the values there to make sure
		the compiler does not let the function call parameters change
		inside the called function. It might be that the compiler
		would optimize the call just to pass pointers to block. */

		params = mem_alloc(3 * sizeof(ulint));
		params[0] = block->n_fields;
		params[1] = block->n_bytes;
		params[2] = block->left_side;

		/* Make sure the compiler cannot deduce the values and do
		optimizations */

		params2 = params + btr_search_this_is_zero;

		btr_search_build_page_hash_index(cursor->index,
						 block,
						 params2[0],
						 params2[1],
						 params2[2]);
		mem_free(params);
	}
}
/******************************************************************//**
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible values.
@return TRUE if success */
static
ibool
btr_search_check_guess(
/*===================*/
	btr_cur_t* cursor,	/*!< in: guessed cursor position */
	ibool can_only_compare_to_cursor_rec,
				/*!< in: if we do not have a latch on the page
				of cursor, but only a latch on
				btr_search_latch, then ONLY the columns
				of the record UNDER the cursor are
				protected, not the next or previous record
				in the chain: we cannot look at the next or
				previous record to check our guess! */
	const dtuple_t* tuple,	/*!< in: data tuple */
	ulint mode,		/*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
				or PAGE_CUR_GE */
	mtr_t* mtr)		/*!< in: mtr */
{
	rec_t* rec;
	ulint n_unique;
	ulint match;
	ulint bytes;
	int cmp;
	mem_heap_t* heap = NULL;
	ulint offsets_[REC_OFFS_NORMAL_SIZE];
	ulint* offsets = offsets_;
	ibool success = FALSE;
	rec_offs_init(offsets_);

	n_unique = dict_index_get_n_unique_in_tree(cursor->index);

	rec = btr_cur_get_rec(cursor);

	ut_ad(page_rec_is_user_rec(rec));

	match = 0;
	bytes = 0;

	offsets = rec_get_offsets(rec, cursor->index, offsets,
				  n_unique, &heap);
	cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
					     offsets, &match, &bytes);

	if (mode == PAGE_CUR_GE) {
		if (cmp == 1) {
			goto exit_func;
		}

		cursor->up_match = match;

		if (match >= n_unique) {
			success = TRUE;
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_LE) {
		if (cmp == -1) {
			goto exit_func;
		}

		cursor->low_match = match;

	} else if (mode == PAGE_CUR_G) {
		if (cmp != -1) {
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_L) {
		if (cmp != 1) {
			goto exit_func;
		}
	}

	if (can_only_compare_to_cursor_rec) {
		/* Since we could not determine if our guess is right just by
		looking at the record under the cursor, return FALSE */
		goto exit_func;
	}

	match = 0;
	bytes = 0;

	if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
		rec_t* prev_rec;

		ut_ad(!page_rec_is_infimum(rec));

		prev_rec = page_rec_get_prev(rec);

		if (page_rec_is_infimum(prev_rec)) {
			success = btr_page_get_prev(page_align(prev_rec), mtr)
				== FIL_NULL;

			goto exit_func;
		}

		offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_GE) {
			success = cmp == 1;
		} else {
			success = cmp != -1;
		}

		goto exit_func;
	} else {
		rec_t* next_rec;

		ut_ad(!page_rec_is_supremum(rec));

		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {
			if (btr_page_get_next(page_align(next_rec), mtr)
			    == FIL_NULL) {

				cursor->up_match = 0;
				success = TRUE;
			}

			goto exit_func;
		}

		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_LE) {
			success = cmp == -1;
			cursor->up_match = match;
		} else {
			success = cmp != 1;
		}
	}
exit_func:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(success);
}
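
/* Added note: the page-boundary cases above are why a hash guess cannot
always be confirmed locally. E.g. for PAGE_CUR_GE, if the record preceding
the guess is the page infimum, the true position might lie on the previous
page, so the guess is accepted only when btr_page_get_prev() == FIL_NULL,
i.e. the page is the leftmost leaf. */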
/******************************************************************//**
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@return TRUE if succeeded */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
	dict_index_t* index,	/*!< in: index */
	btr_search_t* info,	/*!< in: index search info */
	const dtuple_t* tuple,	/*!< in: logical record */
	ulint mode,		/*!< in: PAGE_CUR_L, ... */
	ulint latch_mode,	/*!< in: BTR_SEARCH_LEAF, ...;
				NOTE that only if has_search_latch
				is 0, we will have a latch set on
				the cursor page, otherwise we assume
				the caller uses his search latch
				to protect the record! */
	btr_cur_t* cursor,	/*!< out: tree cursor */
	ulint has_search_latch,/*!< in: latch mode the caller
				currently has on btr_search_latch:
				RW_S_LATCH, RW_X_LATCH, or 0 */
	mtr_t* mtr)		/*!< in: mtr */
{
	buf_block_t* block;
	rec_t* rec;
	ulint fold;
	dulint index_id;
#ifdef notdefined
	btr_cur_t cursor2;
	btr_pcur_t pcur;
#endif
	ut_ad(index && info && tuple && cursor && mtr);
	ut_ad((latch_mode == BTR_SEARCH_LEAF)
	      || (latch_mode == BTR_MODIFY_LEAF));

	/* Note that, for efficiency, the struct info may not be protected by
	any latch here! */

	if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

		return(FALSE);
	}

	cursor->n_fields = info->n_fields;
	cursor->n_bytes = info->n_bytes;

	if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
			  < cursor->n_fields + (cursor->n_bytes > 0))) {

		return(FALSE);
	}

	index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ++;
#endif
	fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

	cursor->fold = fold;
	cursor->flag = BTR_CUR_HASH;

	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_lock(&btr_search_latch);

		if (UNIV_UNLIKELY(!btr_search_enabled)) {
			goto failure_unlock;
		}
	}

	ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
	ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);

	rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);

	if (UNIV_UNLIKELY(!rec)) {
		goto failure_unlock;
	}

	block = buf_block_align(rec);

	if (UNIV_LIKELY(!has_search_latch)) {

		if (UNIV_UNLIKELY(
			    !buf_page_get_known_nowait(latch_mode, block,
						       BUF_MAKE_YOUNG,
						       __FILE__, __LINE__,
						       mtr))) {
			goto failure_unlock;
		}

		rw_lock_s_unlock(&btr_search_latch);

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
	}

	if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
		ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);

		if (UNIV_LIKELY(!has_search_latch)) {

			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	ut_ad(page_rec_is_user_rec(rec));

	btr_cur_position(index, rec, block, cursor);

	/* Check the validity of the guess within the page */

	/* If we only have the latch on btr_search_latch, not on the
	page, it only protects the columns of the record the cursor
	is positioned on. We cannot look at the next or the previous
	record to determine if our guess for the cursor position is
	right. */
	if (UNIV_UNLIKELY(ut_dulint_cmp(
				  index_id,
				  btr_page_get_index_id(block->frame)) != 0)
	    || !btr_search_check_guess(cursor,
				       has_search_latch,
				       tuple, mode, mtr)) {
		if (UNIV_LIKELY(!has_search_latch)) {
			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

		info->n_hash_potential++;
	}

#ifdef notdefined
	/* These lines of code can be used in a debug version to check
	the correctness of the searched cursor position: */

	info->last_hash_succ = FALSE;

	/* Currently, does not work if the following fails: */
	ut_ad(!has_search_latch);

	btr_leaf_page_release(block, latch_mode, mtr);

	btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
				    &cursor2, 0, mtr);
	if (mode == PAGE_CUR_GE
	    && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

		/* If mode is PAGE_CUR_GE, then the binary search
		in the index tree may actually take us to the supremum
		of the previous page */

		info->last_hash_succ = FALSE;

		btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
					  &pcur, mtr);
		ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
	} else {
		ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
	}

	/* NOTE that it is theoretically possible that the above assertions
	fail if the page of the cursor gets removed from the buffer pool
	meanwhile! Thus it might not be a bug. */
#endif
	info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
	btr_search_n_succ++;
#endif
	if (UNIV_LIKELY(!has_search_latch)
	    && buf_page_peek_if_too_old(&block->page)) {

		buf_page_make_young(&block->page);
	}

	/* Increment the page get statistics though we did not really
	fix the page: for user info only */

	buf_pool->n_page_gets++;

	return(TRUE);

	/*-------------------------------------------*/
failure_unlock:
	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_unlock(&btr_search_latch);
	}
failure:
	cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_fail++;

	if (info->n_hash_succ > 0) {
		info->n_hash_succ--;
	}
#endif /* UNIV_SEARCH_PERF_STAT */

	info->last_hash_succ = FALSE;

	return(FALSE);
}
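
/* Added usage note: callers such as btr_cur_search_to_nth_level() try this
hash guess first and fall back to a full B-tree descent when it returns
FALSE; on failure cursor->flag is set to BTR_CUR_HASH_FAIL, which later
feeds btr_search_info_update_slow() above. */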
/********************************************************************//**
Drops a page hash index. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
	buf_block_t* block)	/*!< in: block containing index page,
				s- or x-latched, or an index page
				for which we know that
				block->buf_fix_count == 0 */
{
	hash_table_t* table;
	ulint n_fields;
	ulint n_bytes;
	const page_t* page;
	const rec_t* rec;
	ulint fold;
	ulint prev_fold;
	dulint index_id;
	ulint n_cached;
	ulint n_recs;
	ulint* folds;
	ulint i;
	mem_heap_t* heap;
	const dict_index_t* index;
	ulint* offsets;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
	rw_lock_s_lock(&btr_search_latch);
	page = block->frame;

	if (UNIV_LIKELY(!block->is_hashed)) {

		rw_lock_s_unlock(&btr_search_latch);

		return;
	}

	table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX)
	      || (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	index = block->index;
	ut_a(!dict_index_is_ibuf(index));

	/* NOTE: The fields of block must not be accessed after
	releasing btr_search_latch, as the index page might only
	be s-latched! */

	rw_lock_s_unlock(&btr_search_latch);

	ut_a(n_fields + n_bytes > 0);

	n_recs = page_get_n_recs(page);

	/* Calculate and cache fold values into an array for fast deletion
	from the hash index */

	folds = mem_alloc(n_recs * sizeof(ulint));

	n_cached = 0;

	rec = page_get_infimum_rec(page);
	rec = page_rec_get_next_low(rec, page_is_comp(page));

	index_id = btr_page_get_index_id(page);

	ut_a(0 == ut_dulint_cmp(index_id, index->id));

	prev_fold = 0;

	heap = NULL;
	offsets = NULL;

	while (!page_rec_is_supremum(rec)) {
		offsets = rec_get_offsets(rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

		if (fold == prev_fold && prev_fold != 0) {

			goto next_rec;
		}

		/* Remove all hash nodes pointing to this page from the
		hash chain */

		folds[n_cached] = fold;
		n_cached++;
next_rec:
		rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
		prev_fold = fold;
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(!block->is_hashed)) {
		/* Someone else has meanwhile dropped the hash index */

		goto cleanup;
	}

	ut_a(block->index == index);

	if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
	    || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

		/* Someone else has meanwhile built a new hash index on the
		page, with different parameters */

		rw_lock_x_unlock(&btr_search_latch);

		mem_free(folds);
		goto retry;
	}

	for (i = 0; i < n_cached; i++) {

		ha_remove_all_nodes_to_page(table, folds[i], page);
	}

	ut_a(index->search_info->ref_count > 0);
	index->search_info->ref_count--;

	block->is_hashed = FALSE;
	block->index = NULL;

cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (UNIV_UNLIKELY(block->n_pointers)) {
		/* Corruption */
		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Corruption of adaptive hash index."
			" After dropping\n"
			"InnoDB: the hash index to a page of %s,"
			" still %lu hash nodes remain.\n",
			index->name, (ulong) block->n_pointers);
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_validate();
	} else {
		rw_lock_x_unlock(&btr_search_latch);
	}
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	mem_free(folds);
}
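
/* Added note: the retry label above handles the race where another thread
rebuilds the hash index with different parameters between the s-latched
inspection and the x-latched removal; in that case the cached fold array is
discarded and recomputed from the new parameters. */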
/********************************************************************//**
Drops a page hash index when a page is freed from a fseg to the file system.
Drops possible hash index if the page happens to be in the buffer pool. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
/*=================================*/
	ulint space,		/*!< in: space id */
	ulint zip_size,		/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	ulint page_no)		/*!< in: page number */
{
	buf_block_t* block;
	mtr_t mtr;

	if (!buf_page_peek_if_search_hashed(space, page_no)) {

		return;
	}

	mtr_start(&mtr);

	/* We assume that if the caller has a latch on the page, then the
	caller has already dropped the hash index for the page, and we never
	get here. Therefore we can acquire the s-latch to the page without
	having to fear a deadlock. */

	block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
				 BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
				 &mtr);
	/* Because the buffer pool mutex was released by
	buf_page_peek_if_search_hashed(), it is possible that the
	block was removed from the buffer pool by another thread
	before buf_page_get_gen() got a chance to acquire the buffer
	pool mutex again. Thus, we must check for a NULL return. */

	if (UNIV_LIKELY(block != NULL)) {

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);

		btr_search_drop_page_hash_index(block);
	}

	mtr_commit(&mtr);
}
/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t* index,	/*!< in: index for which to build */
	buf_block_t* block,	/*!< in: index page, s- or x-latched */
	ulint n_fields,		/*!< in: hash this many full fields */
	ulint n_bytes,		/*!< in: hash this many bytes from the next
				field */
	ibool left_side)	/*!< in: hash for searches from left side? */
{
	hash_table_t* table;
	page_t* page;
	rec_t* rec;
	rec_t* next_rec;
	ulint fold;
	ulint next_fold;
	dulint index_id;
	ulint n_cached;
	ulint n_recs;
	ulint* folds;
	rec_t** recs;
	ulint i;
	mem_heap_t* heap = NULL;
	ulint offsets_[REC_OFFS_NORMAL_SIZE];
	ulint* offsets = offsets_;
	rec_offs_init(offsets_);

	ut_ad(index);
	ut_a(!dict_index_is_ibuf(index));

	table = btr_search_sys->hash_index;
	page = buf_block_get_frame(block);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);

	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_left_side != left_side))) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);
	} else {
		rw_lock_s_unlock(&btr_search_latch);
	}

	n_recs = page_get_n_recs(page);

	if (n_recs == 0) {

		return;
	}

	/* Check that the values for hash index build are sensible */

	if (n_fields + n_bytes == 0) {

		return;
	}

	if (dict_index_get_n_unique_in_tree(index) < n_fields
	    || (dict_index_get_n_unique_in_tree(index) == n_fields
		&& n_bytes > 0)) {
		return;
	}

	/* Calculate and cache fold values and corresponding records into
	an array for fast insertion to the hash index */

	folds = mem_alloc(n_recs * sizeof(ulint));
	recs = mem_alloc(n_recs * sizeof(rec_t*));

	n_cached = 0;

	index_id = btr_page_get_index_id(page);

	rec = page_rec_get_next(page_get_infimum_rec(page));

	offsets = rec_get_offsets(rec, index, offsets,
				  n_fields + (n_bytes > 0), &heap);

	if (!page_rec_is_supremum(rec)) {
		ut_a(n_fields <= rec_offs_n_fields(offsets));

		if (n_bytes > 0) {
			ut_a(n_fields < rec_offs_n_fields(offsets));
		}
	}

	fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

	if (left_side) {

		folds[n_cached] = fold;
		recs[n_cached] = rec;
		n_cached++;
	}

	for (;;) {
		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {

			if (!left_side) {

				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}

			break;
		}

		offsets = rec_get_offsets(next_rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index_id);

		if (fold != next_fold) {
			/* Insert an entry into the hash index */

			if (left_side) {

				folds[n_cached] = next_fold;
				recs[n_cached] = next_rec;
				n_cached++;
			} else {
				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}
		}

		rec = next_rec;
		fold = next_fold;
	}

	btr_search_check_free_space_in_heap();

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(!btr_search_enabled)) {
		goto exit_func;
	}

	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_left_side != left_side))) {
		goto exit_func;
	}

	/* This counter is decremented every time we drop page
	hash index entries and is incremented here. Since we can
	rebuild hash index for a page that is already hashed, we
	have to take care not to increment the counter in that
	case. */
	if (!block->is_hashed) {
		index->search_info->ref_count++;
	}

	block->is_hashed = TRUE;
	block->n_hash_helps = 0;

	block->curr_n_fields = n_fields;
	block->curr_n_bytes = n_bytes;
	block->curr_left_side = left_side;
	block->index = index;

	for (i = 0; i < n_cached; i++) {

		ha_insert_for_fold(table, folds[i], block, recs[i]);
	}

exit_func:
	rw_lock_x_unlock(&btr_search_latch);

	mem_free(folds);
	mem_free(recs);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}
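
/* Added note: the build above is two-phase. Fold values and record pointers
are collected into the folds[]/recs[] arrays without holding
btr_search_latch; only the final insertion loop runs under the x-latch,
after re-validating btr_search_enabled and the block parameters, which keeps
the exclusive section short. */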
/********************************************************************//**
Moves or deletes hash entries for moved records. If new_page is already hashed,
then the hash index for page, if any, is dropped. If new_page is not hashed,
and page is hashed, then a new hash index is built to new_page with the same
parameters as page (this often happens when a page is split). */
UNIV_INTERN
void
btr_search_move_or_delete_hash_entries(
/*===================================*/
	buf_block_t* new_block,	/*!< in: records are copied
				to this page */
	buf_block_t* block,	/*!< in: index page from which
				records were copied, and the
				copied records will be deleted
				from this page */
	dict_index_t* index)	/*!< in: record descriptor */
{
	ulint n_fields;
	ulint n_bytes;
	ibool left_side;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
	ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_a(!new_block->is_hashed || new_block->index == index);
	ut_a(!block->is_hashed || block->index == index);
	ut_a(!(new_block->is_hashed || block->is_hashed)
	     || !dict_index_is_ibuf(index));

	rw_lock_s_lock(&btr_search_latch);

	if (new_block->is_hashed) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);

		return;
	}

	if (block->is_hashed) {

		n_fields = block->curr_n_fields;
		n_bytes = block->curr_n_bytes;
		left_side = block->curr_left_side;

		new_block->n_fields = block->curr_n_fields;
		new_block->n_bytes = block->curr_n_bytes;
		new_block->left_side = left_side;

		rw_lock_s_unlock(&btr_search_latch);

		ut_a(n_fields + n_bytes > 0);

		btr_search_build_page_hash_index(index, new_block, n_fields,
						 n_bytes, left_side);
		ut_ad(n_fields == block->curr_n_fields);
		ut_ad(n_bytes == block->curr_n_bytes);
		ut_ad(left_side == block->curr_left_side);
		return;
	}

	rw_lock_s_unlock(&btr_search_latch);
}
/********************************************************************//**
Updates the page hash index when a single record is deleted from a page. */
UNIV_INTERN
void
btr_search_update_hash_on_delete(
/*=============================*/
	btr_cur_t* cursor)	/*!< in: cursor which was positioned on the
				record to delete using btr_cur_search_...,
				the record is not yet deleted */
{
	hash_table_t* table;
	buf_block_t* block;
	rec_t* rec;
	ulint fold;
	dulint index_id;
	ibool found;
	ulint offsets_[REC_OFFS_NORMAL_SIZE];
	mem_heap_t* heap = NULL;
	rec_offs_init(offsets_);

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
	ut_a(!dict_index_is_ibuf(cursor->index));

	table = btr_search_sys->hash_index;

	index_id = cursor->index->id;
	fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
					     ULINT_UNDEFINED, &heap),
			block->curr_n_fields, block->curr_n_bytes, index_id);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	rw_lock_x_lock(&btr_search_latch);

	found = ha_search_and_delete_if_found(table, fold, rec);

	rw_lock_x_unlock(&btr_search_latch);
}
/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_node_on_insert(
/*==================================*/
	btr_cur_t* cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t* table;
	buf_block_t* block;
	rec_t* rec;

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	rw_lock_x_lock(&btr_search_latch);

	if ((cursor->flag == BTR_CUR_HASH)
	    && (cursor->n_fields == block->curr_n_fields)
	    && (cursor->n_bytes == block->curr_n_bytes)
	    && !block->curr_left_side) {

		table = btr_search_sys->hash_index;

		ha_search_and_update_if_found(table, cursor->fold, rec,
					      block, page_rec_get_next(rec));

		rw_lock_x_unlock(&btr_search_latch);
	} else {
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_update_hash_on_insert(cursor);
	}
}
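
/* Added note: the fast path above applies when the cursor was positioned
via the hash (BTR_CUR_HASH) with unchanged parameters and right-side
hashing; the existing node for cursor->fold is then simply repointed to the
newly inserted record. All other cases fall through to the general
btr_search_update_hash_on_insert() below. */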
/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
	btr_cur_t* cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t* table;
	buf_block_t* block;
	rec_t* rec;
	rec_t* ins_rec;
	rec_t* next_rec;
	dulint index_id;
	ulint fold;
	ulint ins_fold;
	ulint next_fold = 0; /* remove warning (??? bug ???) */
	ulint n_fields;
	ulint n_bytes;
	ibool left_side;
	ibool locked = FALSE;
	mem_heap_t* heap = NULL;
	ulint offsets_[REC_OFFS_NORMAL_SIZE];
	ulint* offsets = offsets_;
	rec_offs_init(offsets_);

	table = btr_search_sys->hash_index;

	btr_search_check_free_space_in_heap();

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	index_id = cursor->index->id;

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	left_side = block->curr_left_side;

	ins_rec = page_rec_get_next(rec);
	next_rec = page_rec_get_next(ins_rec);

	offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
				  ULINT_UNDEFINED, &heap);
	ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);

	if (!page_rec_is_supremum(next_rec)) {
		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index_id);
	}

	if (!page_rec_is_infimum(rec)) {
		offsets = rec_get_offsets(rec, cursor->index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
	} else {
		if (left_side) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto check_next_rec;
	}

	if (fold != ins_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;
		}

		if (!left_side) {
			ha_insert_for_fold(table, fold, block, rec);
		} else {
			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}
	}

check_next_rec:
	if (page_rec_is_supremum(next_rec)) {

		if (!left_side) {

			if (!locked) {
				rw_lock_x_lock(&btr_search_latch);

				locked = TRUE;
			}

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto function_exit;
	}

	if (ins_fold != next_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;
		}

		if (!left_side) {

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
			/*
			fputs("Hash insert for ", stderr);
			dict_index_name_print(stderr, cursor->index);
			fprintf(stderr, " fold %lu\n", ins_fold);
			*/
		} else {
			ha_insert_for_fold(table, next_fold, block, next_rec);
		}
	}

function_exit:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	if (locked) {
		rw_lock_x_unlock(&btr_search_latch);
	}
}
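
/* Added worked example: suppose left_side == FALSE and the records around
the insertion point fold as rec = A, ins_rec = B, next_rec = B. Then
fold != ins_fold, so rec regains an entry of its own, while
ins_fold == next_fold means the new record needs none: its group is still
represented by the group's last member. The locked flag ensures
btr_search_latch is x-latched at most once across the three checks. */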
/********************************************************************//**
Validates the search system.
@return TRUE if ok */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
{
	ha_node_t* node;
	ulint n_page_dumps = 0;
	ibool ok = TRUE;
	ulint i;
	ulint cell_count;
	mem_heap_t* heap = NULL;
	ulint offsets_[REC_OFFS_NORMAL_SIZE];
	ulint* offsets = offsets_;

	/* How many cells to check before temporarily releasing
	btr_search_latch. */
	ulint chunk_size = 10000;

	rec_offs_init(offsets_);
	rw_lock_x_lock(&btr_search_latch);
	buf_pool_mutex_enter();

	cell_count = hash_get_n_cells(btr_search_sys->hash_index);

	for (i = 0; i < cell_count; i++) {
		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if ((i != 0) && ((i % chunk_size) == 0)) {
			buf_pool_mutex_exit();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_mutex_enter();
		}

		node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

		for (; node != NULL; node = node->next) {
			const buf_block_t* block
				= buf_block_align(node->data);
			const buf_block_t* hash_block;

			if (UNIV_LIKELY(buf_block_get_state(block)
					== BUF_BLOCK_FILE_PAGE)) {

				/* The space and offset are only valid
				for file blocks. It is possible that
				the block is being freed
				(BUF_BLOCK_REMOVE_HASH, see the
				assertion and the comment below) */
				hash_block = buf_block_hash_get(
					buf_block_get_space(block),
					buf_block_get_page_no(block));
			} else {
				hash_block = NULL;
			}

			if (hash_block) {
				ut_a(hash_block == block);
			} else {
				/* When a block is being freed,
				buf_LRU_search_and_free_block() first
				removes the block from
				buf_pool->page_hash by calling
				buf_LRU_block_remove_hashed_page().
				After that, it invokes
				btr_search_drop_page_hash_index() to
				remove the block from
				btr_search_sys->hash_index. */

				ut_a(buf_block_get_state(block)
				     == BUF_BLOCK_REMOVE_HASH);
			}

			ut_a(!dict_index_is_ibuf(block->index));

			offsets = rec_get_offsets((const rec_t*) node->data,
						  block->index, offsets,
						  block->curr_n_fields
						  + (block->curr_n_bytes > 0),
						  &heap);

			if (!block->is_hashed || node->fold
			    != rec_fold((rec_t*)(node->data),
					offsets,
					block->curr_n_fields,
					block->curr_n_bytes,
					btr_page_get_index_id(block->frame))) {
				const page_t* page = block->frame;

				ok = FALSE;
				ut_print_timestamp(stderr);

				fprintf(stderr,
					"  InnoDB: Error in an adaptive hash"
					" index pointer to page %lu\n"
					"InnoDB: ptr mem address %p"
					" index id %lu %lu,"
					" node fold %lu, rec fold %lu\n",
					(ulong) page_get_page_no(page),
					node->data,
					(ulong) ut_dulint_get_high(
						btr_page_get_index_id(page)),
					(ulong) ut_dulint_get_low(
						btr_page_get_index_id(page)),
					(ulong) node->fold,
					(ulong) rec_fold((rec_t*)(node->data),
							 offsets,
							 block->curr_n_fields,
							 block->curr_n_bytes,
							 btr_page_get_index_id(
								 page)));

				fputs("InnoDB: Record ", stderr);
				rec_print_new(stderr, (rec_t*) node->data,
					      offsets);
				fprintf(stderr, "\nInnoDB: on that page."
					" Page mem address %p, is hashed %lu,"
					" n fields %lu, n bytes %lu\n"
					"InnoDB: side %lu\n",
					(void*) page, (ulong) block->is_hashed,
					(ulong) block->curr_n_fields,
					(ulong) block->curr_n_bytes,
					(ulong) block->curr_left_side);

				if (n_page_dumps < 20) {
					buf_page_print(page, 0);
					n_page_dumps++;
				}
			}
		}
	}

	for (i = 0; i < cell_count; i += chunk_size) {
		ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if (i != 0) {
			buf_pool_mutex_exit();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_mutex_enter();
		}

		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
			ok = FALSE;
		}
	}

	buf_pool_mutex_exit();
	rw_lock_x_unlock(&btr_search_latch);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	return(ok);
}