/*****************************************************************************

Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/********************************************************************//**
@file btr/btr0sea.c
The index tree adaptive search

Created 2/17/1996 Heikki Tuuri
*************************************************************************/
39
#include "page0page.h"
46
/** Flag: has the search system been enabled?
47
Protected by btr_search_latch and btr_search_enabled_mutex. */
48
UNIV_INTERN char btr_search_enabled = TRUE;
50
/** Mutex protecting btr_search_enabled */
51
static mutex_t btr_search_enabled_mutex;
53
/** A dummy variable to fool the compiler */
54
UNIV_INTERN ulint btr_search_this_is_zero = 0;
56
#ifdef UNIV_SEARCH_PERF_STAT
57
/** Number of successful adaptive hash index lookups */
58
UNIV_INTERN ulint btr_search_n_succ = 0;
59
/** Number of failed adaptive hash index lookups */
60
UNIV_INTERN ulint btr_search_n_hash_fail = 0;
61
#endif /* UNIV_SEARCH_PERF_STAT */
63
/** padding to prevent other memory update
64
hotspots from residing on the same memory
65
cache line as btr_search_latch */
66
UNIV_INTERN byte btr_sea_pad1[64];
68
/** The latch protecting the adaptive search system: this latch protects the
69
(1) positions of records on those pages where a hash index has been built.
70
NOTE: It does not protect values of non-ordering fields within a record from
71
being updated in-place! We can use fact (1) to perform unique searches to
74
/* We will allocate the latch from dynamic memory to get it to the
75
same DRAM page as other hotspot semaphores */
76
UNIV_INTERN rw_lock_t* btr_search_latch_temp;
78
/** padding to prevent other memory update hotspots from residing on
79
the same memory cache line */
80
UNIV_INTERN byte btr_sea_pad2[64];
82
/** The adaptive hash index */
83
UNIV_INTERN btr_search_sys_t* btr_search_sys;
85
/** If the number of records on the page divided by this parameter
86
would have been successfully accessed using a hash index, the index
87
is then built on the page, assuming the global limit has been reached */
88
#define BTR_SEARCH_PAGE_BUILD_LIMIT 16
90
/** The global limit for consecutive potentially successful hash searches,
91
before hash index building is started */
92
#define BTR_SEARCH_BUILD_LIMIT 100
94
/********************************************************************//**
95
Builds a hash index on a page with the given parameters. If the page already
96
has a hash index with different parameters, the old hash index is removed.
97
If index is non-NULL, this function checks if n_fields and n_bytes are
98
sensible values, and does not build a hash index if not. */
101
btr_search_build_page_hash_index(
102
/*=============================*/
103
dict_index_t* index, /*!< in: index for which to build, or NULL if
105
buf_block_t* block, /*!< in: index page, s- or x-latched */
106
ulint n_fields,/*!< in: hash this many full fields */
107
ulint n_bytes,/*!< in: hash this many bytes from the next
109
ibool left_side);/*!< in: hash for searches from left side? */
111
/*****************************************************************//**
112
This function should be called before reserving any btr search mutex, if
113
the intended operation might add nodes to the search system hash table.
114
Because of the latching order, once we have reserved the btr search system
115
latch, we cannot allocate a free frame from the buffer pool. Checks that
116
there is a free buffer frame allocated for hash table heap in the btr search
117
system. If not, allocates a free frames for the heap. This check makes it
118
probable that, when have reserved the btr search system latch and we need to
119
allocate a new node to the hash table, it will succeed. However, the check
120
will not guarantee success. */
123
btr_search_check_free_space_in_heap(void)
124
/*=====================================*/
129
#ifdef UNIV_SYNC_DEBUG
130
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
131
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
132
#endif /* UNIV_SYNC_DEBUG */
134
table = btr_search_sys->hash_index;
138
/* Note that we peek the value of heap->free_block without reserving
139
the latch: this is ok, because we will not guarantee that there will
140
be enough free space in the hash table. */
142
if (heap->free_block == NULL) {
143
buf_block_t* block = buf_block_alloc(0);
145
rw_lock_x_lock(&btr_search_latch);
147
if (heap->free_block == NULL) {
148
heap->free_block = block;
150
buf_block_free(block);
153
rw_lock_x_unlock(&btr_search_latch);
157
/*****************************************************************//**
158
Creates and initializes the adaptive search system at a database start. */
161
btr_search_sys_create(
162
/*==================*/
163
ulint hash_size) /*!< in: hash index hash table size */
165
/* We allocate the search latch from dynamic memory:
166
see above at the global variable definition */
168
btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));
170
rw_lock_create(&btr_search_latch, SYNC_SEARCH_SYS);
171
mutex_create(&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);
173
btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));
175
btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
178
/*****************************************************************//**
179
Frees the adaptive search system at a database shutdown. */
182
btr_search_sys_free(void)
183
/*=====================*/
185
mem_free(btr_search_latch_temp);
186
btr_search_latch_temp = NULL;
187
mem_heap_free(btr_search_sys->hash_index->heap);
188
hash_table_free(btr_search_sys->hash_index);
189
mem_free(btr_search_sys);
190
btr_search_sys = NULL;
193
/********************************************************************//**
194
Disable the adaptive hash search system and empty the index. */
197
btr_search_disable(void)
198
/*====================*/
200
mutex_enter(&btr_search_enabled_mutex);
201
rw_lock_x_lock(&btr_search_latch);
203
btr_search_enabled = FALSE;
205
/* Clear all block->is_hashed flags and remove all entries
206
from btr_search_sys->hash_index. */
207
buf_pool_drop_hash_index();
209
/* btr_search_enabled_mutex should guarantee this. */
210
ut_ad(!btr_search_enabled);
212
rw_lock_x_unlock(&btr_search_latch);
213
mutex_exit(&btr_search_enabled_mutex);
216
/********************************************************************//**
217
Enable the adaptive hash search system. */
220
btr_search_enable(void)
221
/*====================*/
223
mutex_enter(&btr_search_enabled_mutex);
224
rw_lock_x_lock(&btr_search_latch);
226
btr_search_enabled = TRUE;
228
rw_lock_x_unlock(&btr_search_latch);
229
mutex_exit(&btr_search_enabled_mutex);
232
/*****************************************************************//**
233
Creates and initializes a search info struct.
234
@return own: search info struct */
237
btr_search_info_create(
238
/*===================*/
239
mem_heap_t* heap) /*!< in: heap where created */
243
info = mem_heap_alloc(heap, sizeof(btr_search_t));
246
info->magic_n = BTR_SEARCH_MAGIC_N;
247
#endif /* UNIV_DEBUG */
250
info->root_guess = NULL;
252
info->hash_analysis = 0;
253
info->n_hash_potential = 0;
255
info->last_hash_succ = FALSE;
257
#ifdef UNIV_SEARCH_PERF_STAT
258
info->n_hash_succ = 0;
259
info->n_hash_fail = 0;
260
info->n_patt_succ = 0;
261
info->n_searches = 0;
262
#endif /* UNIV_SEARCH_PERF_STAT */
264
/* Set some sensible values */
268
info->left_side = TRUE;
273
/*****************************************************************//**
274
Returns the value of ref_count. The value is protected by
276
@return ref_count value. */
279
btr_search_info_get_ref_count(
280
/*==========================*/
281
btr_search_t* info) /*!< in: search info. */
287
#ifdef UNIV_SYNC_DEBUG
288
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
289
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
290
#endif /* UNIV_SYNC_DEBUG */
292
rw_lock_s_lock(&btr_search_latch);
293
ret = info->ref_count;
294
rw_lock_s_unlock(&btr_search_latch);
299
/*********************************************************************//**
300
Updates the search info of an index about hash successes. NOTE that info
301
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
305
btr_search_info_update_hash(
306
/*========================*/
307
btr_search_t* info, /*!< in/out: search info */
308
btr_cur_t* cursor) /*!< in: cursor which was just positioned */
314
#ifdef UNIV_SYNC_DEBUG
315
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
316
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
317
#endif /* UNIV_SYNC_DEBUG */
319
index = cursor->index;
321
if (dict_index_is_ibuf(index)) {
322
/* So many deletes are performed on an insert buffer tree
323
that we do not consider a hash index useful on it: */
328
n_unique = dict_index_get_n_unique_in_tree(index);
330
if (info->n_hash_potential == 0) {
335
/* Test if the search would have succeeded using the recommended
338
if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
340
info->n_hash_potential++;
345
cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
346
cursor->low_match, cursor->low_bytes);
348
if (info->left_side ? cmp <= 0 : cmp > 0) {
353
cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
354
cursor->up_match, cursor->up_bytes);
356
if (info->left_side ? cmp <= 0 : cmp > 0) {
358
goto increment_potential;
362
/* We have to set a new recommendation; skip the hash analysis
363
for a while to avoid unnecessary CPU time usage when there is no
364
chance for success */
366
info->hash_analysis = 0;
368
cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
369
cursor->low_match, cursor->low_bytes);
371
info->n_hash_potential = 0;
373
/* For extra safety, we set some sensible values here */
378
info->left_side = TRUE;
380
} else if (cmp > 0) {
381
info->n_hash_potential = 1;
383
if (cursor->up_match >= n_unique) {
385
info->n_fields = n_unique;
388
} else if (cursor->low_match < cursor->up_match) {
390
info->n_fields = cursor->low_match + 1;
393
info->n_fields = cursor->low_match;
394
info->n_bytes = cursor->low_bytes + 1;
397
info->left_side = TRUE;
399
info->n_hash_potential = 1;
401
if (cursor->low_match >= n_unique) {
403
info->n_fields = n_unique;
406
} else if (cursor->low_match > cursor->up_match) {
408
info->n_fields = cursor->up_match + 1;
411
info->n_fields = cursor->up_match;
412
info->n_bytes = cursor->up_bytes + 1;
415
info->left_side = FALSE;
419
/*********************************************************************//**
420
Updates the block search info on hash successes. NOTE that info and
421
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
422
semaphore, to save CPU time! Do not assume the fields are consistent.
423
@return TRUE if building a (new) hash index on the block is recommended */
426
btr_search_update_block_hash_info(
427
/*==============================*/
428
btr_search_t* info, /*!< in: search info */
429
buf_block_t* block, /*!< in: buffer block */
430
btr_cur_t* cursor __attribute__((unused)))
433
#ifdef UNIV_SYNC_DEBUG
434
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
435
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
436
ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
437
|| rw_lock_own(&block->lock, RW_LOCK_EX));
438
#endif /* UNIV_SYNC_DEBUG */
441
info->last_hash_succ = FALSE;
443
ut_a(buf_block_state_valid(block));
444
ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);
446
if ((block->n_hash_helps > 0)
447
&& (info->n_hash_potential > 0)
448
&& (block->n_fields == info->n_fields)
449
&& (block->n_bytes == info->n_bytes)
450
&& (block->left_side == info->left_side)) {
452
if ((block->is_hashed)
453
&& (block->curr_n_fields == info->n_fields)
454
&& (block->curr_n_bytes == info->n_bytes)
455
&& (block->curr_left_side == info->left_side)) {
457
/* The search would presumably have succeeded using
460
info->last_hash_succ = TRUE;
463
block->n_hash_helps++;
465
block->n_hash_helps = 1;
466
block->n_fields = info->n_fields;
467
block->n_bytes = info->n_bytes;
468
block->left_side = info->left_side;
472
if (cursor->index->table->does_not_fit_in_memory) {
473
block->n_hash_helps = 0;
475
#endif /* UNIV_DEBUG */
477
if ((block->n_hash_helps > page_get_n_recs(block->frame)
478
/ BTR_SEARCH_PAGE_BUILD_LIMIT)
479
&& (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
481
if ((!block->is_hashed)
482
|| (block->n_hash_helps
483
> 2 * page_get_n_recs(block->frame))
484
|| (block->n_fields != block->curr_n_fields)
485
|| (block->n_bytes != block->curr_n_bytes)
486
|| (block->left_side != block->curr_left_side)) {
488
/* Build a new hash index on the page */
497
/*********************************************************************//**
498
Updates a hash node reference when it has been unsuccessfully used in a
499
search which could have succeeded with the used hash parameters. This can
500
happen because when building a hash index for a page, we do not check
501
what happens at page boundaries, and therefore there can be misleading
502
hash nodes. Also, collisions in the fold value can lead to misleading
503
references. This function lazily fixes these imperfections in the hash
507
btr_search_update_hash_ref(
508
/*=======================*/
509
btr_search_t* info, /*!< in: search info */
510
buf_block_t* block, /*!< in: buffer block where cursor positioned */
511
btr_cur_t* cursor) /*!< in: cursor */
517
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
518
#ifdef UNIV_SYNC_DEBUG
519
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
520
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
521
|| rw_lock_own(&(block->lock), RW_LOCK_EX));
522
#endif /* UNIV_SYNC_DEBUG */
523
ut_ad(page_align(btr_cur_get_rec(cursor))
524
== buf_block_get_frame(block));
526
if (!block->is_hashed) {
531
ut_a(block->index == cursor->index);
532
ut_a(!dict_index_is_ibuf(cursor->index));
534
if ((info->n_hash_potential > 0)
535
&& (block->curr_n_fields == info->n_fields)
536
&& (block->curr_n_bytes == info->n_bytes)
537
&& (block->curr_left_side == info->left_side)) {
538
mem_heap_t* heap = NULL;
539
ulint offsets_[REC_OFFS_NORMAL_SIZE];
540
rec_offs_init(offsets_);
542
rec = btr_cur_get_rec(cursor);
544
if (!page_rec_is_user_rec(rec)) {
549
index_id = cursor->index->id;
551
rec_get_offsets(rec, cursor->index, offsets_,
552
ULINT_UNDEFINED, &heap),
553
block->curr_n_fields,
554
block->curr_n_bytes, index_id);
555
if (UNIV_LIKELY_NULL(heap)) {
558
#ifdef UNIV_SYNC_DEBUG
559
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
560
#endif /* UNIV_SYNC_DEBUG */
562
ha_insert_for_fold(btr_search_sys->hash_index, fold,
567
/*********************************************************************//**
568
Updates the search info. */
571
btr_search_info_update_slow(
572
/*========================*/
573
btr_search_t* info, /*!< in/out: search info */
574
btr_cur_t* cursor) /*!< in: cursor which was just positioned */
581
#ifdef UNIV_SYNC_DEBUG
582
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
583
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
584
#endif /* UNIV_SYNC_DEBUG */
586
block = btr_cur_get_block(cursor);
588
/* NOTE that the following two function calls do NOT protect
589
info or block->n_fields etc. with any semaphore, to save CPU time!
590
We cannot assume the fields are consistent when we return from
593
btr_search_info_update_hash(info, cursor);
595
build_index = btr_search_update_block_hash_info(info, block, cursor);
597
if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {
599
btr_search_check_free_space_in_heap();
602
if (cursor->flag == BTR_CUR_HASH_FAIL) {
603
/* Update the hash node reference, if appropriate */
605
#ifdef UNIV_SEARCH_PERF_STAT
606
btr_search_n_hash_fail++;
607
#endif /* UNIV_SEARCH_PERF_STAT */
609
rw_lock_x_lock(&btr_search_latch);
611
btr_search_update_hash_ref(info, block, cursor);
613
rw_lock_x_unlock(&btr_search_latch);
617
/* Note that since we did not protect block->n_fields etc.
618
with any semaphore, the values can be inconsistent. We have
619
to check inside the function call that they make sense. We
620
also malloc an array and store the values there to make sure
621
the compiler does not let the function call parameters change
622
inside the called function. It might be that the compiler
623
would optimize the call just to pass pointers to block. */
625
params = mem_alloc(3 * sizeof(ulint));
626
params[0] = block->n_fields;
627
params[1] = block->n_bytes;
628
params[2] = block->left_side;
630
/* Make sure the compiler cannot deduce the values and do
633
params2 = params + btr_search_this_is_zero;
635
btr_search_build_page_hash_index(cursor->index,
644
/******************************************************************//**
645
Checks if a guessed position for a tree cursor is right. Note that if
646
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
647
TRUE, then cursor->up_match and cursor->low_match both have sensible values.
648
@return TRUE if success */
651
btr_search_check_guess(
652
/*===================*/
653
btr_cur_t* cursor, /*!< in: guessed cursor position */
654
ibool can_only_compare_to_cursor_rec,
655
/*!< in: if we do not have a latch on the page
656
of cursor, but only a latch on
657
btr_search_latch, then ONLY the columns
658
of the record UNDER the cursor are
659
protected, not the next or previous record
660
in the chain: we cannot look at the next or
661
previous record to check our guess! */
662
const dtuple_t* tuple, /*!< in: data tuple */
663
ulint mode, /*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
665
mtr_t* mtr) /*!< in: mtr */
672
mem_heap_t* heap = NULL;
673
ulint offsets_[REC_OFFS_NORMAL_SIZE];
674
ulint* offsets = offsets_;
675
ibool success = FALSE;
676
rec_offs_init(offsets_);
678
n_unique = dict_index_get_n_unique_in_tree(cursor->index);
680
rec = btr_cur_get_rec(cursor);
682
ut_ad(page_rec_is_user_rec(rec));
687
offsets = rec_get_offsets(rec, cursor->index, offsets,
689
cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
690
offsets, &match, &bytes);
692
if (mode == PAGE_CUR_GE) {
697
cursor->up_match = match;
699
if (match >= n_unique) {
703
} else if (mode == PAGE_CUR_LE) {
708
cursor->low_match = match;
710
} else if (mode == PAGE_CUR_G) {
714
} else if (mode == PAGE_CUR_L) {
720
if (can_only_compare_to_cursor_rec) {
721
/* Since we could not determine if our guess is right just by
722
looking at the record under the cursor, return FALSE */
729
if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
732
ut_ad(!page_rec_is_infimum(rec));
734
prev_rec = page_rec_get_prev(rec);
736
if (page_rec_is_infimum(prev_rec)) {
737
success = btr_page_get_prev(page_align(prev_rec), mtr)
743
offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
745
cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
746
offsets, &match, &bytes);
747
if (mode == PAGE_CUR_GE) {
757
ut_ad(!page_rec_is_supremum(rec));
759
next_rec = page_rec_get_next(rec);
761
if (page_rec_is_supremum(next_rec)) {
762
if (btr_page_get_next(page_align(next_rec), mtr)
765
cursor->up_match = 0;
772
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
774
cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
775
offsets, &match, &bytes);
776
if (mode == PAGE_CUR_LE) {
778
cursor->up_match = match;
784
if (UNIV_LIKELY_NULL(heap)) {
790
/******************************************************************//**
791
Tries to guess the right search position based on the hash search info
792
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
793
and the function returns TRUE, then cursor->up_match and cursor->low_match
794
both have sensible values.
795
@return TRUE if succeeded */
798
btr_search_guess_on_hash(
799
/*=====================*/
800
dict_index_t* index, /*!< in: index */
801
btr_search_t* info, /*!< in: index search info */
802
const dtuple_t* tuple, /*!< in: logical record */
803
ulint mode, /*!< in: PAGE_CUR_L, ... */
804
ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ...;
805
NOTE that only if has_search_latch
806
is 0, we will have a latch set on
807
the cursor page, otherwise we assume
808
the caller uses his search latch
809
to protect the record! */
810
btr_cur_t* cursor, /*!< out: tree cursor */
811
ulint has_search_latch,/*!< in: latch mode the caller
812
currently has on btr_search_latch:
813
RW_S_LATCH, RW_X_LATCH, or 0 */
814
mtr_t* mtr) /*!< in: mtr */
824
ut_ad(index && info && tuple && cursor && mtr);
825
ut_ad((latch_mode == BTR_SEARCH_LEAF)
826
|| (latch_mode == BTR_MODIFY_LEAF));
828
/* Note that, for efficiency, the struct info may not be protected by
831
if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {
836
cursor->n_fields = info->n_fields;
837
cursor->n_bytes = info->n_bytes;
839
if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
840
< cursor->n_fields + (cursor->n_bytes > 0))) {
845
index_id = index->id;
847
#ifdef UNIV_SEARCH_PERF_STAT
850
fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);
853
cursor->flag = BTR_CUR_HASH;
855
if (UNIV_LIKELY(!has_search_latch)) {
856
rw_lock_s_lock(&btr_search_latch);
858
if (UNIV_UNLIKELY(!btr_search_enabled)) {
863
ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
864
ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);
866
rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);
868
if (UNIV_UNLIKELY(!rec)) {
872
block = buf_block_align(rec);
874
if (UNIV_LIKELY(!has_search_latch)) {
877
!buf_page_get_known_nowait(latch_mode, block,
884
rw_lock_s_unlock(&btr_search_latch);
886
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
889
if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
890
ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);
892
if (UNIV_LIKELY(!has_search_latch)) {
894
btr_leaf_page_release(block, latch_mode, mtr);
900
ut_ad(page_rec_is_user_rec(rec));
902
btr_cur_position(index, rec, block, cursor);
904
/* Check the validity of the guess within the page */
906
/* If we only have the latch on btr_search_latch, not on the
907
page, it only protects the columns of the record the cursor
908
is positioned on. We cannot look at the next of the previous
909
record to determine if our guess for the cursor position is
912
(ut_dulint_cmp(index_id, btr_page_get_index_id(block->frame)), 0)
913
|| !btr_search_check_guess(cursor,
916
if (UNIV_LIKELY(!has_search_latch)) {
917
btr_leaf_page_release(block, latch_mode, mtr);
923
if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {
925
info->n_hash_potential++;
929
/* These lines of code can be used in a debug version to check
930
the correctness of the searched cursor position: */
932
info->last_hash_succ = FALSE;
934
/* Currently, does not work if the following fails: */
935
ut_ad(!has_search_latch);
937
btr_leaf_page_release(block, latch_mode, mtr);
939
btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
941
if (mode == PAGE_CUR_GE
942
&& page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {
944
/* If mode is PAGE_CUR_GE, then the binary search
945
in the index tree may actually take us to the supremum
946
of the previous page */
948
info->last_hash_succ = FALSE;
950
btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
952
ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
954
ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
957
/* NOTE that it is theoretically possible that the above assertions
958
fail if the page of the cursor gets removed from the buffer pool
959
meanwhile! Thus it might not be a bug. */
961
info->last_hash_succ = TRUE;
963
#ifdef UNIV_SEARCH_PERF_STAT
966
if (UNIV_LIKELY(!has_search_latch)
967
&& buf_page_peek_if_too_old(&block->page)) {
969
buf_page_make_young(&block->page);
972
/* Increment the page get statistics though we did not really
973
fix the page: for user info only */
975
buf_pool->stat.n_page_gets++;
979
/*-------------------------------------------*/
981
if (UNIV_LIKELY(!has_search_latch)) {
982
rw_lock_s_unlock(&btr_search_latch);
985
cursor->flag = BTR_CUR_HASH_FAIL;
987
#ifdef UNIV_SEARCH_PERF_STAT
990
if (info->n_hash_succ > 0) {
994
info->last_hash_succ = FALSE;
999
/********************************************************************//**
1000
Drops a page hash index. */
1003
btr_search_drop_page_hash_index(
1004
/*============================*/
1005
buf_block_t* block) /*!< in: block containing index page,
1006
s- or x-latched, or an index page
1007
for which we know that
1008
block->buf_fix_count == 0 */
1010
hash_table_t* table;
1023
const dict_index_t* index;
1026
#ifdef UNIV_SYNC_DEBUG
1027
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
1028
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
1029
#endif /* UNIV_SYNC_DEBUG */
1032
rw_lock_s_lock(&btr_search_latch);
1033
page = block->frame;
1035
if (UNIV_LIKELY(!block->is_hashed)) {
1037
rw_lock_s_unlock(&btr_search_latch);
1042
table = btr_search_sys->hash_index;
1044
#ifdef UNIV_SYNC_DEBUG
1045
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
1046
|| rw_lock_own(&(block->lock), RW_LOCK_EX)
1047
|| (block->page.buf_fix_count == 0));
1048
#endif /* UNIV_SYNC_DEBUG */
1050
n_fields = block->curr_n_fields;
1051
n_bytes = block->curr_n_bytes;
1052
index = block->index;
1053
ut_a(!dict_index_is_ibuf(index));
1055
/* NOTE: The fields of block must not be accessed after
1056
releasing btr_search_latch, as the index page might only
1059
rw_lock_s_unlock(&btr_search_latch);
1061
ut_a(n_fields + n_bytes > 0);
1063
n_recs = page_get_n_recs(page);
1065
/* Calculate and cache fold values into an array for fast deletion
1066
from the hash index */
1068
folds = mem_alloc(n_recs * sizeof(ulint));
1072
rec = page_get_infimum_rec(page);
1073
rec = page_rec_get_next_low(rec, page_is_comp(page));
1075
index_id = btr_page_get_index_id(page);
1077
ut_a(0 == ut_dulint_cmp(index_id, index->id));
1084
while (!page_rec_is_supremum(rec)) {
1085
offsets = rec_get_offsets(rec, index, offsets,
1086
n_fields + (n_bytes > 0), &heap);
1087
ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
1088
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
1090
if (fold == prev_fold && prev_fold != 0) {
1095
/* Remove all hash nodes pointing to this page from the
1098
folds[n_cached] = fold;
1101
rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
1105
if (UNIV_LIKELY_NULL(heap)) {
1106
mem_heap_free(heap);
1109
rw_lock_x_lock(&btr_search_latch);
1111
if (UNIV_UNLIKELY(!block->is_hashed)) {
1112
/* Someone else has meanwhile dropped the hash index */
1117
ut_a(block->index == index);
1119
if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
1120
|| UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {
1122
/* Someone else has meanwhile built a new hash index on the
1123
page, with different parameters */
1125
rw_lock_x_unlock(&btr_search_latch);
1131
for (i = 0; i < n_cached; i++) {
1133
ha_remove_all_nodes_to_page(table, folds[i], page);
1136
ut_a(index->search_info->ref_count > 0);
1137
index->search_info->ref_count--;
1139
block->is_hashed = FALSE;
1140
block->index = NULL;
1143
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
1144
if (UNIV_UNLIKELY(block->n_pointers)) {
1146
ut_print_timestamp(stderr);
1148
" InnoDB: Corruption of adaptive hash index."
1150
"InnoDB: the hash index to a page of %s,"
1151
" still %lu hash nodes remain.\n",
1152
index->name, (ulong) block->n_pointers);
1153
rw_lock_x_unlock(&btr_search_latch);
1155
btr_search_validate();
1157
rw_lock_x_unlock(&btr_search_latch);
1159
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
1160
rw_lock_x_unlock(&btr_search_latch);
1161
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
1166
/********************************************************************//**
1167
Drops a page hash index when a page is freed from a fseg to the file system.
1168
Drops possible hash index if the page happens to be in the buffer pool. */
1171
btr_search_drop_page_hash_when_freed(
1172
/*=================================*/
1173
ulint space, /*!< in: space id */
1174
ulint zip_size, /*!< in: compressed page size in bytes
1175
or 0 for uncompressed pages */
1176
ulint page_no) /*!< in: page number */
1181
if (!buf_page_peek_if_search_hashed(space, page_no)) {
1188
/* We assume that if the caller has a latch on the page, then the
1189
caller has already dropped the hash index for the page, and we never
1190
get here. Therefore we can acquire the s-latch to the page without
1191
having to fear a deadlock. */
1193
block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
1194
BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
1196
/* Because the buffer pool mutex was released by
1197
buf_page_peek_if_search_hashed(), it is possible that the
1198
block was removed from the buffer pool by another thread
1199
before buf_page_get_gen() got a chance to acquire the buffer
1200
pool mutex again. Thus, we must check for a NULL return. */
1202
if (UNIV_LIKELY(block != NULL)) {
1204
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
1206
btr_search_drop_page_hash_index(block);
1212
/********************************************************************//**
1213
Builds a hash index on a page with the given parameters. If the page already
1214
has a hash index with different parameters, the old hash index is removed.
1215
If index is non-NULL, this function checks if n_fields and n_bytes are
1216
sensible values, and does not build a hash index if not. */
1219
btr_search_build_page_hash_index(
1220
/*=============================*/
1221
dict_index_t* index, /*!< in: index for which to build */
1222
buf_block_t* block, /*!< in: index page, s- or x-latched */
1223
ulint n_fields,/*!< in: hash this many full fields */
1224
ulint n_bytes,/*!< in: hash this many bytes from the next
1226
ibool left_side)/*!< in: hash for searches from left side? */
1228
hash_table_t* table;
1240
mem_heap_t* heap = NULL;
1241
ulint offsets_[REC_OFFS_NORMAL_SIZE];
1242
ulint* offsets = offsets_;
1243
rec_offs_init(offsets_);
1246
ut_a(!dict_index_is_ibuf(index));
1248
table = btr_search_sys->hash_index;
1249
page = buf_block_get_frame(block);
1251
#ifdef UNIV_SYNC_DEBUG
1252
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
1253
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
1254
|| rw_lock_own(&(block->lock), RW_LOCK_EX));
1255
#endif /* UNIV_SYNC_DEBUG */
1257
rw_lock_s_lock(&btr_search_latch);
1259
if (block->is_hashed && ((block->curr_n_fields != n_fields)
1260
|| (block->curr_n_bytes != n_bytes)
1261
|| (block->curr_left_side != left_side))) {
1263
rw_lock_s_unlock(&btr_search_latch);
1265
btr_search_drop_page_hash_index(block);
1267
rw_lock_s_unlock(&btr_search_latch);
1270
n_recs = page_get_n_recs(page);
1277
/* Check that the values for hash index build are sensible */
1279
if (n_fields + n_bytes == 0) {
1284
if (dict_index_get_n_unique_in_tree(index) < n_fields
1285
|| (dict_index_get_n_unique_in_tree(index) == n_fields
1290
/* Calculate and cache fold values and corresponding records into
1291
an array for fast insertion to the hash index */
1293
folds = mem_alloc(n_recs * sizeof(ulint));
1294
recs = mem_alloc(n_recs * sizeof(rec_t*));
1298
index_id = btr_page_get_index_id(page);
1300
rec = page_rec_get_next(page_get_infimum_rec(page));
1302
offsets = rec_get_offsets(rec, index, offsets,
1303
n_fields + (n_bytes > 0), &heap);
1305
if (!page_rec_is_supremum(rec)) {
1306
ut_a(n_fields <= rec_offs_n_fields(offsets));
1309
ut_a(n_fields < rec_offs_n_fields(offsets));
1313
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
1317
folds[n_cached] = fold;
1318
recs[n_cached] = rec;
1323
next_rec = page_rec_get_next(rec);
1325
if (page_rec_is_supremum(next_rec)) {
1329
folds[n_cached] = fold;
1330
recs[n_cached] = rec;
1337
offsets = rec_get_offsets(next_rec, index, offsets,
1338
n_fields + (n_bytes > 0), &heap);
1339
next_fold = rec_fold(next_rec, offsets, n_fields,
1342
if (fold != next_fold) {
1343
/* Insert an entry into the hash index */
1347
folds[n_cached] = next_fold;
1348
recs[n_cached] = next_rec;
1351
folds[n_cached] = fold;
1352
recs[n_cached] = rec;
1361
btr_search_check_free_space_in_heap();
1363
rw_lock_x_lock(&btr_search_latch);
1365
if (UNIV_UNLIKELY(!btr_search_enabled)) {
1369
if (block->is_hashed && ((block->curr_n_fields != n_fields)
1370
|| (block->curr_n_bytes != n_bytes)
1371
|| (block->curr_left_side != left_side))) {
1375
/* This counter is decremented every time we drop page
1376
hash index entries and is incremented here. Since we can
1377
rebuild hash index for a page that is already hashed, we
1378
have to take care not to increment the counter in that
1380
if (!block->is_hashed) {
1381
index->search_info->ref_count++;
1384
block->is_hashed = TRUE;
1385
block->n_hash_helps = 0;
1387
block->curr_n_fields = n_fields;
1388
block->curr_n_bytes = n_bytes;
1389
block->curr_left_side = left_side;
1390
block->index = index;
1392
for (i = 0; i < n_cached; i++) {
1394
ha_insert_for_fold(table, folds[i], block, recs[i]);
1398
rw_lock_x_unlock(&btr_search_latch);
1402
if (UNIV_LIKELY_NULL(heap)) {
1403
mem_heap_free(heap);
1407
/********************************************************************//**
1408
Moves or deletes hash entries for moved records. If new_page is already hashed,
1409
then the hash index for page, if any, is dropped. If new_page is not hashed,
1410
and page is hashed, then a new hash index is built to new_page with the same
1411
parameters as page (this often happens when a page is split). */
1414
btr_search_move_or_delete_hash_entries(
1415
/*===================================*/
1416
buf_block_t* new_block, /*!< in: records are copied
1418
buf_block_t* block, /*!< in: index page from which
1419
records were copied, and the
1420
copied records will be deleted
1422
dict_index_t* index) /*!< in: record descriptor */
1428
#ifdef UNIV_SYNC_DEBUG
1429
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1430
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
1431
#endif /* UNIV_SYNC_DEBUG */
1432
ut_a(!new_block->is_hashed || new_block->index == index);
1433
ut_a(!block->is_hashed || block->index == index);
1434
ut_a(!(new_block->is_hashed || block->is_hashed)
1435
|| !dict_index_is_ibuf(index));
1437
rw_lock_s_lock(&btr_search_latch);
1439
if (new_block->is_hashed) {
1441
rw_lock_s_unlock(&btr_search_latch);
1443
btr_search_drop_page_hash_index(block);
1448
if (block->is_hashed) {
1450
n_fields = block->curr_n_fields;
1451
n_bytes = block->curr_n_bytes;
1452
left_side = block->curr_left_side;
1454
new_block->n_fields = block->curr_n_fields;
1455
new_block->n_bytes = block->curr_n_bytes;
1456
new_block->left_side = left_side;
1458
rw_lock_s_unlock(&btr_search_latch);
1460
ut_a(n_fields + n_bytes > 0);
1462
btr_search_build_page_hash_index(index, new_block, n_fields,
1463
n_bytes, left_side);
1464
ut_ad(n_fields == block->curr_n_fields);
1465
ut_ad(n_bytes == block->curr_n_bytes);
1466
ut_ad(left_side == block->curr_left_side);
1470
rw_lock_s_unlock(&btr_search_latch);
1473
/********************************************************************//**
1474
Updates the page hash index when a single record is deleted from a page. */
1477
btr_search_update_hash_on_delete(
1478
/*=============================*/
1479
btr_cur_t* cursor) /*!< in: cursor which was positioned on the
1480
record to delete using btr_cur_search_...,
1481
the record is not yet deleted */
1483
hash_table_t* table;
1489
ulint offsets_[REC_OFFS_NORMAL_SIZE];
1490
mem_heap_t* heap = NULL;
1491
rec_offs_init(offsets_);
1493
rec = btr_cur_get_rec(cursor);
1495
block = btr_cur_get_block(cursor);
1497
#ifdef UNIV_SYNC_DEBUG
1498
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1499
#endif /* UNIV_SYNC_DEBUG */
1501
if (!block->is_hashed) {
1506
ut_a(block->index == cursor->index);
1507
ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
1508
ut_a(!dict_index_is_ibuf(cursor->index));
1510
table = btr_search_sys->hash_index;
1512
index_id = cursor->index->id;
1513
fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
1514
ULINT_UNDEFINED, &heap),
1515
block->curr_n_fields, block->curr_n_bytes, index_id);
1516
if (UNIV_LIKELY_NULL(heap)) {
1517
mem_heap_free(heap);
1519
rw_lock_x_lock(&btr_search_latch);
1521
found = ha_search_and_delete_if_found(table, fold, rec);
1523
rw_lock_x_unlock(&btr_search_latch);
1526
/********************************************************************//**
1527
Updates the page hash index when a single record is inserted on a page. */
1530
btr_search_update_hash_node_on_insert(
1531
/*==================================*/
1532
btr_cur_t* cursor) /*!< in: cursor which was positioned to the
1533
place to insert using btr_cur_search_...,
1534
and the new record has been inserted next
1537
hash_table_t* table;
1541
rec = btr_cur_get_rec(cursor);
1543
block = btr_cur_get_block(cursor);
1545
#ifdef UNIV_SYNC_DEBUG
1546
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1547
#endif /* UNIV_SYNC_DEBUG */
1549
if (!block->is_hashed) {
1554
ut_a(block->index == cursor->index);
1555
ut_a(!dict_index_is_ibuf(cursor->index));
1557
rw_lock_x_lock(&btr_search_latch);
1559
if ((cursor->flag == BTR_CUR_HASH)
1560
&& (cursor->n_fields == block->curr_n_fields)
1561
&& (cursor->n_bytes == block->curr_n_bytes)
1562
&& !block->curr_left_side) {
1564
table = btr_search_sys->hash_index;
1566
ha_search_and_update_if_found(table, cursor->fold, rec,
1567
block, page_rec_get_next(rec));
1569
rw_lock_x_unlock(&btr_search_latch);
1571
rw_lock_x_unlock(&btr_search_latch);
1573
btr_search_update_hash_on_insert(cursor);
1577
/********************************************************************//**
1578
Updates the page hash index when a single record is inserted on a page. */
1581
btr_search_update_hash_on_insert(
1582
/*=============================*/
1583
btr_cur_t* cursor) /*!< in: cursor which was positioned to the
1584
place to insert using btr_cur_search_...,
1585
and the new record has been inserted next
1588
hash_table_t* table;
1596
ulint next_fold = 0; /* remove warning (??? bug ???) */
1600
ibool locked = FALSE;
1601
mem_heap_t* heap = NULL;
1602
ulint offsets_[REC_OFFS_NORMAL_SIZE];
1603
ulint* offsets = offsets_;
1604
rec_offs_init(offsets_);
1606
table = btr_search_sys->hash_index;
1608
btr_search_check_free_space_in_heap();
1610
rec = btr_cur_get_rec(cursor);
1612
block = btr_cur_get_block(cursor);
1614
#ifdef UNIV_SYNC_DEBUG
1615
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1616
#endif /* UNIV_SYNC_DEBUG */
1618
if (!block->is_hashed) {
1623
ut_a(block->index == cursor->index);
1624
ut_a(!dict_index_is_ibuf(cursor->index));
1626
index_id = cursor->index->id;
1628
n_fields = block->curr_n_fields;
1629
n_bytes = block->curr_n_bytes;
1630
left_side = block->curr_left_side;
1632
ins_rec = page_rec_get_next(rec);
1633
next_rec = page_rec_get_next(ins_rec);
1635
offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
1636
ULINT_UNDEFINED, &heap);
1637
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);
1639
if (!page_rec_is_supremum(next_rec)) {
1640
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
1641
n_fields + (n_bytes > 0), &heap);
1642
next_fold = rec_fold(next_rec, offsets, n_fields,
1646
if (!page_rec_is_infimum(rec)) {
1647
offsets = rec_get_offsets(rec, cursor->index, offsets,
1648
n_fields + (n_bytes > 0), &heap);
1649
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
1653
rw_lock_x_lock(&btr_search_latch);
1657
ha_insert_for_fold(table, ins_fold, block, ins_rec);
1660
goto check_next_rec;
1663
if (fold != ins_fold) {
1667
rw_lock_x_lock(&btr_search_latch);
1673
ha_insert_for_fold(table, fold, block, rec);
1675
ha_insert_for_fold(table, ins_fold, block, ins_rec);
1680
if (page_rec_is_supremum(next_rec)) {
1685
rw_lock_x_lock(&btr_search_latch);
1690
ha_insert_for_fold(table, ins_fold, block, ins_rec);
1696
if (ins_fold != next_fold) {
1700
rw_lock_x_lock(&btr_search_latch);
1707
ha_insert_for_fold(table, ins_fold, block, ins_rec);
1709
fputs("Hash insert for ", stderr);
1710
dict_index_name_print(stderr, cursor->index);
1711
fprintf(stderr, " fold %lu\n", ins_fold);
1714
ha_insert_for_fold(table, next_fold, block, next_rec);
1719
if (UNIV_LIKELY_NULL(heap)) {
1720
mem_heap_free(heap);
1723
rw_lock_x_unlock(&btr_search_latch);
1727
/********************************************************************//**
1728
Validates the search system.
1729
@return TRUE if ok */
1732
btr_search_validate(void)
1733
/*=====================*/
1736
ulint n_page_dumps = 0;
1740
mem_heap_t* heap = NULL;
1741
ulint offsets_[REC_OFFS_NORMAL_SIZE];
1742
ulint* offsets = offsets_;
1744
/* How many cells to check before temporarily releasing
1745
btr_search_latch. */
1746
ulint chunk_size = 10000;
1748
rec_offs_init(offsets_);
1750
rw_lock_x_lock(&btr_search_latch);
1751
buf_pool_mutex_enter();
1753
cell_count = hash_get_n_cells(btr_search_sys->hash_index);
1755
for (i = 0; i < cell_count; i++) {
1756
/* We release btr_search_latch every once in a while to
1757
give other queries a chance to run. */
1758
if ((i != 0) && ((i % chunk_size) == 0)) {
1759
buf_pool_mutex_exit();
1760
rw_lock_x_unlock(&btr_search_latch);
1762
rw_lock_x_lock(&btr_search_latch);
1763
buf_pool_mutex_enter();
1766
node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
1768
for (; node != NULL; node = node->next) {
1769
const buf_block_t* block
1770
= buf_block_align(node->data);
1771
const buf_block_t* hash_block;
1773
if (UNIV_LIKELY(buf_block_get_state(block)
1774
== BUF_BLOCK_FILE_PAGE)) {
1776
/* The space and offset are only valid
1777
for file blocks. It is possible that
1778
the block is being freed
1779
(BUF_BLOCK_REMOVE_HASH, see the
1780
assertion and the comment below) */
1781
hash_block = buf_block_hash_get(
1782
buf_block_get_space(block),
1783
buf_block_get_page_no(block));
1789
ut_a(hash_block == block);
1791
/* When a block is being freed,
1792
buf_LRU_search_and_free_block() first
1793
removes the block from
1794
buf_pool->page_hash by calling
1795
buf_LRU_block_remove_hashed_page().
1796
After that, it invokes
1797
btr_search_drop_page_hash_index() to
1798
remove the block from
1799
btr_search_sys->hash_index. */
1801
ut_a(buf_block_get_state(block)
1802
== BUF_BLOCK_REMOVE_HASH);
1805
ut_a(!dict_index_is_ibuf(block->index));
1807
offsets = rec_get_offsets((const rec_t*) node->data,
1808
block->index, offsets,
1809
block->curr_n_fields
1810
+ (block->curr_n_bytes > 0),
1813
if (!block->is_hashed || node->fold
1814
!= rec_fold((rec_t*)(node->data),
1816
block->curr_n_fields,
1817
block->curr_n_bytes,
1818
btr_page_get_index_id(block->frame))) {
1819
const page_t* page = block->frame;
1822
ut_print_timestamp(stderr);
1825
" InnoDB: Error in an adaptive hash"
1826
" index pointer to page %lu\n"
1827
"InnoDB: ptr mem address %p"
1828
" index id %lu %lu,"
1829
" node fold %lu, rec fold %lu\n",
1830
(ulong) page_get_page_no(page),
1832
(ulong) ut_dulint_get_high(
1833
btr_page_get_index_id(page)),
1834
(ulong) ut_dulint_get_low(
1835
btr_page_get_index_id(page)),
1837
(ulong) rec_fold((rec_t*)(node->data),
1839
block->curr_n_fields,
1840
block->curr_n_bytes,
1841
btr_page_get_index_id(
1844
fputs("InnoDB: Record ", stderr);
1845
rec_print_new(stderr, (rec_t*)node->data,
1847
fprintf(stderr, "\nInnoDB: on that page."
1848
" Page mem address %p, is hashed %lu,"
1849
" n fields %lu, n bytes %lu\n"
1850
"InnoDB: side %lu\n",
1851
(void*) page, (ulong) block->is_hashed,
1852
(ulong) block->curr_n_fields,
1853
(ulong) block->curr_n_bytes,
1854
(ulong) block->curr_left_side);
1856
if (n_page_dumps < 20) {
1857
buf_page_print(page, 0);
1864
for (i = 0; i < cell_count; i += chunk_size) {
1865
ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);
1867
/* We release btr_search_latch every once in a while to
1868
give other queries a chance to run. */
1870
buf_pool_mutex_exit();
1871
rw_lock_x_unlock(&btr_search_latch);
1873
rw_lock_x_lock(&btr_search_latch);
1874
buf_pool_mutex_enter();
1877
if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
1882
buf_pool_mutex_exit();
1883
rw_lock_x_unlock(&btr_search_latch);
1884
if (UNIV_LIKELY_NULL(heap)) {
1885
mem_heap_free(heap);