~ubuntu-branches/ubuntu/trusty/drizzle/trusty

« back to all changes in this revision

Viewing changes to plugin/innobase/btr/btr0sea.c

  • Committer: Bazaar Package Importer
  • Author(s): Monty Taylor
  • Date: 2010-03-18 12:12:31 UTC
  • Revision ID: james.westby@ubuntu.com-20100318121231-k6g1xe6cshbwa0f8
Tags: upstream-2010.03.1347
Import upstream version 2010.03.1347

Show diffs side-by-side

added added

removed removed

Lines of Context:
 
1
/*****************************************************************************
 
2
 
 
3
Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
 
4
Copyright (c) 2008, Google Inc.
 
5
 
 
6
Portions of this file contain modifications contributed and copyrighted by
 
7
Google, Inc. Those modifications are gratefully acknowledged and are described
 
8
briefly in the InnoDB documentation. The contributions by Google are
 
9
incorporated with their permission, and subject to the conditions contained in
 
10
the file COPYING.Google.
 
11
 
 
12
This program is free software; you can redistribute it and/or modify it under
 
13
the terms of the GNU General Public License as published by the Free Software
 
14
Foundation; version 2 of the License.
 
15
 
 
16
This program is distributed in the hope that it will be useful, but WITHOUT
 
17
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 
18
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 
19
 
 
20
You should have received a copy of the GNU General Public License along with
 
21
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 
22
Place, Suite 330, Boston, MA 02111-1307 USA
 
23
 
 
24
*****************************************************************************/
 
25
 
 
26
/********************************************************************//**
 
27
@file btr/btr0sea.c
 
28
The index tree adaptive search
 
29
 
 
30
Created 2/17/1996 Heikki Tuuri
 
31
*************************************************************************/
 
32
 
 
33
#include "btr0sea.h"
 
34
#ifdef UNIV_NONINL
 
35
#include "btr0sea.ic"
 
36
#endif
 
37
 
 
38
#include "buf0buf.h"
 
39
#include "page0page.h"
 
40
#include "page0cur.h"
 
41
#include "btr0cur.h"
 
42
#include "btr0pcur.h"
 
43
#include "btr0btr.h"
 
44
#include "ha0ha.h"
 
45
 
 
46
/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
UNIV_INTERN bool		btr_search_enabled	= TRUE;

/** Mutex protecting btr_search_enabled */
static mutex_t			btr_search_enabled_mutex;

/** A dummy variable to fool the compiler: always zero; added to a pointer
in btr_search_info_update_slow() so the compiler cannot deduce the values
passed to btr_search_build_page_hash_index() and optimize the copy away. */
UNIV_INTERN ulint		btr_search_this_is_zero = 0;

#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
UNIV_INTERN ulint		btr_search_n_succ	= 0;
/** Number of failed adaptive hash index lookups */
UNIV_INTERN ulint		btr_search_n_hash_fail	= 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/** padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte		btr_sea_pad1[64];

/** The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
indexes. */

/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t*		btr_search_latch_temp;

/** padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte		btr_sea_pad2[64];

/** The adaptive hash index */
UNIV_INTERN btr_search_sys_t*	btr_search_sys;

/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT	16

/** The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT		100
 
93
 
 
94
/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not.
(Forward declaration; the definition appears later in this file.) */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/*!< in: index for which to build, or NULL if
				not known */
	buf_block_t*	block,	/*!< in: index page, s- or x-latched */
	ulint		n_fields,/*!< in: hash this many full fields */
	ulint		n_bytes,/*!< in: hash this many bytes from the next
				field */
	ibool		left_side);/*!< in: hash for searches from left side? */
 
110
 
 
111
/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for hash table heap in the btr search
system. If not, allocates a free frames for the heap. This check makes it
probable that, when have reserved the btr search system latch and we need to
allocate a new node to the hash table, it will succeed. However, the check
will not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	hash_table_t*	table;
	mem_heap_t*	heap;

#ifdef UNIV_SYNC_DEBUG
	/* The caller must not hold btr_search_latch in any mode: we take
	it in exclusive mode below. */
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we will not guarantee that there will
	be enough free space in the hash table. */

	if (heap->free_block == NULL) {
		/* Allocate the buffer block BEFORE taking the search latch:
		buffer pool allocation is not allowed while holding it
		(latching order). */
		buf_block_t*	block = buf_block_alloc(0);

		rw_lock_x_lock(&btr_search_latch);

		/* Re-check under the x-latch: another thread may have
		installed a free block while we were allocating. */
		if (heap->free_block == NULL) {
			heap->free_block = block;
		} else {
			/* Lost the race; return our block to the pool. */
			buf_block_free(block);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}
 
156
 
 
157
/*****************************************************************//**
Creates and initializes the adaptive search system at a database start.
Allocates btr_search_latch and the btr_search_sys struct, and creates
the adaptive hash index hash table of the given size. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
	ulint	hash_size)	/*!< in: hash index hash table size */
{
	/* We allocate the search latch from dynamic memory:
	see above at the global variable definition */

	btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));

	rw_lock_create(&btr_search_latch, SYNC_SEARCH_SYS);
	mutex_create(&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);

	btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));

	btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
}
 
177
 
 
178
/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	/* Latching order: the enable/disable mutex first, then the
	search latch in exclusive mode, so no search or hash-index build
	runs concurrently with the teardown. */
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = FALSE;

	/* Clear all block->is_hashed flags and remove all entries
	from btr_search_sys->hash_index. */
	buf_pool_drop_hash_index();

	/* btr_search_enabled_mutex should guarantee this. */
	ut_ad(!btr_search_enabled);

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
 
200
 
 
201
/********************************************************************//**
Enable the adaptive hash search system. Mirrors btr_search_disable():
takes the same latches in the same order before flipping the flag. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = TRUE;

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
 
216
 
 
217
/*****************************************************************//**
 
218
Creates and initializes a search info struct.
 
219
@return own: search info struct */
 
220
UNIV_INTERN
 
221
btr_search_t*
 
222
btr_search_info_create(
 
223
/*===================*/
 
224
        mem_heap_t*     heap)   /*!< in: heap where created */
 
225
{
 
226
        btr_search_t*   info;
 
227
 
 
228
        info = mem_heap_alloc(heap, sizeof(btr_search_t));
 
229
 
 
230
#ifdef UNIV_DEBUG
 
231
        info->magic_n = BTR_SEARCH_MAGIC_N;
 
232
#endif /* UNIV_DEBUG */
 
233
 
 
234
        info->ref_count = 0;
 
235
        info->root_guess = NULL;
 
236
 
 
237
        info->hash_analysis = 0;
 
238
        info->n_hash_potential = 0;
 
239
 
 
240
        info->last_hash_succ = FALSE;
 
241
 
 
242
#ifdef UNIV_SEARCH_PERF_STAT
 
243
        info->n_hash_succ = 0;
 
244
        info->n_hash_fail = 0;
 
245
        info->n_patt_succ = 0;
 
246
        info->n_searches = 0;
 
247
#endif /* UNIV_SEARCH_PERF_STAT */
 
248
 
 
249
        /* Set some sensible values */
 
250
        info->n_fields = 1;
 
251
        info->n_bytes = 0;
 
252
 
 
253
        info->left_side = TRUE;
 
254
 
 
255
        return(info);
 
256
}
 
257
 
 
258
/*****************************************************************//**
 
259
Returns the value of ref_count. The value is protected by
 
260
btr_search_latch.
 
261
@return ref_count value. */
 
262
UNIV_INTERN
 
263
ulint
 
264
btr_search_info_get_ref_count(
 
265
/*==========================*/
 
266
        btr_search_t*   info)   /*!< in: search info. */
 
267
{
 
268
        ulint ret;
 
269
 
 
270
        ut_ad(info);
 
271
 
 
272
#ifdef UNIV_SYNC_DEBUG
 
273
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
 
274
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
 
275
#endif /* UNIV_SYNC_DEBUG */
 
276
 
 
277
        rw_lock_s_lock(&btr_search_latch);
 
278
        ret = info->ref_count;
 
279
        rw_lock_s_unlock(&btr_search_latch);
 
280
 
 
281
        return(ret);
 
282
}
 
283
 
 
284
/*********************************************************************//**
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	dict_index_t*	index;
	ulint		n_unique;
	int		cmp;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = cursor->index;

	if (dict_index_is_ibuf(index)) {
		/* So many deletes are performed on an insert buffer tree
		that we do not consider a hash index useful on it: */

		return;
	}

	n_unique = dict_index_get_n_unique_in_tree(index);

	if (info->n_hash_potential == 0) {
		/* No current recommendation: compute a fresh one below. */

		goto set_new_recomm;
	}

	/* Test if the search would have succeeded using the recommended
	hash prefix */

	if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
		info->n_hash_potential++;

		return;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->low_match, cursor->low_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {
		/* The recommended prefix would not have distinguished the
		cursor position on this side: recompute the recommendation. */

		goto set_new_recomm;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->up_match, cursor->up_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto increment_potential;
	}

set_new_recomm:
	/* We have to set a new recommendation; skip the hash analysis
	for a while to avoid unnecessary CPU time usage when there is no
	chance for success */

	info->hash_analysis = 0;

	cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
			  cursor->low_match, cursor->low_bytes);
	if (cmp == 0) {
		/* The up and low match prefixes are equal: no prefix can
		distinguish the position, so reset the potential. */
		info->n_hash_potential = 0;

		/* For extra safety, we set some sensible values here */

		info->n_fields = 1;
		info->n_bytes = 0;

		info->left_side = TRUE;

	} else if (cmp > 0) {
		info->n_hash_potential = 1;

		if (cursor->up_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match < cursor->up_match) {

			info->n_fields = cursor->low_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->low_match;
			info->n_bytes = cursor->low_bytes + 1;
		}

		info->left_side = TRUE;
	} else {
		info->n_hash_potential = 1;

		if (cursor->low_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match > cursor->up_match) {

			info->n_fields = cursor->up_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->up_match;
			info->n_bytes = cursor->up_bytes + 1;
		}

		info->left_side = FALSE;
	}
}
 
403
 
 
404
/*********************************************************************//**
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent.
@return TRUE if building a (new) hash index on the block is recommended */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block */
	btr_cur_t*	cursor __attribute__((unused)))
				/*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
	      || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(cursor);

	info->last_hash_succ = FALSE;

	ut_a(buf_block_state_valid(block));
	ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

	/* If the block's remembered parameters agree with the current
	recommendation, count this search as another "help"; otherwise
	restart the count with the new parameters. */
	if ((block->n_hash_helps > 0)
	    && (info->n_hash_potential > 0)
	    && (block->n_fields == info->n_fields)
	    && (block->n_bytes == info->n_bytes)
	    && (block->left_side == info->left_side)) {

		if ((block->is_hashed)
		    && (block->curr_n_fields == info->n_fields)
		    && (block->curr_n_bytes == info->n_bytes)
		    && (block->curr_left_side == info->left_side)) {

			/* The search would presumably have succeeded using
			the hash index */

			info->last_hash_succ = TRUE;
		}

		block->n_hash_helps++;
	} else {
		block->n_hash_helps = 1;
		block->n_fields = info->n_fields;
		block->n_bytes = info->n_bytes;
		block->left_side = info->left_side;
	}

#ifdef UNIV_DEBUG
	if (cursor->index->table->does_not_fit_in_memory) {
		/* Debug aid: never recommend building a hash index for a
		table flagged as not fitting in memory. */
		block->n_hash_helps = 0;
	}
#endif /* UNIV_DEBUG */

	/* Recommend a build when both the per-page threshold
	(BTR_SEARCH_PAGE_BUILD_LIMIT) and the global threshold
	(BTR_SEARCH_BUILD_LIMIT) have been reached. */
	if ((block->n_hash_helps > page_get_n_recs(block->frame)
	     / BTR_SEARCH_PAGE_BUILD_LIMIT)
	    && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

		if ((!block->is_hashed)
		    || (block->n_hash_helps
			> 2 * page_get_n_recs(block->frame))
		    || (block->n_fields != block->curr_n_fields)
		    || (block->n_bytes != block->curr_n_bytes)
		    || (block->left_side != block->curr_left_side)) {

			/* Build a new hash index on the page */

			return(TRUE);
		}
	}

	return(FALSE);
}
 
481
 
 
482
/*********************************************************************//**
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block where cursor positioned */
	btr_cur_t*	cursor)	/*!< in: cursor */
{
	ulint	fold;
	rec_t*	rec;
	dulint	index_id;

	ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
	/* Caller must hold btr_search_latch exclusively and have the
	block latched in some mode. */
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(page_align(btr_cur_get_rec(cursor))
	      == buf_block_get_frame(block));

	if (!block->is_hashed) {
		/* No hash index on this page: nothing to fix. */

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	/* Only update the reference if the block's current hash
	parameters match the recommendation in info. */
	if ((info->n_hash_potential > 0)
	    && (block->curr_n_fields == info->n_fields)
	    && (block->curr_n_bytes == info->n_bytes)
	    && (block->curr_left_side == info->left_side)) {
		mem_heap_t*	heap		= NULL;
		ulint		offsets_[REC_OFFS_NORMAL_SIZE];
		rec_offs_init(offsets_);

		rec = btr_cur_get_rec(cursor);

		if (!page_rec_is_user_rec(rec)) {
			/* Infimum/supremum records are never hashed. */

			return;
		}

		index_id = cursor->index->id;
		fold = rec_fold(rec,
				rec_get_offsets(rec, cursor->index, offsets_,
						ULINT_UNDEFINED, &heap),
				block->curr_n_fields,
				block->curr_n_bytes, index_id);
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
#ifdef UNIV_SYNC_DEBUG
		ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

		ha_insert_for_fold(btr_search_sys->hash_index, fold,
				   block, rec);
	}
}
 
551
 
 
552
/*********************************************************************//**
Updates the search info. Called on the slow path of a search: updates
the per-index and per-block hash statistics, repairs a failed hash
reference if needed, and triggers a (re)build of the page hash index
when the heuristics recommend it. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	buf_block_t*	block;
	ibool		build_index;
	ulint*		params;
	ulint*		params2;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	block = btr_cur_get_block(cursor);

	/* NOTE that the following two function calls do NOT protect
	info or block->n_fields etc. with any semaphore, to save CPU time!
	We cannot assume the fields are consistent when we return from
	those functions! */

	btr_search_info_update_hash(info, cursor);

	build_index = btr_search_update_block_hash_info(info, block, cursor);

	if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

		/* Pre-allocate heap space before taking btr_search_latch:
		see btr_search_check_free_space_in_heap(). */
		btr_search_check_free_space_in_heap();
	}

	if (cursor->flag == BTR_CUR_HASH_FAIL) {
		/* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
		btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

		rw_lock_x_lock(&btr_search_latch);

		btr_search_update_hash_ref(info, block, cursor);

		rw_lock_x_unlock(&btr_search_latch);
	}

	if (build_index) {
		/* Note that since we did not protect block->n_fields etc.
		with any semaphore, the values can be inconsistent. We have
		to check inside the function call that they make sense. We
		also malloc an array and store the values there to make sure
		the compiler does not let the function call parameters change
		inside the called function. It might be that the compiler
		would optimize the call just to pass pointers to block. */

		params = mem_alloc(3 * sizeof(ulint));
		params[0] = block->n_fields;
		params[1] = block->n_bytes;
		params[2] = block->left_side;

		/* Make sure the compiler cannot deduce the values and do
		optimizations */

		params2 = params + btr_search_this_is_zero;

		btr_search_build_page_hash_index(cursor->index,
						 block,
						 params2[0],
						 params2[1],
						 params2[2]);
		mem_free(params);
	}
}
 
628
 
 
629
/******************************************************************//**
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible values.
NOTE(review): cmp from page_cmp_dtuple_rec_with_match appears to follow
the 1/0/-1 convention (tuple greater/equal/less than rec) -- confirm
against the page0cur API.
@return TRUE if success */
static
ibool
btr_search_check_guess(
/*===================*/
	btr_cur_t*	cursor,	/*!< in: guessed cursor position */
	ibool		can_only_compare_to_cursor_rec,
				/*!< in: if we do not have a latch on the page
				of cursor, but only a latch on
				btr_search_latch, then ONLY the columns
				of the record UNDER the cursor are
				protected, not the next or previous record
				in the chain: we cannot look at the next or
				previous record to check our guess! */
	const dtuple_t*	tuple,	/*!< in: data tuple */
	ulint		mode,	/*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
				or PAGE_CUR_GE */
	mtr_t*		mtr)	/*!< in: mtr */
{
	rec_t*		rec;
	ulint		n_unique;
	ulint		match;
	ulint		bytes;
	int		cmp;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	ibool		success		= FALSE;
	rec_offs_init(offsets_);

	n_unique = dict_index_get_n_unique_in_tree(cursor->index);

	rec = btr_cur_get_rec(cursor);

	ut_ad(page_rec_is_user_rec(rec));

	match = 0;
	bytes = 0;

	/* First compare the tuple to the record under the cursor. */
	offsets = rec_get_offsets(rec, cursor->index, offsets,
				  n_unique, &heap);
	cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
					     offsets, &match, &bytes);

	if (mode == PAGE_CUR_GE) {
		if (cmp == 1) {
			goto exit_func;
		}

		cursor->up_match = match;

		if (match >= n_unique) {
			success = TRUE;
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_LE) {
		if (cmp == -1) {
			goto exit_func;
		}

		cursor->low_match = match;

	} else if (mode == PAGE_CUR_G) {
		if (cmp != -1) {
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_L) {
		if (cmp != 1) {
			goto exit_func;
		}
	}

	if (can_only_compare_to_cursor_rec) {
		/* Since we could not determine if our guess is right just by
		looking at the record under the cursor, return FALSE */
		goto exit_func;
	}

	match = 0;
	bytes = 0;

	/* The record under the cursor alone was not enough: also check
	the neighboring record on the relevant side. */
	if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
		rec_t*	prev_rec;

		ut_ad(!page_rec_is_infimum(rec));

		prev_rec = page_rec_get_prev(rec);

		if (page_rec_is_infimum(prev_rec)) {
			/* The cursor is at the start of the page: the guess
			is right only if there is no previous page. */
			success = btr_page_get_prev(page_align(prev_rec), mtr)
				== FIL_NULL;

			goto exit_func;
		}

		offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_GE) {
			success = cmp == 1;
		} else {
			success = cmp != -1;
		}

		goto exit_func;
	} else {
		rec_t*	next_rec;

		ut_ad(!page_rec_is_supremum(rec));

		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {
			/* The cursor is at the end of the page: the guess
			is right only if there is no next page. */
			if (btr_page_get_next(page_align(next_rec), mtr)
			    == FIL_NULL) {

				cursor->up_match = 0;
				success = TRUE;
			}

			goto exit_func;
		}

		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_LE) {
			success = cmp == -1;
			cursor->up_match = match;
		} else {
			success = cmp != 1;
		}
	}
exit_func:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(success);
}
 
774
 
 
775
/******************************************************************//**
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@return TRUE if succeeded */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
	dict_index_t*	index,		/*!< in: index */
	btr_search_t*	info,		/*!< in: index search info */
	const dtuple_t*	tuple,		/*!< in: logical record */
	ulint		mode,		/*!< in: PAGE_CUR_L, ... */
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ...;
					NOTE that only if has_search_latch
					is 0, we will have a latch set on
					the cursor page, otherwise we assume
					the caller uses his search latch
					to protect the record! */
	btr_cur_t*	cursor,		/*!< out: tree cursor */
	ulint		has_search_latch,/*!< in: latch mode the caller
					currently has on btr_search_latch:
					RW_S_LATCH, RW_X_LATCH, or 0 */
	mtr_t*		mtr)		/*!< in: mtr */
{
	buf_block_t*	block;
	rec_t*		rec;
	ulint		fold;
	dulint		index_id;
#ifdef notdefined
	btr_cur_t	cursor2;
	btr_pcur_t	pcur;
#endif
	ut_ad(index && info && tuple && cursor && mtr);
	ut_ad((latch_mode == BTR_SEARCH_LEAF)
	      || (latch_mode == BTR_MODIFY_LEAF));

	/* Note that, for efficiency, the struct info may not be protected by
	any latch here! */

	/* No successful hash lookups have been accumulated yet for this
	index: guessing would almost certainly fail, so bail out early. */
	if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

		return(FALSE);
	}

	cursor->n_fields = info->n_fields;
	cursor->n_bytes = info->n_bytes;

	/* The search tuple must contain at least as many fields as the
	hash prefix (a partial extra field counts when n_bytes > 0). */
	if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
			  < cursor->n_fields + (cursor->n_bytes > 0))) {

		return(FALSE);
	}

	index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ++;
#endif
	/* The fold value is the hash key: a hash of the tuple prefix
	combined with the index id. */
	fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

	cursor->fold = fold;
	cursor->flag = BTR_CUR_HASH;

	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_lock(&btr_search_latch);

		/* Re-check under the latch: the adaptive hash index may
		have been disabled meanwhile. */
		if (UNIV_UNLIKELY(!btr_search_enabled)) {
			goto failure_unlock;
		}
	}

	ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
	ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);

	rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);

	if (UNIV_UNLIKELY(!rec)) {
		goto failure_unlock;
	}

	block = buf_block_align(rec);

	if (UNIV_LIKELY(!has_search_latch)) {

		/* Try to buffer-fix the guessed page without waiting; if
		that fails we fall back to the normal B-tree search. */
		if (UNIV_UNLIKELY(
			    !buf_page_get_known_nowait(latch_mode, block,
						       BUF_MAKE_YOUNG,
						       __FILE__, __LINE__,
						       mtr))) {
			goto failure_unlock;
		}

		/* The page is now latched; the hash latch can be dropped
		before validating the guess on the page itself. */
		rw_lock_s_unlock(&btr_search_latch);

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
	}

	if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
		/* The block is being evicted; the stale hash pointer led
		us to a page that is no longer a valid file page. */
		ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);

		if (UNIV_LIKELY(!has_search_latch)) {

			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	ut_ad(page_rec_is_user_rec(rec));

	btr_cur_position(index, rec, block, cursor);

	/* Check the validity of the guess within the page */

	/* If we only have the latch on btr_search_latch, not on the
	page, it only protects the columns of the record the cursor
	is positioned on. We cannot look at the next of the previous
	record to determine if our guess for the cursor position is
	right. */
	if (UNIV_EXPECT
	    (ut_dulint_cmp(index_id, btr_page_get_index_id(block->frame)), 0)
	    || !btr_search_check_guess(cursor,
				       has_search_latch,
				       tuple, mode, mtr)) {
		if (UNIV_LIKELY(!has_search_latch)) {
			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	/* Reward the successful guess, but cap the counter so that it
	cannot grow without bound. */
	if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

		info->n_hash_potential++;
	}

#ifdef notdefined
	/* These lines of code can be used in a debug version to check
	the correctness of the searched cursor position: */

	info->last_hash_succ = FALSE;

	/* Currently, does not work if the following fails: */
	ut_ad(!has_search_latch);

	btr_leaf_page_release(block, latch_mode, mtr);

	btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
				    &cursor2, 0, mtr);
	if (mode == PAGE_CUR_GE
	    && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

		/* If mode is PAGE_CUR_GE, then the binary search
		in the index tree may actually take us to the supremum
		of the previous page */

		info->last_hash_succ = FALSE;

		btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
					  &pcur, mtr);
		ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
	} else {
		ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
	}

	/* NOTE that it is theoretically possible that the above assertions
	fail if the page of the cursor gets removed from the buffer pool
	meanwhile! Thus it might not be a bug. */
#endif
	info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
	btr_search_n_succ++;
#endif
	if (UNIV_LIKELY(!has_search_latch)
	    && buf_page_peek_if_too_old(&block->page)) {

		buf_page_make_young(&block->page);
	}

	/* Increment the page get statistics though we did not really
	fix the page: for user info only */

	buf_pool->n_page_gets++;

	return(TRUE);

	/*-------------------------------------------*/
failure_unlock:
	/* Failure while still holding btr_search_latch (if we took it). */
	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_unlock(&btr_search_latch);
	}
failure:
	cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_fail++;

	if (info->n_hash_succ > 0) {
		info->n_hash_succ--;
	}
#endif
	info->last_hash_succ = FALSE;

	return(FALSE);
}
 
983
 
 
984
/********************************************************************//**
Drops a page hash index. Removes every adaptive hash index entry that
points to records on the given page. May retry from the start if another
thread rebuilds the hash index on the page with different parameters
while the latch is released. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
	buf_block_t*	block)	/*!< in: block containing index page,
				s- or x-latched, or an index page
				for which we know that
				block->buf_fix_count == 0 */
{
	hash_table_t*		table;
	ulint			n_fields;
	ulint			n_bytes;
	const page_t*		page;
	const rec_t*		rec;
	ulint			fold;
	ulint			prev_fold;
	dulint			index_id;
	ulint			n_cached;
	ulint			n_recs;
	ulint*			folds;
	ulint			i;
	mem_heap_t*		heap;
	const dict_index_t*	index;
	ulint*			offsets;

#ifdef UNIV_SYNC_DEBUG
	/* The caller must NOT hold btr_search_latch: it is taken below,
	first in s-mode to read parameters, then in x-mode to modify. */
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
	rw_lock_s_lock(&btr_search_latch);
	page = block->frame;

	if (UNIV_LIKELY(!block->is_hashed)) {

		rw_lock_s_unlock(&btr_search_latch);

		return;
	}

	table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX)
	      || (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

	/* Snapshot the hashing parameters while holding the latch. */
	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	index = block->index;
	ut_a(!dict_index_is_ibuf(index));

	/* NOTE: The fields of block must not be accessed after
	releasing btr_search_latch, as the index page might only
	be s-latched! */

	rw_lock_s_unlock(&btr_search_latch);

	ut_a(n_fields + n_bytes > 0);

	n_recs = page_get_n_recs(page);

	/* Calculate and cache fold values into an array for fast deletion
	from the hash index */

	folds = mem_alloc(n_recs * sizeof(ulint));

	n_cached = 0;

	/* Start from the first user record (the one after the infimum). */
	rec = page_get_infimum_rec(page);
	rec = page_rec_get_next_low(rec, page_is_comp(page));

	index_id = btr_page_get_index_id(page);

	ut_a(0 == ut_dulint_cmp(index_id, index->id));

	prev_fold = 0;

	heap = NULL;
	offsets = NULL;

	while (!page_rec_is_supremum(rec)) {
		offsets = rec_get_offsets(rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

		/* Consecutive records with equal prefixes share the same
		fold; cache each distinct fold only once. */
		if (fold == prev_fold && prev_fold != 0) {

			goto next_rec;
		}

		/* Remove all hash nodes pointing to this page from the
		hash chain */

		folds[n_cached] = fold;
		n_cached++;
next_rec:
		rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
		prev_fold = fold;
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	rw_lock_x_lock(&btr_search_latch);

	/* Re-validate under the x-latch: the state may have changed
	while no latch was held during the fold computation. */
	if (UNIV_UNLIKELY(!block->is_hashed)) {
		/* Someone else has meanwhile dropped the hash index */

		goto cleanup;
	}

	ut_a(block->index == index);

	if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
	    || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

		/* Someone else has meanwhile built a new hash index on the
		page, with different parameters */

		rw_lock_x_unlock(&btr_search_latch);

		mem_free(folds);
		goto retry;
	}

	for (i = 0; i < n_cached; i++) {

		ha_remove_all_nodes_to_page(table, folds[i], page);
	}

	ut_a(index->search_info->ref_count > 0);
	index->search_info->ref_count--;

	block->is_hashed = FALSE;
	block->index = NULL;

cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (UNIV_UNLIKELY(block->n_pointers)) {
		/* Corruption */
		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Corruption of adaptive hash index."
			" After dropping\n"
			"InnoDB: the hash index to a page of %s,"
			" still %lu hash nodes remain.\n",
			index->name, (ulong) block->n_pointers);
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_validate();
	} else {
		rw_lock_x_unlock(&btr_search_latch);
	}
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	mem_free(folds);
}
 
1150
 
 
1151
/********************************************************************//**
 
1152
Drops a page hash index when a page is freed from a fseg to the file system.
 
1153
Drops possible hash index if the page happens to be in the buffer pool. */
 
1154
UNIV_INTERN
 
1155
void
 
1156
btr_search_drop_page_hash_when_freed(
 
1157
/*=================================*/
 
1158
        ulint   space,          /*!< in: space id */
 
1159
        ulint   zip_size,       /*!< in: compressed page size in bytes
 
1160
                                or 0 for uncompressed pages */
 
1161
        ulint   page_no)        /*!< in: page number */
 
1162
{
 
1163
        buf_block_t*    block;
 
1164
        mtr_t           mtr;
 
1165
 
 
1166
        if (!buf_page_peek_if_search_hashed(space, page_no)) {
 
1167
 
 
1168
                return;
 
1169
        }
 
1170
 
 
1171
        mtr_start(&mtr);
 
1172
 
 
1173
        /* We assume that if the caller has a latch on the page, then the
 
1174
        caller has already dropped the hash index for the page, and we never
 
1175
        get here. Therefore we can acquire the s-latch to the page without
 
1176
        having to fear a deadlock. */
 
1177
 
 
1178
        block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
 
1179
                                BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
 
1180
                                &mtr);
 
1181
        /* Because the buffer pool mutex was released by
 
1182
        buf_page_peek_if_search_hashed(), it is possible that the
 
1183
        block was removed from the buffer pool by another thread
 
1184
        before buf_page_get_gen() got a chance to acquire the buffer
 
1185
        pool mutex again.  Thus, we must check for a NULL return. */
 
1186
 
 
1187
        if (UNIV_LIKELY(block != NULL)) {
 
1188
 
 
1189
                buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
 
1190
 
 
1191
                btr_search_drop_page_hash_index(block);
 
1192
        }
 
1193
 
 
1194
        mtr_commit(&mtr);
 
1195
}
 
1196
 
 
1197
/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/*!< in: index for which to build */
	buf_block_t*	block,	/*!< in: index page, s- or x-latched */
	ulint		n_fields,/*!< in: hash this many full fields */
	ulint		n_bytes,/*!< in: hash this many bytes from the next
				field */
	ibool		left_side)/*!< in: hash for searches from left side? */
{
	hash_table_t*	table;
	page_t*		page;
	rec_t*		rec;
	rec_t*		next_rec;
	ulint		fold;
	ulint		next_fold;
	dulint		index_id;
	ulint		n_cached;
	ulint		n_recs;
	ulint*		folds;
	rec_t**		recs;
	ulint		i;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	ut_ad(index);
	ut_a(!dict_index_is_ibuf(index));

	table = btr_search_sys->hash_index;
	page = buf_block_get_frame(block);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);

	/* If the page is already hashed with different parameters, the
	old entries must be removed before building with the new ones. */
	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_left_side != left_side))) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);
	} else {
		rw_lock_s_unlock(&btr_search_latch);
	}

	n_recs = page_get_n_recs(page);

	if (n_recs == 0) {

		return;
	}

	/* Check that the values for hash index build are sensible */

	if (n_fields + n_bytes == 0) {

		return;
	}

	/* The hashed prefix must not exceed the number of fields that
	determine uniqueness in the tree. */
	if (dict_index_get_n_unique_in_tree(index) < n_fields
	    || (dict_index_get_n_unique_in_tree(index) == n_fields
		&& n_bytes > 0)) {
		return;
	}

	/* Calculate and cache fold values and corresponding records into
	an array for fast insertion to the hash index */

	folds = mem_alloc(n_recs * sizeof(ulint));
	recs = mem_alloc(n_recs * sizeof(rec_t*));

	n_cached = 0;

	index_id = btr_page_get_index_id(page);

	rec = page_rec_get_next(page_get_infimum_rec(page));

	offsets = rec_get_offsets(rec, index, offsets,
				  n_fields + (n_bytes > 0), &heap);

	if (!page_rec_is_supremum(rec)) {
		ut_a(n_fields <= rec_offs_n_fields(offsets));

		if (n_bytes > 0) {
			ut_a(n_fields < rec_offs_n_fields(offsets));
		}
	}

	fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

	/* For each run of records sharing a fold value, only one record
	is inserted into the hash index: the first of the run when hashing
	for left-side searches, the last of the run otherwise. */
	if (left_side) {

		folds[n_cached] = fold;
		recs[n_cached] = rec;
		n_cached++;
	}

	for (;;) {
		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {

			if (!left_side) {

				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}

			break;
		}

		offsets = rec_get_offsets(next_rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index_id);

		if (fold != next_fold) {
			/* Insert an entry into the hash index */

			if (left_side) {

				folds[n_cached] = next_fold;
				recs[n_cached] = next_rec;
				n_cached++;
			} else {
				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}
		}

		rec = next_rec;
		fold = next_fold;
	}

	btr_search_check_free_space_in_heap();

	rw_lock_x_lock(&btr_search_latch);

	/* Re-check under the x-latch: the AHI may have been disabled,
	or another thread may have hashed the page differently, while
	the fold values were being computed without the latch. */
	if (UNIV_UNLIKELY(!btr_search_enabled)) {
		goto exit_func;
	}

	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_left_side != left_side))) {
		goto exit_func;
	}

	/* This counter is decremented every time we drop page
	hash index entries and is incremented here. Since we can
	rebuild hash index for a page that is already hashed, we
	have to take care not to increment the counter in that
	case. */
	if (!block->is_hashed) {
		index->search_info->ref_count++;
	}

	block->is_hashed = TRUE;
	block->n_hash_helps = 0;

	block->curr_n_fields = n_fields;
	block->curr_n_bytes = n_bytes;
	block->curr_left_side = left_side;
	block->index = index;

	for (i = 0; i < n_cached; i++) {

		ha_insert_for_fold(table, folds[i], block, recs[i]);
	}

exit_func:
	rw_lock_x_unlock(&btr_search_latch);

	mem_free(folds);
	mem_free(recs);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}
 
1391
 
 
1392
/********************************************************************//**
 
1393
Moves or deletes hash entries for moved records. If new_page is already hashed,
 
1394
then the hash index for page, if any, is dropped. If new_page is not hashed,
 
1395
and page is hashed, then a new hash index is built to new_page with the same
 
1396
parameters as page (this often happens when a page is split). */
 
1397
UNIV_INTERN
 
1398
void
 
1399
btr_search_move_or_delete_hash_entries(
 
1400
/*===================================*/
 
1401
        buf_block_t*    new_block,      /*!< in: records are copied
 
1402
                                        to this page */
 
1403
        buf_block_t*    block,          /*!< in: index page from which
 
1404
                                        records were copied, and the
 
1405
                                        copied records will be deleted
 
1406
                                        from this page */
 
1407
        dict_index_t*   index)          /*!< in: record descriptor */
 
1408
{
 
1409
        ulint   n_fields;
 
1410
        ulint   n_bytes;
 
1411
        ibool   left_side;
 
1412
 
 
1413
#ifdef UNIV_SYNC_DEBUG
 
1414
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 
1415
        ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
 
1416
#endif /* UNIV_SYNC_DEBUG */
 
1417
        ut_a(!new_block->is_hashed || new_block->index == index);
 
1418
        ut_a(!block->is_hashed || block->index == index);
 
1419
        ut_a(!(new_block->is_hashed || block->is_hashed)
 
1420
             || !dict_index_is_ibuf(index));
 
1421
 
 
1422
        rw_lock_s_lock(&btr_search_latch);
 
1423
 
 
1424
        if (new_block->is_hashed) {
 
1425
 
 
1426
                rw_lock_s_unlock(&btr_search_latch);
 
1427
 
 
1428
                btr_search_drop_page_hash_index(block);
 
1429
 
 
1430
                return;
 
1431
        }
 
1432
 
 
1433
        if (block->is_hashed) {
 
1434
 
 
1435
                n_fields = block->curr_n_fields;
 
1436
                n_bytes = block->curr_n_bytes;
 
1437
                left_side = block->curr_left_side;
 
1438
 
 
1439
                new_block->n_fields = block->curr_n_fields;
 
1440
                new_block->n_bytes = block->curr_n_bytes;
 
1441
                new_block->left_side = left_side;
 
1442
 
 
1443
                rw_lock_s_unlock(&btr_search_latch);
 
1444
 
 
1445
                ut_a(n_fields + n_bytes > 0);
 
1446
 
 
1447
                btr_search_build_page_hash_index(index, new_block, n_fields,
 
1448
                                                 n_bytes, left_side);
 
1449
                ut_ad(n_fields == block->curr_n_fields);
 
1450
                ut_ad(n_bytes == block->curr_n_bytes);
 
1451
                ut_ad(left_side == block->curr_left_side);
 
1452
                return;
 
1453
        }
 
1454
 
 
1455
        rw_lock_s_unlock(&btr_search_latch);
 
1456
}
 
1457
 
 
1458
/********************************************************************//**
 
1459
Updates the page hash index when a single record is deleted from a page. */
 
1460
UNIV_INTERN
 
1461
void
 
1462
btr_search_update_hash_on_delete(
 
1463
/*=============================*/
 
1464
        btr_cur_t*      cursor) /*!< in: cursor which was positioned on the
 
1465
                                record to delete using btr_cur_search_...,
 
1466
                                the record is not yet deleted */
 
1467
{
 
1468
        hash_table_t*   table;
 
1469
        buf_block_t*    block;
 
1470
        rec_t*          rec;
 
1471
        ulint           fold;
 
1472
        dulint          index_id;
 
1473
        ibool           found;
 
1474
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
 
1475
        mem_heap_t*     heap            = NULL;
 
1476
        rec_offs_init(offsets_);
 
1477
 
 
1478
        rec = btr_cur_get_rec(cursor);
 
1479
 
 
1480
        block = btr_cur_get_block(cursor);
 
1481
 
 
1482
#ifdef UNIV_SYNC_DEBUG
 
1483
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 
1484
#endif /* UNIV_SYNC_DEBUG */
 
1485
 
 
1486
        if (!block->is_hashed) {
 
1487
 
 
1488
                return;
 
1489
        }
 
1490
 
 
1491
        ut_a(block->index == cursor->index);
 
1492
        ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
 
1493
        ut_a(!dict_index_is_ibuf(cursor->index));
 
1494
 
 
1495
        table = btr_search_sys->hash_index;
 
1496
 
 
1497
        index_id = cursor->index->id;
 
1498
        fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
 
1499
                                             ULINT_UNDEFINED, &heap),
 
1500
                        block->curr_n_fields, block->curr_n_bytes, index_id);
 
1501
        if (UNIV_LIKELY_NULL(heap)) {
 
1502
                mem_heap_free(heap);
 
1503
        }
 
1504
        rw_lock_x_lock(&btr_search_latch);
 
1505
 
 
1506
        found = ha_search_and_delete_if_found(table, fold, rec);
 
1507
 
 
1508
        rw_lock_x_unlock(&btr_search_latch);
 
1509
}
 
1510
 
 
1511
/********************************************************************//**
 
1512
Updates the page hash index when a single record is inserted on a page. */
 
1513
UNIV_INTERN
 
1514
void
 
1515
btr_search_update_hash_node_on_insert(
 
1516
/*==================================*/
 
1517
        btr_cur_t*      cursor) /*!< in: cursor which was positioned to the
 
1518
                                place to insert using btr_cur_search_...,
 
1519
                                and the new record has been inserted next
 
1520
                                to the cursor */
 
1521
{
 
1522
        hash_table_t*   table;
 
1523
        buf_block_t*    block;
 
1524
        rec_t*          rec;
 
1525
 
 
1526
        rec = btr_cur_get_rec(cursor);
 
1527
 
 
1528
        block = btr_cur_get_block(cursor);
 
1529
 
 
1530
#ifdef UNIV_SYNC_DEBUG
 
1531
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 
1532
#endif /* UNIV_SYNC_DEBUG */
 
1533
 
 
1534
        if (!block->is_hashed) {
 
1535
 
 
1536
                return;
 
1537
        }
 
1538
 
 
1539
        ut_a(block->index == cursor->index);
 
1540
        ut_a(!dict_index_is_ibuf(cursor->index));
 
1541
 
 
1542
        rw_lock_x_lock(&btr_search_latch);
 
1543
 
 
1544
        if ((cursor->flag == BTR_CUR_HASH)
 
1545
            && (cursor->n_fields == block->curr_n_fields)
 
1546
            && (cursor->n_bytes == block->curr_n_bytes)
 
1547
            && !block->curr_left_side) {
 
1548
 
 
1549
                table = btr_search_sys->hash_index;
 
1550
 
 
1551
                ha_search_and_update_if_found(table, cursor->fold, rec,
 
1552
                                              block, page_rec_get_next(rec));
 
1553
 
 
1554
                rw_lock_x_unlock(&btr_search_latch);
 
1555
        } else {
 
1556
                rw_lock_x_unlock(&btr_search_latch);
 
1557
 
 
1558
                btr_search_update_hash_on_insert(cursor);
 
1559
        }
 
1560
}
 
1561
 
 
1562
/********************************************************************//**
Updates the page hash index when a single record is inserted on a page.
The caller must hold an x-latch on the block (asserted below); the
btr_search_latch is acquired lazily, and only if some hash entry actually
needs to be inserted, so that the common no-op case stays cheap. */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t*	table;
	buf_block_t*	block;
	rec_t*		rec;		/* record the cursor is positioned on */
	rec_t*		ins_rec;	/* the newly inserted record */
	rec_t*		next_rec;	/* record following the inserted one */
	dulint		index_id;
	ulint		fold;		/* fold value of rec; only computed
					(and only read) on the non-infimum
					path below */
	ulint		ins_fold;
	ulint		next_fold = 0; /* remove warning (??? bug ???) */
	ulint		n_fields;
	ulint		n_bytes;
	ibool		left_side;
	ibool		locked		= FALSE;	/* TRUE once we hold
							btr_search_latch */
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	table = btr_search_sys->hash_index;

	/* Make sure the hash node heap has room before we possibly insert;
	done before taking any latch. */
	btr_search_check_free_space_in_heap();

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {
		/* Page has no adaptive hash index entries: nothing to do. */

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	index_id = cursor->index->id;

	/* Use the hash-prefix parameters currently in effect for this
	block; they define which record prefix is folded. */
	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	/* left_side: which record of a group with an equal fold value the
	hash index points to -- NOTE(review): semantics inferred from usage
	here; confirm against btr0sea.h comments. */
	left_side = block->curr_left_side;

	ins_rec = page_rec_get_next(rec);
	next_rec = page_rec_get_next(ins_rec);

	/* Compute all needed fold values BEFORE acquiring btr_search_latch,
	to keep the latch hold time short. */
	offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
				  ULINT_UNDEFINED, &heap);
	ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);

	if (!page_rec_is_supremum(next_rec)) {
		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index_id);
	}

	if (!page_rec_is_infimum(rec)) {
		offsets = rec_get_offsets(rec, cursor->index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
	} else {
		/* The insert happened at the start of the page: there is no
		preceding user record to compare folds with. */
		if (left_side) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto check_next_rec;
	}

	/* The inserted record starts a new fold group relative to its
	predecessor: one of the two must get a (new) hash pointer. */
	if (fold != ins_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;
		}

		if (!left_side) {
			ha_insert_for_fold(table, fold, block, rec);
		} else {
			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}
	}

check_next_rec:
	if (page_rec_is_supremum(next_rec)) {
		/* Inserted record is the last user record on the page. */

		if (!left_side) {

			if (!locked) {
				rw_lock_x_lock(&btr_search_latch);

				locked = TRUE;
			}

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto function_exit;
	}

	/* The inserted record and its successor fold differently: the
	boundary between the two fold groups moved. */
	if (ins_fold != next_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;
		}

		if (!left_side) {

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
			/*
			fputs("Hash insert for ", stderr);
			dict_index_name_print(stderr, cursor->index);
			fprintf(stderr, " fold %lu\n", ins_fold);
			*/
		} else {
			ha_insert_for_fold(table, next_fold, block, next_rec);
		}
	}

function_exit:
	/* Free the offsets heap (allocated lazily by rec_get_offsets)
	and release the search latch if we took it. */
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	if (locked) {
		rw_lock_x_unlock(&btr_search_latch);
	}
}
 
1711
 
 
1712
/********************************************************************//**
Validates the search system. Walks every cell of the adaptive hash index
under btr_search_latch + buf_pool mutex, checking that each hash node
points at a live (or being-freed) buffer block and that its stored fold
matches a recomputed fold of the record; then runs ha_validate() over the
table. Both latches are released periodically so other queries can run.
@return	TRUE if ok */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
{
	ha_node_t*	node;
	ulint		n_page_dumps	= 0;	/* cap on buf_page_print()
						calls, to bound log spam */
	ibool		ok		= TRUE;
	ulint		i;
	ulint		cell_count;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;

	/* How many cells to check before temporarily releasing
	btr_search_latch. */
	ulint		chunk_size = 10000;

	rec_offs_init(offsets_);

	rw_lock_x_lock(&btr_search_latch);
	buf_pool_mutex_enter();

	cell_count = hash_get_n_cells(btr_search_sys->hash_index);

	for (i = 0; i < cell_count; i++) {
		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if ((i != 0) && ((i % chunk_size) == 0)) {
			/* Release in reverse order of acquisition,
			reacquire in the original order. */
			buf_pool_mutex_exit();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_mutex_enter();
		}

		node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

		for (; node != NULL; node = node->next) {
			const buf_block_t*	block
				= buf_block_align(node->data);
			const buf_block_t*	hash_block;

			if (UNIV_LIKELY(buf_block_get_state(block)
					== BUF_BLOCK_FILE_PAGE)) {

				/* The space and offset are only valid
				for file blocks.  It is possible that
				the block is being freed
				(BUF_BLOCK_REMOVE_HASH, see the
				assertion and the comment below) */
				hash_block = buf_block_hash_get(
					buf_block_get_space(block),
					buf_block_get_page_no(block));
			} else {
				hash_block = NULL;
			}

			if (hash_block) {
				/* The block must map back to itself in
				buf_pool->page_hash. */
				ut_a(hash_block == block);
			} else {
				/* When a block is being freed,
				buf_LRU_search_and_free_block() first
				removes the block from
				buf_pool->page_hash by calling
				buf_LRU_block_remove_hashed_page().
				After that, it invokes
				btr_search_drop_page_hash_index() to
				remove the block from
				btr_search_sys->hash_index. */

				ut_a(buf_block_get_state(block)
				     == BUF_BLOCK_REMOVE_HASH);
			}

			ut_a(!dict_index_is_ibuf(block->index));

			offsets = rec_get_offsets((const rec_t*) node->data,
						  block->index, offsets,
						  block->curr_n_fields
						  + (block->curr_n_bytes > 0),
						  &heap);

			/* Recompute the fold of the pointed-to record with
			the block's current hash-prefix parameters; a
			mismatch (or a non-hashed block still holding
			entries) means the index is corrupt. */
			if (!block->is_hashed || node->fold
			    != rec_fold((rec_t*)(node->data),
					offsets,
					block->curr_n_fields,
					block->curr_n_bytes,
					btr_page_get_index_id(block->frame))) {
				const page_t*	page = block->frame;

				ok = FALSE;
				ut_print_timestamp(stderr);

				fprintf(stderr,
					"  InnoDB: Error in an adaptive hash"
					" index pointer to page %lu\n"
					"InnoDB: ptr mem address %p"
					" index id %lu %lu,"
					" node fold %lu, rec fold %lu\n",
					(ulong) page_get_page_no(page),
					node->data,
					(ulong) ut_dulint_get_high(
						btr_page_get_index_id(page)),
					(ulong) ut_dulint_get_low(
						btr_page_get_index_id(page)),
					(ulong) node->fold,
					(ulong) rec_fold((rec_t*)(node->data),
							 offsets,
							 block->curr_n_fields,
							 block->curr_n_bytes,
							 btr_page_get_index_id(
								 page)));

				fputs("InnoDB: Record ", stderr);
				rec_print_new(stderr, (rec_t*)node->data,
					      offsets);
				fprintf(stderr, "\nInnoDB: on that page."
					" Page mem address %p, is hashed %lu,"
					" n fields %lu, n bytes %lu\n"
					"InnoDB: side %lu\n",
					(void*) page, (ulong) block->is_hashed,
					(ulong) block->curr_n_fields,
					(ulong) block->curr_n_bytes,
					(ulong) block->curr_left_side);

				/* Dump at most 20 pages per call. */
				if (n_page_dumps < 20) {
					buf_page_print(page, 0);
					n_page_dumps++;
				}
			}
		}
	}

	/* Second pass: structural validation of the hash table itself,
	chunk by chunk. */
	for (i = 0; i < cell_count; i += chunk_size) {
		ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if (i != 0) {
			buf_pool_mutex_exit();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_mutex_enter();
		}

		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
			ok = FALSE;
		}
	}

	buf_pool_mutex_exit();
	rw_lock_x_unlock(&btr_search_latch);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	return(ok);
}