~ubuntu-branches/ubuntu/precise/mysql-5.1/precise


Viewing changes to storage/innodb_plugin/btr/btr0sea.c

  • Committer: Bazaar Package Importer
  • Author(s): Norbert Tretkowski
  • Date: 2010-03-17 14:56:02 UTC
  • Revision ID: james.westby@ubuntu.com-20100317145602-x7e30l1b2sb5s6w6
Tags: upstream-5.1.45
Import upstream version 5.1.45

/*****************************************************************************

Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/********************************************************************//**
@file btr/btr0sea.c
The index tree adaptive search

Created 2/17/1996 Heikki Tuuri
*************************************************************************/

#include "btr0sea.h"
#ifdef UNIV_NONINL
#include "btr0sea.ic"
#endif

#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"

/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
UNIV_INTERN char                btr_search_enabled      = TRUE;

/** Mutex protecting btr_search_enabled */
static mutex_t                  btr_search_enabled_mutex;

/** A dummy variable to fool the compiler */
UNIV_INTERN ulint               btr_search_this_is_zero = 0;

#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
UNIV_INTERN ulint               btr_search_n_succ       = 0;
/** Number of failed adaptive hash index lookups */
UNIV_INTERN ulint               btr_search_n_hash_fail  = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/** padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte                btr_sea_pad1[64];

/** The latch protecting the adaptive search system: this latch protects
(1) the positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
indexes. */

/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t*          btr_search_latch_temp;

/** padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte                btr_sea_pad2[64];

/** The adaptive hash index */
UNIV_INTERN btr_search_sys_t*   btr_search_sys;

/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT     16

/** The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT          100

/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
        dict_index_t*   index,  /*!< in: index for which to build, or NULL if
                                not known */
        buf_block_t*    block,  /*!< in: index page, s- or x-latched */
        ulint           n_fields,/*!< in: hash this many full fields */
        ulint           n_bytes,/*!< in: hash this many bytes from the next
                                field */
        ibool           left_side);/*!< in: hash for searches from left side? */

/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. This function
checks that there is a free buffer frame allocated for the hash table heap
in the btr search system. If not, it allocates a free frame for the heap.
This check makes it probable that, when we have reserved the btr search
system latch and need to allocate a new node for the hash table, the
allocation will succeed. However, the check does not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
        hash_table_t*   table;
        mem_heap_t*     heap;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        table = btr_search_sys->hash_index;

        heap = table->heap;

        /* Note that we peek the value of heap->free_block without reserving
        the latch: this is ok, because we will not guarantee that there will
        be enough free space in the hash table. */

        if (heap->free_block == NULL) {
                buf_block_t*    block = buf_block_alloc(0);

                rw_lock_x_lock(&btr_search_latch);

                if (heap->free_block == NULL) {
                        heap->free_block = block;
                } else {
                        buf_block_free(block);
                }

                rw_lock_x_unlock(&btr_search_latch);
        }
}
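
/* Editorial note, not part of the original source: the function above
pre-allocates outside the latch and then double-checks under it.  A
minimal standalone sketch of the same pattern, assuming POSIX threads;
all names below are illustrative only. */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t latch = PTHREAD_RWLOCK_INITIALIZER;
static void*            free_block = NULL;

static void
check_free_space(void)
{
        if (free_block == NULL) {               /* unlatched peek; may
                                                be stale, which is ok */
                void*   block = malloc(4096);   /* allocate BEFORE
                                                taking the latch */

                pthread_rwlock_wrlock(&latch);

                if (free_block == NULL) {       /* re-check under latch */
                        free_block = block;
                } else {
                        free(block);            /* lost the race */
                }

                pthread_rwlock_unlock(&latch);
        }
}
#endif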
 
/*****************************************************************//**
Creates and initializes the adaptive search system at a database start. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
        ulint   hash_size)      /*!< in: hash index hash table size */
{
        /* We allocate the search latch from dynamic memory:
        see above at the global variable definition */

        btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));

        rw_lock_create(&btr_search_latch, SYNC_SEARCH_SYS);
        mutex_create(&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);

        btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));

        btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
}

/*****************************************************************//**
Frees the adaptive search system at a database shutdown. */
UNIV_INTERN
void
btr_search_sys_free(void)
/*=====================*/
{
        mem_free(btr_search_latch_temp);
        btr_search_latch_temp = NULL;
        mem_heap_free(btr_search_sys->hash_index->heap);
        hash_table_free(btr_search_sys->hash_index);
        mem_free(btr_search_sys);
        btr_search_sys = NULL;
}

/********************************************************************//**
Disables the adaptive hash search system and empties the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
        mutex_enter(&btr_search_enabled_mutex);
        rw_lock_x_lock(&btr_search_latch);

        btr_search_enabled = FALSE;

        /* Clear all block->is_hashed flags and remove all entries
        from btr_search_sys->hash_index. */
        buf_pool_drop_hash_index();

        /* btr_search_enabled_mutex should guarantee this. */
        ut_ad(!btr_search_enabled);

        rw_lock_x_unlock(&btr_search_latch);
        mutex_exit(&btr_search_enabled_mutex);
}

/********************************************************************//**
Enables the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
        mutex_enter(&btr_search_enabled_mutex);
        rw_lock_x_lock(&btr_search_latch);

        btr_search_enabled = TRUE;

        rw_lock_x_unlock(&btr_search_latch);
        mutex_exit(&btr_search_enabled_mutex);
}
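
/* Editorial note, not part of the original source: disable/enable take
both btr_search_enabled_mutex (serializing togglers against each other)
and an x-latch on btr_search_latch (excluding searches that read the
flag under an s-latch).  A standalone sketch of the same discipline,
assuming POSIX threads; names are illustrative only. */
#if 0
#include <pthread.h>

static pthread_mutex_t  cfg_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t latch     = PTHREAD_RWLOCK_INITIALIZER;
static int              enabled   = 1;

static void
set_enabled(int value)
{
        pthread_mutex_lock(&cfg_mutex);         /* one toggler at a time */
        pthread_rwlock_wrlock(&latch);          /* no reader in flight */

        enabled = value;

        pthread_rwlock_unlock(&latch);
        pthread_mutex_unlock(&cfg_mutex);
}

static int
get_enabled(void)
{
        int     value;

        pthread_rwlock_rdlock(&latch);          /* reader side */
        value = enabled;
        pthread_rwlock_unlock(&latch);

        return(value);
}
#endif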
 
/*****************************************************************//**
Creates and initializes a search info struct.
@return own: search info struct */
UNIV_INTERN
btr_search_t*
btr_search_info_create(
/*===================*/
        mem_heap_t*     heap)   /*!< in: heap where created */
{
        btr_search_t*   info;

        info = mem_heap_alloc(heap, sizeof(btr_search_t));

#ifdef UNIV_DEBUG
        info->magic_n = BTR_SEARCH_MAGIC_N;
#endif /* UNIV_DEBUG */

        info->ref_count = 0;
        info->root_guess = NULL;

        info->hash_analysis = 0;
        info->n_hash_potential = 0;

        info->last_hash_succ = FALSE;

#ifdef UNIV_SEARCH_PERF_STAT
        info->n_hash_succ = 0;
        info->n_hash_fail = 0;
        info->n_patt_succ = 0;
        info->n_searches = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

        /* Set some sensible values */
        info->n_fields = 1;
        info->n_bytes = 0;

        info->left_side = TRUE;

        return(info);
}

/*****************************************************************//**
Returns the value of ref_count. The value is protected by
btr_search_latch.
@return ref_count value. */
UNIV_INTERN
ulint
btr_search_info_get_ref_count(
/*==========================*/
        btr_search_t*   info)   /*!< in: search info. */
{
        ulint ret;

        ut_ad(info);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        rw_lock_s_lock(&btr_search_latch);
        ret = info->ref_count;
        rw_lock_s_unlock(&btr_search_latch);

        return(ret);
}

/*********************************************************************//**
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
        btr_search_t*   info,   /*!< in/out: search info */
        btr_cur_t*      cursor) /*!< in: cursor which was just positioned */
{
        dict_index_t*   index;
        ulint           n_unique;
        int             cmp;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        index = cursor->index;

        if (dict_index_is_ibuf(index)) {
                /* So many deletes are performed on an insert buffer tree
                that we do not consider a hash index useful on it: */

                return;
        }

        n_unique = dict_index_get_n_unique_in_tree(index);

        if (info->n_hash_potential == 0) {

                goto set_new_recomm;
        }

        /* Test if the search would have succeeded using the recommended
        hash prefix */

        if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
                info->n_hash_potential++;

                return;
        }

        cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
                          cursor->low_match, cursor->low_bytes);

        if (info->left_side ? cmp <= 0 : cmp > 0) {

                goto set_new_recomm;
        }

        cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
                          cursor->up_match, cursor->up_bytes);

        if (info->left_side ? cmp <= 0 : cmp > 0) {

                goto increment_potential;
        }

set_new_recomm:
        /* We have to set a new recommendation; skip the hash analysis
        for a while to avoid unnecessary CPU time usage when there is no
        chance for success */

        info->hash_analysis = 0;

        cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
                          cursor->low_match, cursor->low_bytes);
        if (cmp == 0) {
                info->n_hash_potential = 0;

                /* For extra safety, we set some sensible values here */

                info->n_fields = 1;
                info->n_bytes = 0;

                info->left_side = TRUE;

        } else if (cmp > 0) {
                info->n_hash_potential = 1;

                if (cursor->up_match >= n_unique) {

                        info->n_fields = n_unique;
                        info->n_bytes = 0;

                } else if (cursor->low_match < cursor->up_match) {

                        info->n_fields = cursor->low_match + 1;
                        info->n_bytes = 0;
                } else {
                        info->n_fields = cursor->low_match;
                        info->n_bytes = cursor->low_bytes + 1;
                }

                info->left_side = TRUE;
        } else {
                info->n_hash_potential = 1;

                if (cursor->low_match >= n_unique) {

                        info->n_fields = n_unique;
                        info->n_bytes = 0;

                } else if (cursor->low_match > cursor->up_match) {

                        info->n_fields = cursor->up_match + 1;
                        info->n_bytes = 0;
                } else {
                        info->n_fields = cursor->up_match;
                        info->n_bytes = cursor->up_bytes + 1;
                }

                info->left_side = FALSE;
        }
}
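
/* Editorial worked example, not part of the original source: suppose
n_unique = 3 and a search ends with cursor->low_match = 1 and
cursor->up_match = 2 (so ut_pair_cmp() > 0 above).  Since up_match is
below n_unique and low_match < up_match, the new recommendation becomes
n_fields = low_match + 1 = 2, n_bytes = 0, left_side = TRUE: a prefix
of two full fields is enough to distinguish the records on the two
sides of the cursor position. */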
 
/*********************************************************************//**
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent.
@return TRUE if building a (new) hash index on the block is recommended */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
        btr_search_t*   info,   /*!< in: search info */
        buf_block_t*    block,  /*!< in: buffer block */
        btr_cur_t*      cursor __attribute__((unused)))
                                /*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
        ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
              || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
        ut_ad(cursor);

        info->last_hash_succ = FALSE;

        ut_a(buf_block_state_valid(block));
        ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

        if ((block->n_hash_helps > 0)
            && (info->n_hash_potential > 0)
            && (block->n_fields == info->n_fields)
            && (block->n_bytes == info->n_bytes)
            && (block->left_side == info->left_side)) {

                if ((block->is_hashed)
                    && (block->curr_n_fields == info->n_fields)
                    && (block->curr_n_bytes == info->n_bytes)
                    && (block->curr_left_side == info->left_side)) {

                        /* The search would presumably have succeeded using
                        the hash index */

                        info->last_hash_succ = TRUE;
                }

                block->n_hash_helps++;
        } else {
                block->n_hash_helps = 1;
                block->n_fields = info->n_fields;
                block->n_bytes = info->n_bytes;
                block->left_side = info->left_side;
        }

#ifdef UNIV_DEBUG
        if (cursor->index->table->does_not_fit_in_memory) {
                block->n_hash_helps = 0;
        }
#endif /* UNIV_DEBUG */

        if ((block->n_hash_helps > page_get_n_recs(block->frame)
             / BTR_SEARCH_PAGE_BUILD_LIMIT)
            && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

                if ((!block->is_hashed)
                    || (block->n_hash_helps
                        > 2 * page_get_n_recs(block->frame))
                    || (block->n_fields != block->curr_n_fields)
                    || (block->n_bytes != block->curr_n_bytes)
                    || (block->left_side != block->curr_left_side)) {

                        /* Build a new hash index on the page */

                        return(TRUE);
                }
        }

        return(FALSE);
}
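
/* Editorial worked example, not part of the original source: with
BTR_SEARCH_PAGE_BUILD_LIMIT = 16 and BTR_SEARCH_BUILD_LIMIT = 100, a
page holding 160 user records becomes a build candidate once
block->n_hash_helps exceeds 160 / 16 = 10 and the index-wide
info->n_hash_potential has reached 100.  Even then TRUE is returned
only if the page is not yet hashed with exactly these parameters, or
n_hash_helps has grown past 2 * 160 = 320. */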
 
/*********************************************************************//**
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
        btr_search_t*   info,   /*!< in: search info */
        buf_block_t*    block,  /*!< in: buffer block where cursor positioned */
        btr_cur_t*      cursor) /*!< in: cursor */
{
        ulint   fold;
        rec_t*  rec;
        dulint  index_id;

        ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
              || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
        ut_ad(page_align(btr_cur_get_rec(cursor))
              == buf_block_get_frame(block));

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(!dict_index_is_ibuf(cursor->index));

        if ((info->n_hash_potential > 0)
            && (block->curr_n_fields == info->n_fields)
            && (block->curr_n_bytes == info->n_bytes)
            && (block->curr_left_side == info->left_side)) {
                mem_heap_t*     heap            = NULL;
                ulint           offsets_[REC_OFFS_NORMAL_SIZE];
                rec_offs_init(offsets_);

                rec = btr_cur_get_rec(cursor);

                if (!page_rec_is_user_rec(rec)) {

                        return;
                }

                index_id = cursor->index->id;
                fold = rec_fold(rec,
                                rec_get_offsets(rec, cursor->index, offsets_,
                                                ULINT_UNDEFINED, &heap),
                                block->curr_n_fields,
                                block->curr_n_bytes, index_id);
                if (UNIV_LIKELY_NULL(heap)) {
                        mem_heap_free(heap);
                }
#ifdef UNIV_SYNC_DEBUG
                ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

                ha_insert_for_fold(btr_search_sys->hash_index, fold,
                                   block, rec);
        }
}

/*********************************************************************//**
Updates the search info. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
        btr_search_t*   info,   /*!< in/out: search info */
        btr_cur_t*      cursor) /*!< in: cursor which was just positioned */
{
        buf_block_t*    block;
        ibool           build_index;
        ulint*          params;
        ulint*          params2;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        block = btr_cur_get_block(cursor);

        /* NOTE that the following two function calls do NOT protect
        info or block->n_fields etc. with any semaphore, to save CPU time!
        We cannot assume the fields are consistent when we return from
        those functions! */

        btr_search_info_update_hash(info, cursor);

        build_index = btr_search_update_block_hash_info(info, block, cursor);

        if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

                btr_search_check_free_space_in_heap();
        }

        if (cursor->flag == BTR_CUR_HASH_FAIL) {
                /* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
                btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

                rw_lock_x_lock(&btr_search_latch);

                btr_search_update_hash_ref(info, block, cursor);

                rw_lock_x_unlock(&btr_search_latch);
        }

        if (build_index) {
                /* Note that since we did not protect block->n_fields etc.
                with any semaphore, the values can be inconsistent. We have
                to check inside the function call that they make sense. We
                also malloc an array and store the values there to make sure
                the compiler does not let the function call parameters change
                inside the called function. It might be that the compiler
                would optimize the call just to pass pointers to block. */

                params = mem_alloc(3 * sizeof(ulint));
                params[0] = block->n_fields;
                params[1] = block->n_bytes;
                params[2] = block->left_side;

                /* Make sure the compiler cannot deduce the values and do
                optimizations */

                params2 = params + btr_search_this_is_zero;

                btr_search_build_page_hash_index(cursor->index,
                                                 block,
                                                 params2[0],
                                                 params2[1],
                                                 params2[2]);
                mem_free(params);
        }
}
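
/* Editorial note, not part of the original source: the assignment
params2 = params + btr_search_this_is_zero above hides the array behind
an offset the compiler cannot prove to be zero, so the three call
arguments are genuinely read from the malloc'd snapshot instead of
being re-fetched from the unprotected block fields.  A minimal
standalone sketch of the same idea; names are illustrative only. */
#if 0
extern ulint    opaque_zero;            /* defined as 0 elsewhere, so the
                                        compiler cannot constant-fold it */

static void
call_with_snapshot(ulint a, ulint b, ulint c)
{
        ulint   vals[3];
        ulint*  v2;

        vals[0] = a;
        vals[1] = b;
        vals[2] = c;

        v2 = vals + opaque_zero;        /* opaque offset into snapshot */

        consume(v2[0], v2[1], v2[2]);   /* consume() is hypothetical */
}
#endif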
 
/******************************************************************//**
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible values.
@return TRUE if success */
static
ibool
btr_search_check_guess(
/*===================*/
        btr_cur_t*      cursor, /*!< in: guessed cursor position */
        ibool           can_only_compare_to_cursor_rec,
                                /*!< in: if we do not have a latch on the page
                                of cursor, but only a latch on
                                btr_search_latch, then ONLY the columns
                                of the record UNDER the cursor are
                                protected, not the next or previous record
                                in the chain: we cannot look at the next or
                                previous record to check our guess! */
        const dtuple_t* tuple,  /*!< in: data tuple */
        ulint           mode,   /*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
                                or PAGE_CUR_GE */
        mtr_t*          mtr)    /*!< in: mtr */
{
        rec_t*          rec;
        ulint           n_unique;
        ulint           match;
        ulint           bytes;
        int             cmp;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;
        ibool           success         = FALSE;
        rec_offs_init(offsets_);

        n_unique = dict_index_get_n_unique_in_tree(cursor->index);

        rec = btr_cur_get_rec(cursor);

        ut_ad(page_rec_is_user_rec(rec));

        match = 0;
        bytes = 0;

        offsets = rec_get_offsets(rec, cursor->index, offsets,
                                  n_unique, &heap);
        cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
                                             offsets, &match, &bytes);

        if (mode == PAGE_CUR_GE) {
                if (cmp == 1) {
                        goto exit_func;
                }

                cursor->up_match = match;

                if (match >= n_unique) {
                        success = TRUE;
                        goto exit_func;
                }
        } else if (mode == PAGE_CUR_LE) {
                if (cmp == -1) {
                        goto exit_func;
                }

                cursor->low_match = match;

        } else if (mode == PAGE_CUR_G) {
                if (cmp != -1) {
                        goto exit_func;
                }
        } else if (mode == PAGE_CUR_L) {
                if (cmp != 1) {
                        goto exit_func;
                }
        }

        if (can_only_compare_to_cursor_rec) {
                /* Since we could not determine if our guess is right just by
                looking at the record under the cursor, return FALSE */
                goto exit_func;
        }

        match = 0;
        bytes = 0;

        if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
                rec_t*  prev_rec;

                ut_ad(!page_rec_is_infimum(rec));

                prev_rec = page_rec_get_prev(rec);

                if (page_rec_is_infimum(prev_rec)) {
                        success = btr_page_get_prev(page_align(prev_rec), mtr)
                                == FIL_NULL;

                        goto exit_func;
                }

                offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
                                          n_unique, &heap);
                cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
                                                     offsets, &match, &bytes);
                if (mode == PAGE_CUR_GE) {
                        success = cmp == 1;
                } else {
                        success = cmp != -1;
                }

                goto exit_func;
        } else {
                rec_t*  next_rec;

                ut_ad(!page_rec_is_supremum(rec));

                next_rec = page_rec_get_next(rec);

                if (page_rec_is_supremum(next_rec)) {
                        if (btr_page_get_next(page_align(next_rec), mtr)
                            == FIL_NULL) {

                                cursor->up_match = 0;
                                success = TRUE;
                        }

                        goto exit_func;
                }

                offsets = rec_get_offsets(next_rec, cursor->index, offsets,
                                          n_unique, &heap);
                cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
                                                     offsets, &match, &bytes);
                if (mode == PAGE_CUR_LE) {
                        success = cmp == -1;
                        cursor->up_match = match;
                } else {
                        success = cmp != 1;
                }
        }
exit_func:
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
        return(success);
}
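
/* Editorial worked example, not part of the original source: for mode
== PAGE_CUR_GE the guessed record itself must not be smaller than the
tuple (cmp == 1 above rejects the guess), and its predecessor must be
smaller, so the code accepts when cmp(tuple, prev_rec) == 1.  When
prev_rec is the page infimum, the guess can only be accepted if no
previous page exists, i.e. btr_page_get_prev() == FIL_NULL. */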
 
/******************************************************************//**
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@return TRUE if succeeded */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
        dict_index_t*   index,          /*!< in: index */
        btr_search_t*   info,           /*!< in: index search info */
        const dtuple_t* tuple,          /*!< in: logical record */
        ulint           mode,           /*!< in: PAGE_CUR_L, ... */
        ulint           latch_mode,     /*!< in: BTR_SEARCH_LEAF, ...;
                                        NOTE that only if has_search_latch
                                        is 0, we will have a latch set on
                                        the cursor page, otherwise we assume
                                        the caller uses his search latch
                                        to protect the record! */
        btr_cur_t*      cursor,         /*!< out: tree cursor */
        ulint           has_search_latch,/*!< in: latch mode the caller
                                        currently has on btr_search_latch:
                                        RW_S_LATCH, RW_X_LATCH, or 0 */
        mtr_t*          mtr)            /*!< in: mtr */
{
        buf_block_t*    block;
        rec_t*          rec;
        ulint           fold;
        dulint          index_id;
#ifdef notdefined
        btr_cur_t       cursor2;
        btr_pcur_t      pcur;
#endif
        ut_ad(index && info && tuple && cursor && mtr);
        ut_ad((latch_mode == BTR_SEARCH_LEAF)
              || (latch_mode == BTR_MODIFY_LEAF));

        /* Note that, for efficiency, the struct info may not be protected by
        any latch here! */

        if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

                return(FALSE);
        }

        cursor->n_fields = info->n_fields;
        cursor->n_bytes = info->n_bytes;

        if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
                          < cursor->n_fields + (cursor->n_bytes > 0))) {

                return(FALSE);
        }

        index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
        info->n_hash_succ++;
#endif
        fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

        cursor->fold = fold;
        cursor->flag = BTR_CUR_HASH;

        if (UNIV_LIKELY(!has_search_latch)) {
                rw_lock_s_lock(&btr_search_latch);

                if (UNIV_UNLIKELY(!btr_search_enabled)) {
                        goto failure_unlock;
                }
        }

        ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
        ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);

        rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);

        if (UNIV_UNLIKELY(!rec)) {
                goto failure_unlock;
        }

        block = buf_block_align(rec);

        if (UNIV_LIKELY(!has_search_latch)) {

                if (UNIV_UNLIKELY(
                            !buf_page_get_known_nowait(latch_mode, block,
                                                       BUF_MAKE_YOUNG,
                                                       __FILE__, __LINE__,
                                                       mtr))) {
                        goto failure_unlock;
                }

                rw_lock_s_unlock(&btr_search_latch);

                buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
        }

        if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
                ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);

                if (UNIV_LIKELY(!has_search_latch)) {

                        btr_leaf_page_release(block, latch_mode, mtr);
                }

                goto failure;
        }

        ut_ad(page_rec_is_user_rec(rec));

        btr_cur_position(index, rec, block, cursor);

        /* Check the validity of the guess within the page */

        /* If we only have the latch on btr_search_latch, not on the
        page, it only protects the columns of the record the cursor
        is positioned on. We cannot look at the next or the previous
        record to determine if our guess for the cursor position is
        right. */
        if (UNIV_EXPECT
            (ut_dulint_cmp(index_id, btr_page_get_index_id(block->frame)), 0)
            || !btr_search_check_guess(cursor,
                                       has_search_latch,
                                       tuple, mode, mtr)) {
                if (UNIV_LIKELY(!has_search_latch)) {
                        btr_leaf_page_release(block, latch_mode, mtr);
                }

                goto failure;
        }

        if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

                info->n_hash_potential++;
        }

#ifdef notdefined
        /* These lines of code can be used in a debug version to check
        the correctness of the searched cursor position: */

        info->last_hash_succ = FALSE;

        /* Currently, does not work if the following fails: */
        ut_ad(!has_search_latch);

        btr_leaf_page_release(block, latch_mode, mtr);

        btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
                                    &cursor2, 0, mtr);
        if (mode == PAGE_CUR_GE
            && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

                /* If mode is PAGE_CUR_GE, then the binary search
                in the index tree may actually take us to the supremum
                of the previous page */

                info->last_hash_succ = FALSE;

                btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
                                          &pcur, mtr);
                ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
        } else {
                ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
        }

        /* NOTE that it is theoretically possible that the above assertions
        fail if the page of the cursor gets removed from the buffer pool
        meanwhile! Thus it might not be a bug. */
#endif
        info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
        btr_search_n_succ++;
#endif
        if (UNIV_LIKELY(!has_search_latch)
            && buf_page_peek_if_too_old(&block->page)) {

                buf_page_make_young(&block->page);
        }

        /* Increment the page get statistics though we did not really
        fix the page: for user info only */

        buf_pool->stat.n_page_gets++;

        return(TRUE);

        /*-------------------------------------------*/
failure_unlock:
        if (UNIV_LIKELY(!has_search_latch)) {
                rw_lock_s_unlock(&btr_search_latch);
        }
failure:
        cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
        info->n_hash_fail++;

        if (info->n_hash_succ > 0) {
                info->n_hash_succ--;
        }
#endif
        info->last_hash_succ = FALSE;

        return(FALSE);
}
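
/* Editorial note, not part of the original source: the function above
is a "probe by fold, then validate" lookup.  A standalone sketch of the
control flow, where hash_get() and guess_is_valid() are hypothetical
helpers standing in for ha_search_and_get_data() and
btr_search_check_guess(): */
#if 0
static const rec_t*
hash_probe(ulint fold, const dtuple_t* tuple, ulint mode)
{
        const rec_t*    rec = hash_get(fold);   /* O(1) probe */

        if (rec == NULL || !guess_is_valid(rec, tuple, mode)) {

                return(NULL);           /* caller falls back to an
                                        ordinary B-tree descent */
        }

        return(rec);                    /* guess confirmed */
}
#endif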
 
/********************************************************************//**
Drops a page hash index. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
        buf_block_t*    block)  /*!< in: block containing index page,
                                s- or x-latched, or an index page
                                for which we know that
                                block->buf_fix_count == 0 */
{
        hash_table_t*           table;
        ulint                   n_fields;
        ulint                   n_bytes;
        const page_t*           page;
        const rec_t*            rec;
        ulint                   fold;
        ulint                   prev_fold;
        dulint                  index_id;
        ulint                   n_cached;
        ulint                   n_recs;
        ulint*                  folds;
        ulint                   i;
        mem_heap_t*             heap;
        const dict_index_t*     index;
        ulint*                  offsets;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
        rw_lock_s_lock(&btr_search_latch);
        page = block->frame;

        if (UNIV_LIKELY(!block->is_hashed)) {

                rw_lock_s_unlock(&btr_search_latch);

                return;
        }

        table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
              || rw_lock_own(&(block->lock), RW_LOCK_EX)
              || (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

        n_fields = block->curr_n_fields;
        n_bytes = block->curr_n_bytes;
        index = block->index;
        ut_a(!dict_index_is_ibuf(index));

        /* NOTE: The fields of block must not be accessed after
        releasing btr_search_latch, as the index page might only
        be s-latched! */

        rw_lock_s_unlock(&btr_search_latch);

        ut_a(n_fields + n_bytes > 0);

        n_recs = page_get_n_recs(page);

        /* Calculate and cache fold values into an array for fast deletion
        from the hash index */

        folds = mem_alloc(n_recs * sizeof(ulint));

        n_cached = 0;

        rec = page_get_infimum_rec(page);
        rec = page_rec_get_next_low(rec, page_is_comp(page));

        index_id = btr_page_get_index_id(page);

        ut_a(0 == ut_dulint_cmp(index_id, index->id));

        prev_fold = 0;

        heap = NULL;
        offsets = NULL;

        while (!page_rec_is_supremum(rec)) {
                offsets = rec_get_offsets(rec, index, offsets,
                                          n_fields + (n_bytes > 0), &heap);
                ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
                fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

                if (fold == prev_fold && prev_fold != 0) {

                        goto next_rec;
                }

                /* Remove all hash nodes pointing to this page from the
                hash chain */

                folds[n_cached] = fold;
                n_cached++;
next_rec:
                rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
                prev_fold = fold;
        }

        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }

        rw_lock_x_lock(&btr_search_latch);

        if (UNIV_UNLIKELY(!block->is_hashed)) {
                /* Someone else has meanwhile dropped the hash index */

                goto cleanup;
        }

        ut_a(block->index == index);

        if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
            || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

                /* Someone else has meanwhile built a new hash index on the
                page, with different parameters */

                rw_lock_x_unlock(&btr_search_latch);

                mem_free(folds);
                goto retry;
        }

        for (i = 0; i < n_cached; i++) {

                ha_remove_all_nodes_to_page(table, folds[i], page);
        }

        ut_a(index->search_info->ref_count > 0);
        index->search_info->ref_count--;

        block->is_hashed = FALSE;
        block->index = NULL;

cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
        if (UNIV_UNLIKELY(block->n_pointers)) {
                /* Corruption */
                ut_print_timestamp(stderr);
                fprintf(stderr,
                        "  InnoDB: Corruption of adaptive hash index."
                        " After dropping\n"
                        "InnoDB: the hash index to a page of %s,"
                        " still %lu hash nodes remain.\n",
                        index->name, (ulong) block->n_pointers);
                rw_lock_x_unlock(&btr_search_latch);

                btr_search_validate();
        } else {
                rw_lock_x_unlock(&btr_search_latch);
        }
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
        rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

        mem_free(folds);
}
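
/* Editorial note, not part of the original source: the retry loop above
is a "compute outside the latch, re-validate inside" pattern.  The fold
values are collected while holding only an s-latch, so if another
thread rebuilt the hash index with different n_fields/n_bytes before
the x-latch was acquired, the collected folds are stale and the whole
pass is repeated from scratch. */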
 
/********************************************************************//**
Drops a page hash index when a page is freed from a fseg to the file system.
Drops the possible hash index if the page happens to be in the buffer pool. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
/*=================================*/
        ulint   space,          /*!< in: space id */
        ulint   zip_size,       /*!< in: compressed page size in bytes
                                or 0 for uncompressed pages */
        ulint   page_no)        /*!< in: page number */
{
        buf_block_t*    block;
        mtr_t           mtr;

        if (!buf_page_peek_if_search_hashed(space, page_no)) {

                return;
        }

        mtr_start(&mtr);

        /* We assume that if the caller has a latch on the page, then the
        caller has already dropped the hash index for the page, and we never
        get here. Therefore we can acquire the s-latch to the page without
        having to fear a deadlock. */

        block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
                                BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
                                &mtr);
        /* Because the buffer pool mutex was released by
        buf_page_peek_if_search_hashed(), it is possible that the
        block was removed from the buffer pool by another thread
        before buf_page_get_gen() got a chance to acquire the buffer
        pool mutex again.  Thus, we must check for a NULL return. */

        if (UNIV_LIKELY(block != NULL)) {

                buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);

                btr_search_drop_page_hash_index(block);
        }

        mtr_commit(&mtr);
}

/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
        dict_index_t*   index,  /*!< in: index for which to build */
        buf_block_t*    block,  /*!< in: index page, s- or x-latched */
        ulint           n_fields,/*!< in: hash this many full fields */
        ulint           n_bytes,/*!< in: hash this many bytes from the next
                                field */
        ibool           left_side)/*!< in: hash for searches from left side? */
{
        hash_table_t*   table;
        page_t*         page;
        rec_t*          rec;
        rec_t*          next_rec;
        ulint           fold;
        ulint           next_fold;
        dulint          index_id;
        ulint           n_cached;
        ulint           n_recs;
        ulint*          folds;
        rec_t**         recs;
        ulint           i;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;
        rec_offs_init(offsets_);

        ut_ad(index);
        ut_a(!dict_index_is_ibuf(index));

        table = btr_search_sys->hash_index;
        page = buf_block_get_frame(block);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
              || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        rw_lock_s_lock(&btr_search_latch);

        if (block->is_hashed && ((block->curr_n_fields != n_fields)
                                 || (block->curr_n_bytes != n_bytes)
                                 || (block->curr_left_side != left_side))) {

                rw_lock_s_unlock(&btr_search_latch);

                btr_search_drop_page_hash_index(block);
        } else {
                rw_lock_s_unlock(&btr_search_latch);
        }

        n_recs = page_get_n_recs(page);

        if (n_recs == 0) {

                return;
        }

        /* Check that the values for hash index build are sensible */

        if (n_fields + n_bytes == 0) {

                return;
        }

        if (dict_index_get_n_unique_in_tree(index) < n_fields
            || (dict_index_get_n_unique_in_tree(index) == n_fields
                && n_bytes > 0)) {
                return;
        }

        /* Calculate and cache fold values and corresponding records into
        an array for fast insertion to the hash index */

        folds = mem_alloc(n_recs * sizeof(ulint));
        recs = mem_alloc(n_recs * sizeof(rec_t*));

        n_cached = 0;

        index_id = btr_page_get_index_id(page);

        rec = page_rec_get_next(page_get_infimum_rec(page));

        offsets = rec_get_offsets(rec, index, offsets,
                                  n_fields + (n_bytes > 0), &heap);

        if (!page_rec_is_supremum(rec)) {
                ut_a(n_fields <= rec_offs_n_fields(offsets));

                if (n_bytes > 0) {
                        ut_a(n_fields < rec_offs_n_fields(offsets));
                }
        }

        fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

        if (left_side) {

                folds[n_cached] = fold;
                recs[n_cached] = rec;
                n_cached++;
        }

        for (;;) {
                next_rec = page_rec_get_next(rec);

                if (page_rec_is_supremum(next_rec)) {

                        if (!left_side) {

                                folds[n_cached] = fold;
                                recs[n_cached] = rec;
                                n_cached++;
                        }

                        break;
                }

                offsets = rec_get_offsets(next_rec, index, offsets,
                                          n_fields + (n_bytes > 0), &heap);
                next_fold = rec_fold(next_rec, offsets, n_fields,
                                     n_bytes, index_id);

                if (fold != next_fold) {
                        /* Insert an entry into the hash index */

                        if (left_side) {

                                folds[n_cached] = next_fold;
                                recs[n_cached] = next_rec;
                                n_cached++;
                        } else {
                                folds[n_cached] = fold;
                                recs[n_cached] = rec;
                                n_cached++;
                        }
                }

                rec = next_rec;
                fold = next_fold;
        }

        btr_search_check_free_space_in_heap();

        rw_lock_x_lock(&btr_search_latch);

        if (UNIV_UNLIKELY(!btr_search_enabled)) {
                goto exit_func;
        }

        if (block->is_hashed && ((block->curr_n_fields != n_fields)
                                 || (block->curr_n_bytes != n_bytes)
                                 || (block->curr_left_side != left_side))) {
                goto exit_func;
        }

        /* This counter is decremented every time we drop page
        hash index entries and is incremented here. Since we can
        rebuild hash index for a page that is already hashed, we
        have to take care not to increment the counter in that
        case. */
        if (!block->is_hashed) {
                index->search_info->ref_count++;
        }

        block->is_hashed = TRUE;
        block->n_hash_helps = 0;

        block->curr_n_fields = n_fields;
        block->curr_n_bytes = n_bytes;
        block->curr_left_side = left_side;
        block->index = index;

        for (i = 0; i < n_cached; i++) {

                ha_insert_for_fold(table, folds[i], block, recs[i]);
        }

exit_func:
        rw_lock_x_unlock(&btr_search_latch);

        mem_free(folds);
        mem_free(recs);
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
}
 
1406
 
 
1407
/********************************************************************//**
Moves or deletes hash entries for moved records. If new_page is already hashed,
then the hash index for page, if any, is dropped. If new_page is not hashed,
and page is hashed, then a new hash index is built for new_page with the same
parameters as page (this often happens when a page is split). */
UNIV_INTERN
void
btr_search_move_or_delete_hash_entries(
/*===================================*/
        buf_block_t*    new_block,      /*!< in: records are copied
                                        to this page */
        buf_block_t*    block,          /*!< in: index page from which
                                        records were copied, and the
                                        copied records will be deleted
                                        from this page */
        dict_index_t*   index)          /*!< in: record descriptor */
{
        ulint   n_fields;
        ulint   n_bytes;
        ibool   left_side;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
        ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
        ut_a(!new_block->is_hashed || new_block->index == index);
        ut_a(!block->is_hashed || block->index == index);
        ut_a(!(new_block->is_hashed || block->is_hashed)
             || !dict_index_is_ibuf(index));

        rw_lock_s_lock(&btr_search_latch);
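
        /* Three cases follow: if new_block is already hashed, the hash
        entries for block are simply dropped; if only block is hashed, a
        hash index with block's parameters is built for new_block; if
        neither block is hashed, there is nothing to do. */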
        if (new_block->is_hashed) {

                rw_lock_s_unlock(&btr_search_latch);

                btr_search_drop_page_hash_index(block);

                return;
        }

        if (block->is_hashed) {

                n_fields = block->curr_n_fields;
                n_bytes = block->curr_n_bytes;
                left_side = block->curr_left_side;

                new_block->n_fields = block->curr_n_fields;
                new_block->n_bytes = block->curr_n_bytes;
                new_block->left_side = left_side;

                rw_lock_s_unlock(&btr_search_latch);

                ut_a(n_fields + n_bytes > 0);

                btr_search_build_page_hash_index(index, new_block, n_fields,
                                                 n_bytes, left_side);
                ut_ad(n_fields == block->curr_n_fields);
                ut_ad(n_bytes == block->curr_n_bytes);
                ut_ad(left_side == block->curr_left_side);
                return;
        }

        rw_lock_s_unlock(&btr_search_latch);
}

/********************************************************************//**
Updates the page hash index when a single record is deleted from a page. */
UNIV_INTERN
void
btr_search_update_hash_on_delete(
/*=============================*/
        btr_cur_t*      cursor) /*!< in: cursor which was positioned on the
                                record to delete using btr_cur_search_...,
                                the record is not yet deleted */
{
        hash_table_t*   table;
        buf_block_t*    block;
        rec_t*          rec;
        ulint           fold;
        dulint          index_id;
        ibool           found;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        mem_heap_t*     heap            = NULL;
        rec_offs_init(offsets_);

        rec = btr_cur_get_rec(cursor);

        block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
        ut_a(!dict_index_is_ibuf(cursor->index));

        table = btr_search_sys->hash_index;

        index_id = cursor->index->id;
        fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
                                             ULINT_UNDEFINED, &heap),
                        block->curr_n_fields, block->curr_n_bytes, index_id);
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
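
        /* The fold was computed above under the block lock only;
        btr_search_latch is x-latched just long enough to remove the
        single hash node, if one points to this record. */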
        rw_lock_x_lock(&btr_search_latch);

        found = ha_search_and_delete_if_found(table, fold, rec);

        rw_lock_x_unlock(&btr_search_latch);
}

/********************************************************************//**
Updates the page hash index when a single record is inserted on a page,
handling the special case where the hash node of the record that the
cursor points to can be updated in place. */
UNIV_INTERN
void
btr_search_update_hash_node_on_insert(
/*==================================*/
        btr_cur_t*      cursor) /*!< in: cursor which was positioned to the
                                place to insert using btr_cur_search_...,
                                and the new record has been inserted next
                                to the cursor */
{
        hash_table_t*   table;
        buf_block_t*    block;
        rec_t*          rec;

        rec = btr_cur_get_rec(cursor);

        block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(!dict_index_is_ibuf(cursor->index));

        rw_lock_x_lock(&btr_search_latch);
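
        /* If the cursor found its position through the hash index with
        the current parameters, and fold groups are represented by their
        last record (right side), the node that pointed to rec can simply
        be redirected to the record inserted after it.  Otherwise fall
        back to the general btr_search_update_hash_on_insert(). */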
        if ((cursor->flag == BTR_CUR_HASH)
            && (cursor->n_fields == block->curr_n_fields)
            && (cursor->n_bytes == block->curr_n_bytes)
            && !block->curr_left_side) {

                table = btr_search_sys->hash_index;

                ha_search_and_update_if_found(table, cursor->fold, rec,
                                              block, page_rec_get_next(rec));

                rw_lock_x_unlock(&btr_search_latch);
        } else {
                rw_lock_x_unlock(&btr_search_latch);

                btr_search_update_hash_on_insert(cursor);
        }
}

/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
        btr_cur_t*      cursor) /*!< in: cursor which was positioned to the
                                place to insert using btr_cur_search_...,
                                and the new record has been inserted next
                                to the cursor */
{
        hash_table_t*   table;
        buf_block_t*    block;
        rec_t*          rec;
        rec_t*          ins_rec;
        rec_t*          next_rec;
        dulint          index_id;
        ulint           fold;
        ulint           ins_fold;
        ulint           next_fold = 0; /* initialized to silence a compiler
                                       warning; next_fold is only read when
                                       next_rec is not the page supremum */
        ulint           n_fields;
        ulint           n_bytes;
        ibool           left_side;
        ibool           locked          = FALSE;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;
        rec_offs_init(offsets_);

        table = btr_search_sys->hash_index;

        btr_search_check_free_space_in_heap();

        rec = btr_cur_get_rec(cursor);

        block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(!dict_index_is_ibuf(cursor->index));

        index_id = cursor->index->id;

        n_fields = block->curr_n_fields;
        n_bytes = block->curr_n_bytes;
        left_side = block->curr_left_side;

        ins_rec = page_rec_get_next(rec);
        next_rec = page_rec_get_next(ins_rec);

        offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
                                  ULINT_UNDEFINED, &heap);
        ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);
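
        /* Up to three folds take part in the update: the fold of the
        record before the insertion point, the fold of the inserted
        record, and the fold of its successor.  btr_search_latch is
        x-latched lazily, only once the first hash insertion turns out
        to be needed. */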
        if (!page_rec_is_supremum(next_rec)) {
                offsets = rec_get_offsets(next_rec, cursor->index, offsets,
                                          n_fields + (n_bytes > 0), &heap);
                next_fold = rec_fold(next_rec, offsets, n_fields,
                                     n_bytes, index_id);
        }

        if (!page_rec_is_infimum(rec)) {
                offsets = rec_get_offsets(rec, cursor->index, offsets,
                                          n_fields + (n_bytes > 0), &heap);
                fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
        } else {
                if (left_side) {

                        rw_lock_x_lock(&btr_search_latch);

                        locked = TRUE;

                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
                }

                goto check_next_rec;
        }

        if (fold != ins_fold) {

                if (!locked) {

                        rw_lock_x_lock(&btr_search_latch);

                        locked = TRUE;
                }

                if (!left_side) {
                        ha_insert_for_fold(table, fold, block, rec);
                } else {
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
                }
        }
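
        /* The boundary between rec and ins_rec has been handled above;
        what remains is the boundary between ins_rec and next_rec. */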
check_next_rec:
        if (page_rec_is_supremum(next_rec)) {

                if (!left_side) {

                        if (!locked) {
                                rw_lock_x_lock(&btr_search_latch);

                                locked = TRUE;
                        }

                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
                }

                goto function_exit;
        }

        if (ins_fold != next_fold) {

                if (!locked) {

                        rw_lock_x_lock(&btr_search_latch);

                        locked = TRUE;
                }

                if (!left_side) {

                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
                        /*
                        fputs("Hash insert for ", stderr);
                        dict_index_name_print(stderr, cursor->index);
                        fprintf(stderr, " fold %lu\n", ins_fold);
                        */
                } else {
                        ha_insert_for_fold(table, next_fold, block, next_rec);
                }
        }

function_exit:
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
        if (locked) {
                rw_lock_x_unlock(&btr_search_latch);
        }
}

/********************************************************************//**
Validates the search system.
@return TRUE if ok */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
{
        ha_node_t*      node;
        ulint           n_page_dumps    = 0;
        ibool           ok              = TRUE;
        ulint           i;
        ulint           cell_count;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;

        /* How many cells to check before temporarily releasing
        btr_search_latch. */
        ulint           chunk_size = 10000;

        rec_offs_init(offsets_);

        rw_lock_x_lock(&btr_search_latch);
        buf_pool_mutex_enter();

        cell_count = hash_get_n_cells(btr_search_sys->hash_index);
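
        /* First pass: check that every hash node points to a valid
        buffer block and that its cached fold value matches a freshly
        recomputed one.  Both btr_search_latch and the buffer pool mutex
        are held, so the blocks cannot be relocated under us. */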
        for (i = 0; i < cell_count; i++) {
                /* We release btr_search_latch every once in a while to
                give other queries a chance to run. */
                if ((i != 0) && ((i % chunk_size) == 0)) {
                        buf_pool_mutex_exit();
                        rw_lock_x_unlock(&btr_search_latch);
                        os_thread_yield();
                        rw_lock_x_lock(&btr_search_latch);
                        buf_pool_mutex_enter();
                }

                node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

                for (; node != NULL; node = node->next) {
                        const buf_block_t*      block
                                = buf_block_align(node->data);
                        const buf_block_t*      hash_block;

                        if (UNIV_LIKELY(buf_block_get_state(block)
                                        == BUF_BLOCK_FILE_PAGE)) {

                                /* The space and offset are only valid
                                for file blocks.  It is possible that
                                the block is being freed
                                (BUF_BLOCK_REMOVE_HASH, see the
                                assertion and the comment below) */
                                hash_block = buf_block_hash_get(
                                        buf_block_get_space(block),
                                        buf_block_get_page_no(block));
                        } else {
                                hash_block = NULL;
                        }

                        if (hash_block) {
                                ut_a(hash_block == block);
                        } else {
                                /* When a block is being freed,
                                buf_LRU_search_and_free_block() first
                                removes the block from
                                buf_pool->page_hash by calling
                                buf_LRU_block_remove_hashed_page().
                                After that, it invokes
                                btr_search_drop_page_hash_index() to
                                remove the block from
                                btr_search_sys->hash_index. */

                                ut_a(buf_block_get_state(block)
                                     == BUF_BLOCK_REMOVE_HASH);
                        }

                        ut_a(!dict_index_is_ibuf(block->index));

                        offsets = rec_get_offsets((const rec_t*) node->data,
                                                  block->index, offsets,
                                                  block->curr_n_fields
                                                  + (block->curr_n_bytes > 0),
                                                  &heap);

                        if (!block->is_hashed || node->fold
                            != rec_fold((rec_t*)(node->data),
                                        offsets,
                                        block->curr_n_fields,
                                        block->curr_n_bytes,
                                        btr_page_get_index_id(block->frame))) {
                                const page_t*   page = block->frame;

                                ok = FALSE;
                                ut_print_timestamp(stderr);

                                fprintf(stderr,
                                        "  InnoDB: Error in an adaptive hash"
                                        " index pointer to page %lu\n"
                                        "InnoDB: ptr mem address %p"
                                        " index id %lu %lu,"
                                        " node fold %lu, rec fold %lu\n",
                                        (ulong) page_get_page_no(page),
                                        node->data,
                                        (ulong) ut_dulint_get_high(
                                                btr_page_get_index_id(page)),
                                        (ulong) ut_dulint_get_low(
                                                btr_page_get_index_id(page)),
                                        (ulong) node->fold,
                                        (ulong) rec_fold((rec_t*)(node->data),
                                                         offsets,
                                                         block->curr_n_fields,
                                                         block->curr_n_bytes,
                                                         btr_page_get_index_id(
                                                                 page)));

                                fputs("InnoDB: Record ", stderr);
                                rec_print_new(stderr, (rec_t*)node->data,
                                              offsets);
                                fprintf(stderr, "\nInnoDB: on that page."
                                        " Page mem address %p, is hashed %lu,"
                                        " n fields %lu, n bytes %lu\n"
                                        "InnoDB: side %lu\n",
                                        (void*) page, (ulong) block->is_hashed,
                                        (ulong) block->curr_n_fields,
                                        (ulong) block->curr_n_bytes,
                                        (ulong) block->curr_left_side);

                                if (n_page_dumps < 20) {
                                        buf_page_print(page, 0);
                                        n_page_dumps++;
                                }
                        }
                }
        }
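
        /* Second pass: let ha_validate() check the integrity of the hash
        table structure itself for each chunk of cells, again yielding
        between chunks so that other queries can proceed. */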
        for (i = 0; i < cell_count; i += chunk_size) {
                ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

                /* We release btr_search_latch every once in a while to
                give other queries a chance to run. */
                if (i != 0) {
                        buf_pool_mutex_exit();
                        rw_lock_x_unlock(&btr_search_latch);
                        os_thread_yield();
                        rw_lock_x_lock(&btr_search_latch);
                        buf_pool_mutex_enter();
                }

                if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
                        ok = FALSE;
                }
        }

        buf_pool_mutex_exit();
        rw_lock_x_unlock(&btr_search_latch);
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }

        return(ok);
}