~ubuntu-branches/ubuntu/natty/mysql-5.1/natty-proposed

« back to all changes in this revision

Viewing changes to storage/innodb_plugin/btr/btr0sea.c

  • Committer: Package Import Robot
  • Author(s): Marc Deslauriers
  • Date: 2012-02-22 08:30:45 UTC
  • mfrom: (1.4.1)
  • Revision ID: package-import@ubuntu.com-20120222083045-2rd53r4bnyx7qus4
Tags: 5.1.61-0ubuntu0.11.04.1
* SECURITY UPDATE: Update to 5.1.61 to fix multiple security issues
  (LP: #937869)
  - http://www.oracle.com/technetwork/topics/security/cpujan2012-366304.html
  - CVE-2011-2262
  - CVE-2012-0075
  - CVE-2012-0112
  - CVE-2012-0113
  - CVE-2012-0114
  - CVE-2012-0115
  - CVE-2012-0116
  - CVE-2012-0117
  - CVE-2012-0118
  - CVE-2012-0119
  - CVE-2012-0120
  - CVE-2012-0484
  - CVE-2012-0485
  - CVE-2012-0486
  - CVE-2012-0487
  - CVE-2012-0488
  - CVE-2012-0489
  - CVE-2012-0490
  - CVE-2012-0491
  - CVE-2012-0492
  - CVE-2012-0493
  - CVE-2012-0494
  - CVE-2012-0495
  - CVE-2012-0496

Show diffs side-by-side

added

removed

Lines of Context:
1
1
/*****************************************************************************
2
2
 
3
 
Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
 
3
Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
4
4
Copyright (c) 2008, Google Inc.
5
5
 
6
6
Portions of this file contain modifications contributed and copyrighted by
44
44
#include "ha0ha.h"
45
45
 
46
46
/** Flag: has the search system been enabled?
47
 
Protected by btr_search_latch and btr_search_enabled_mutex. */
 
47
Protected by btr_search_latch. */
48
48
UNIV_INTERN char                btr_search_enabled      = TRUE;
49
 
UNIV_INTERN ibool               btr_search_fully_disabled = FALSE;
50
 
 
51
 
/** Mutex protecting btr_search_enabled */
52
 
static mutex_t                  btr_search_enabled_mutex;
53
49
 
54
50
/** A dummy variable to fool the compiler */
55
51
UNIV_INTERN ulint               btr_search_this_is_zero = 0;
141
137
        be enough free space in the hash table. */
142
138
 
143
139
        if (heap->free_block == NULL) {
144
 
                buf_block_t*    block = buf_block_alloc(0);
 
140
                buf_block_t*    block = buf_block_alloc();
145
141
 
146
142
                rw_lock_x_lock(&btr_search_latch);
147
143
 
169
165
        btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));
170
166
 
171
167
        rw_lock_create(&btr_search_latch, SYNC_SEARCH_SYS);
172
 
        mutex_create(&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);
173
168
 
174
169
        btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));
175
170
 
199
194
btr_search_disable(void)
200
195
/*====================*/
201
196
{
202
 
        mutex_enter(&btr_search_enabled_mutex);
 
197
        dict_table_t*   table;
 
198
 
 
199
        mutex_enter(&dict_sys->mutex);
203
200
        rw_lock_x_lock(&btr_search_latch);
204
201
 
205
 
        /* Disable access to hash index, also tell ha_insert_for_fold()
206
 
        stop adding new nodes to hash index, but still allow updating
207
 
        existing nodes */
208
202
        btr_search_enabled = FALSE;
209
203
 
210
 
        /* Clear all block->is_hashed flags and remove all entries
211
 
        from btr_search_sys->hash_index. */
212
 
        buf_pool_drop_hash_index();
213
 
 
214
 
        /* hash index has been cleaned up, disallow any operation to
215
 
        the hash index */
216
 
        btr_search_fully_disabled = TRUE;
217
 
 
218
 
        /* btr_search_enabled_mutex should guarantee this. */
219
 
        ut_ad(!btr_search_enabled);
 
204
        /* Clear the index->search_info->ref_count of every index in
 
205
        the data dictionary cache. */
 
206
        for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
 
207
             table = UT_LIST_GET_NEXT(table_LRU, table)) {
 
208
 
 
209
                dict_index_t*   index;
 
210
 
 
211
                for (index = dict_table_get_first_index(table); index;
 
212
                     index = dict_table_get_next_index(index)) {
 
213
 
 
214
                        index->search_info->ref_count = 0;
 
215
                }
 
216
        }
 
217
 
 
218
        mutex_exit(&dict_sys->mutex);
 
219
 
 
220
        /* Set all block->index = NULL. */
 
221
        buf_pool_clear_hash_index();
 
222
 
 
223
        /* Clear the adaptive hash index. */
 
224
        hash_table_clear(btr_search_sys->hash_index);
 
225
        mem_heap_empty(btr_search_sys->hash_index->heap);
220
226
 
221
227
        rw_lock_x_unlock(&btr_search_latch);
222
 
        mutex_exit(&btr_search_enabled_mutex);
223
228
}
224
229
 
225
230
/********************************************************************//**
229
234
btr_search_enable(void)
230
235
/*====================*/
231
236
{
232
 
        mutex_enter(&btr_search_enabled_mutex);
233
237
        rw_lock_x_lock(&btr_search_latch);
234
238
 
235
239
        btr_search_enabled = TRUE;
236
 
        btr_search_fully_disabled = FALSE;
237
240
 
238
241
        rw_lock_x_unlock(&btr_search_latch);
239
 
        mutex_exit(&btr_search_enabled_mutex);
240
242
}
241
243
 
242
244
/*****************************************************************//**
459
461
            && (block->n_bytes == info->n_bytes)
460
462
            && (block->left_side == info->left_side)) {
461
463
 
462
 
                if ((block->is_hashed)
 
464
                if ((block->index)
463
465
                    && (block->curr_n_fields == info->n_fields)
464
466
                    && (block->curr_n_bytes == info->n_bytes)
465
467
                    && (block->curr_left_side == info->left_side)) {
488
490
             / BTR_SEARCH_PAGE_BUILD_LIMIT)
489
491
            && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
490
492
 
491
 
                if ((!block->is_hashed)
 
493
                if ((!block->index)
492
494
                    || (block->n_hash_helps
493
495
                        > 2 * page_get_n_recs(block->frame))
494
496
                    || (block->n_fields != block->curr_n_fields)
520
522
        buf_block_t*    block,  /*!< in: buffer block where cursor positioned */
521
523
        btr_cur_t*      cursor) /*!< in: cursor */
522
524
{
523
 
        ulint   fold;
524
 
        rec_t*  rec;
525
 
        dulint  index_id;
 
525
        dict_index_t*   index;
 
526
        ulint           fold;
 
527
        const rec_t*    rec;
526
528
 
527
529
        ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
528
530
#ifdef UNIV_SYNC_DEBUG
533
535
        ut_ad(page_align(btr_cur_get_rec(cursor))
534
536
              == buf_block_get_frame(block));
535
537
 
536
 
        if (!block->is_hashed) {
 
538
        index = block->index;
 
539
 
 
540
        if (!index) {
537
541
 
538
542
                return;
539
543
        }
540
544
 
541
 
        ut_a(block->index == cursor->index);
542
 
        ut_a(!dict_index_is_ibuf(cursor->index));
 
545
        ut_a(index == cursor->index);
 
546
        ut_a(!dict_index_is_ibuf(index));
543
547
 
544
548
        if ((info->n_hash_potential > 0)
545
549
            && (block->curr_n_fields == info->n_fields)
556
560
                        return;
557
561
                }
558
562
 
559
 
                index_id = cursor->index->id;
560
563
                fold = rec_fold(rec,
561
 
                                rec_get_offsets(rec, cursor->index, offsets_,
 
564
                                rec_get_offsets(rec, index, offsets_,
562
565
                                                ULINT_UNDEFINED, &heap),
563
566
                                block->curr_n_fields,
564
 
                                block->curr_n_bytes, index_id);
 
567
                                block->curr_n_bytes, index->id);
565
568
                if (UNIV_LIKELY_NULL(heap)) {
566
569
                        mem_heap_free(heap);
567
570
                }
824
827
        mtr_t*          mtr)            /*!< in: mtr */
825
828
{
826
829
        buf_block_t*    block;
827
 
        rec_t*          rec;
 
830
        const rec_t*    rec;
828
831
        ulint           fold;
829
832
        dulint          index_id;
830
833
#ifdef notdefined
832
835
        btr_pcur_t      pcur;
833
836
#endif
834
837
        ut_ad(index && info && tuple && cursor && mtr);
 
838
        ut_ad(!dict_index_is_ibuf(index));
835
839
        ut_ad((latch_mode == BTR_SEARCH_LEAF)
836
840
              || (latch_mode == BTR_MODIFY_LEAF));
837
841
 
909
913
 
910
914
        ut_ad(page_rec_is_user_rec(rec));
911
915
 
912
 
        btr_cur_position(index, rec, block, cursor);
 
916
        btr_cur_position(index, (rec_t*) rec, block, cursor);
913
917
 
914
918
        /* Check the validity of the guess within the page */
915
919
 
1040
1044
 
1041
1045
retry:
1042
1046
        rw_lock_s_lock(&btr_search_latch);
1043
 
        page = block->frame;
 
1047
        index = block->index;
1044
1048
 
1045
 
        if (UNIV_LIKELY(!block->is_hashed)) {
 
1049
        if (UNIV_LIKELY(!index)) {
1046
1050
 
1047
1051
                rw_lock_s_unlock(&btr_search_latch);
1048
1052
 
1049
1053
                return;
1050
1054
        }
1051
1055
 
 
1056
        ut_a(!dict_index_is_ibuf(index));
1052
1057
        table = btr_search_sys->hash_index;
1053
1058
 
1054
1059
#ifdef UNIV_SYNC_DEBUG
1059
1064
 
1060
1065
        n_fields = block->curr_n_fields;
1061
1066
        n_bytes = block->curr_n_bytes;
1062
 
        index = block->index;
1063
 
        ut_a(!dict_index_is_ibuf(index));
1064
1067
 
1065
1068
        /* NOTE: The fields of block must not be accessed after
1066
1069
        releasing btr_search_latch, as the index page might only
1070
1073
 
1071
1074
        ut_a(n_fields + n_bytes > 0);
1072
1075
 
 
1076
        page = block->frame;
1073
1077
        n_recs = page_get_n_recs(page);
1074
1078
 
1075
1079
        /* Calculate and cache fold values into an array for fast deletion
1118
1122
 
1119
1123
        rw_lock_x_lock(&btr_search_latch);
1120
1124
 
1121
 
        if (UNIV_UNLIKELY(!block->is_hashed)) {
 
1125
        if (UNIV_UNLIKELY(!block->index)) {
1122
1126
                /* Someone else has meanwhile dropped the hash index */
1123
1127
 
1124
1128
                goto cleanup;
1146
1150
        ut_a(index->search_info->ref_count > 0);
1147
1151
        index->search_info->ref_count--;
1148
1152
 
1149
 
        block->is_hashed = FALSE;
1150
1153
        block->index = NULL;
1151
 
        
 
1154
 
1152
1155
cleanup:
1153
1156
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
1154
1157
        if (UNIV_UNLIKELY(block->n_pointers)) {
1174
1177
}
1175
1178
 
1176
1179
/********************************************************************//**
1177
 
Drops a page hash index when a page is freed from a fseg to the file system.
1178
 
Drops possible hash index if the page happens to be in the buffer pool. */
 
1180
Drops a possible page hash index when a page is evicted from the buffer pool
 
1181
or freed in a file segment. */
1179
1182
UNIV_INTERN
1180
1183
void
1181
1184
btr_search_drop_page_hash_when_freed(
1188
1191
        buf_block_t*    block;
1189
1192
        mtr_t           mtr;
1190
1193
 
1191
 
        if (!buf_page_peek_if_search_hashed(space, page_no)) {
1192
 
 
1193
 
                return;
1194
 
        }
1195
 
 
1196
1194
        mtr_start(&mtr);
1197
1195
 
1198
 
        /* We assume that if the caller has a latch on the page, then the
1199
 
        caller has already dropped the hash index for the page, and we never
1200
 
        get here. Therefore we can acquire the s-latch to the page without
1201
 
        having to fear a deadlock. */
1202
 
 
1203
 
        block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
1204
 
                                BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
1205
 
                                &mtr);
1206
 
        /* Because the buffer pool mutex was released by
1207
 
        buf_page_peek_if_search_hashed(), it is possible that the
1208
 
        block was removed from the buffer pool by another thread
1209
 
        before buf_page_get_gen() got a chance to acquire the buffer
1210
 
        pool mutex again.  Thus, we must check for a NULL return. */
1211
 
 
1212
 
        if (UNIV_LIKELY(block != NULL)) {
 
1196
        /* If the caller has a latch on the page, then the caller must
 
1197
        have a x-latch on the page and it must have already dropped
 
1198
        the hash index for the page. Because of the x-latch that we
 
1199
        are possibly holding, we cannot s-latch the page, but must
 
1200
        (recursively) x-latch it, even though we are only reading. */
 
1201
 
 
1202
        block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL,
 
1203
                                 BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
 
1204
                                 &mtr);
 
1205
 
 
1206
        if (block && block->index) {
1213
1207
 
1214
1208
                buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
1215
1209
 
1241
1235
        rec_t*          next_rec;
1242
1236
        ulint           fold;
1243
1237
        ulint           next_fold;
1244
 
        dulint          index_id;
1245
1238
        ulint           n_cached;
1246
1239
        ulint           n_recs;
1247
1240
        ulint*          folds;
1255
1248
        ut_ad(index);
1256
1249
        ut_a(!dict_index_is_ibuf(index));
1257
1250
 
 
1251
#ifdef UNIV_SYNC_DEBUG
 
1252
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
 
1253
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
 
1254
              || rw_lock_own(&(block->lock), RW_LOCK_EX));
 
1255
#endif /* UNIV_SYNC_DEBUG */
 
1256
 
 
1257
        rw_lock_s_lock(&btr_search_latch);
 
1258
 
 
1259
        if (!btr_search_enabled) {
 
1260
                rw_lock_s_unlock(&btr_search_latch);
 
1261
                return;
 
1262
        }
 
1263
 
1258
1264
        table = btr_search_sys->hash_index;
1259
1265
        page = buf_block_get_frame(block);
1260
1266
 
1261
 
#ifdef UNIV_SYNC_DEBUG
1262
 
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
1263
 
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
1264
 
              || rw_lock_own(&(block->lock), RW_LOCK_EX));
1265
 
#endif /* UNIV_SYNC_DEBUG */
1266
 
 
1267
 
        rw_lock_s_lock(&btr_search_latch);
1268
 
 
1269
 
        if (block->is_hashed && ((block->curr_n_fields != n_fields)
1270
 
                                 || (block->curr_n_bytes != n_bytes)
1271
 
                                 || (block->curr_left_side != left_side))) {
 
1267
        if (block->index && ((block->curr_n_fields != n_fields)
 
1268
                             || (block->curr_n_bytes != n_bytes)
 
1269
                             || (block->curr_left_side != left_side))) {
1272
1270
 
1273
1271
                rw_lock_s_unlock(&btr_search_latch);
1274
1272
 
1305
1303
 
1306
1304
        n_cached = 0;
1307
1305
 
1308
 
        index_id = btr_page_get_index_id(page);
 
1306
        ut_a(UT_DULINT_EQ(index->id, btr_page_get_index_id(page)));
1309
1307
 
1310
1308
        rec = page_rec_get_next(page_get_infimum_rec(page));
1311
1309
 
1320
1318
                }
1321
1319
        }
1322
1320
 
1323
 
        fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
 
1321
        fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
1324
1322
 
1325
1323
        if (left_side) {
1326
1324
 
1347
1345
                offsets = rec_get_offsets(next_rec, index, offsets,
1348
1346
                                          n_fields + (n_bytes > 0), &heap);
1349
1347
                next_fold = rec_fold(next_rec, offsets, n_fields,
1350
 
                                     n_bytes, index_id);
 
1348
                                     n_bytes, index->id);
1351
1349
 
1352
1350
                if (fold != next_fold) {
1353
1351
                        /* Insert an entry into the hash index */
1372
1370
 
1373
1371
        rw_lock_x_lock(&btr_search_latch);
1374
1372
 
1375
 
        if (UNIV_UNLIKELY(btr_search_fully_disabled)) {
 
1373
        if (UNIV_UNLIKELY(!btr_search_enabled)) {
1376
1374
                goto exit_func;
1377
1375
        }
1378
1376
 
1379
 
        if (block->is_hashed && ((block->curr_n_fields != n_fields)
1380
 
                                 || (block->curr_n_bytes != n_bytes)
1381
 
                                 || (block->curr_left_side != left_side))) {
 
1377
        if (block->index && ((block->curr_n_fields != n_fields)
 
1378
                             || (block->curr_n_bytes != n_bytes)
 
1379
                             || (block->curr_left_side != left_side))) {
1382
1380
                goto exit_func;
1383
1381
        }
1384
1382
 
1387
1385
        rebuild hash index for a page that is already hashed, we
1388
1386
        have to take care not to increment the counter in that
1389
1387
        case. */
1390
 
        if (!block->is_hashed) {
 
1388
        if (!block->index) {
1391
1389
                index->search_info->ref_count++;
1392
1390
        }
1393
1391
 
1394
 
        block->is_hashed = TRUE;
1395
1392
        block->n_hash_helps = 0;
1396
1393
 
1397
1394
        block->curr_n_fields = n_fields;
1439
1436
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1440
1437
        ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
1441
1438
#endif /* UNIV_SYNC_DEBUG */
1442
 
        ut_a(!new_block->is_hashed || new_block->index == index);
1443
 
        ut_a(!block->is_hashed || block->index == index);
1444
 
        ut_a(!(new_block->is_hashed || block->is_hashed)
 
1439
 
 
1440
        rw_lock_s_lock(&btr_search_latch);
 
1441
 
 
1442
        ut_a(!new_block->index || new_block->index == index);
 
1443
        ut_a(!block->index || block->index == index);
 
1444
        ut_a(!(new_block->index || block->index)
1445
1445
             || !dict_index_is_ibuf(index));
1446
1446
 
1447
 
        rw_lock_s_lock(&btr_search_latch);
1448
 
 
1449
 
        if (new_block->is_hashed) {
 
1447
        if (new_block->index) {
1450
1448
 
1451
1449
                rw_lock_s_unlock(&btr_search_latch);
1452
1450
 
1455
1453
                return;
1456
1454
        }
1457
1455
 
1458
 
        if (block->is_hashed) {
 
1456
        if (block->index) {
1459
1457
 
1460
1458
                n_fields = block->curr_n_fields;
1461
1459
                n_bytes = block->curr_n_bytes;
1492
1490
{
1493
1491
        hash_table_t*   table;
1494
1492
        buf_block_t*    block;
1495
 
        rec_t*          rec;
 
1493
        const rec_t*    rec;
1496
1494
        ulint           fold;
1497
 
        dulint          index_id;
 
1495
        dict_index_t*   index;
1498
1496
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
1499
1497
        mem_heap_t*     heap            = NULL;
1500
1498
        rec_offs_init(offsets_);
1501
1499
 
1502
 
        rec = btr_cur_get_rec(cursor);
1503
 
 
1504
1500
        block = btr_cur_get_block(cursor);
1505
1501
 
1506
1502
#ifdef UNIV_SYNC_DEBUG
1507
1503
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1508
1504
#endif /* UNIV_SYNC_DEBUG */
1509
1505
 
1510
 
        if (!block->is_hashed) {
 
1506
        index = block->index;
 
1507
 
 
1508
        if (!index) {
1511
1509
 
1512
1510
                return;
1513
1511
        }
1514
1512
 
1515
 
        ut_a(block->index == cursor->index);
 
1513
        ut_a(index == cursor->index);
1516
1514
        ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
1517
 
        ut_a(!dict_index_is_ibuf(cursor->index));
 
1515
        ut_a(!dict_index_is_ibuf(index));
1518
1516
 
1519
1517
        table = btr_search_sys->hash_index;
1520
1518
 
1521
 
        index_id = cursor->index->id;
1522
 
        fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
 
1519
        rec = btr_cur_get_rec(cursor);
 
1520
 
 
1521
        fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
1523
1522
                                             ULINT_UNDEFINED, &heap),
1524
 
                        block->curr_n_fields, block->curr_n_bytes, index_id);
 
1523
                        block->curr_n_fields, block->curr_n_bytes, index->id);
1525
1524
        if (UNIV_LIKELY_NULL(heap)) {
1526
1525
                mem_heap_free(heap);
1527
1526
        }
 
1527
 
1528
1528
        rw_lock_x_lock(&btr_search_latch);
1529
1529
 
1530
 
        ha_search_and_delete_if_found(table, fold, rec);
 
1530
        if (block->index) {
 
1531
                ut_a(block->index == index);
 
1532
 
 
1533
                ha_search_and_delete_if_found(table, fold, rec);
 
1534
        }
1531
1535
 
1532
1536
        rw_lock_x_unlock(&btr_search_latch);
1533
1537
}
1545
1549
{
1546
1550
        hash_table_t*   table;
1547
1551
        buf_block_t*    block;
 
1552
        dict_index_t*   index;
1548
1553
        rec_t*          rec;
1549
1554
 
1550
1555
        rec = btr_cur_get_rec(cursor);
1555
1560
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1556
1561
#endif /* UNIV_SYNC_DEBUG */
1557
1562
 
1558
 
        if (!block->is_hashed) {
 
1563
        index = block->index;
 
1564
 
 
1565
        if (!index) {
1559
1566
 
1560
1567
                return;
1561
1568
        }
1562
1569
 
1563
 
        ut_a(block->index == cursor->index);
1564
 
        ut_a(!dict_index_is_ibuf(cursor->index));
 
1570
        ut_a(cursor->index == index);
 
1571
        ut_a(!dict_index_is_ibuf(index));
1565
1572
 
1566
1573
        rw_lock_x_lock(&btr_search_latch);
1567
1574
 
 
1575
        if (!block->index) {
 
1576
 
 
1577
                goto func_exit;
 
1578
        }
 
1579
 
 
1580
        ut_a(block->index == index);
 
1581
 
1568
1582
        if ((cursor->flag == BTR_CUR_HASH)
1569
1583
            && (cursor->n_fields == block->curr_n_fields)
1570
1584
            && (cursor->n_bytes == block->curr_n_bytes)
1575
1589
                ha_search_and_update_if_found(table, cursor->fold, rec,
1576
1590
                                              block, page_rec_get_next(rec));
1577
1591
 
 
1592
func_exit:
1578
1593
                rw_lock_x_unlock(&btr_search_latch);
1579
1594
        } else {
1580
1595
                rw_lock_x_unlock(&btr_search_latch);
1596
1611
{
1597
1612
        hash_table_t*   table;
1598
1613
        buf_block_t*    block;
 
1614
        dict_index_t*   index;
1599
1615
        rec_t*          rec;
1600
1616
        rec_t*          ins_rec;
1601
1617
        rec_t*          next_rec;
1602
 
        dulint          index_id;
1603
1618
        ulint           fold;
1604
1619
        ulint           ins_fold;
1605
1620
        ulint           next_fold = 0; /* remove warning (??? bug ???) */
1624
1639
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1625
1640
#endif /* UNIV_SYNC_DEBUG */
1626
1641
 
1627
 
        if (!block->is_hashed) {
 
1642
        index = block->index;
 
1643
 
 
1644
        if (!index) {
1628
1645
 
1629
1646
                return;
1630
1647
        }
1631
1648
 
1632
 
        ut_a(block->index == cursor->index);
1633
 
        ut_a(!dict_index_is_ibuf(cursor->index));
1634
 
 
1635
 
        index_id = cursor->index->id;
 
1649
        ut_a(index == cursor->index);
 
1650
        ut_a(!dict_index_is_ibuf(index));
1636
1651
 
1637
1652
        n_fields = block->curr_n_fields;
1638
1653
        n_bytes = block->curr_n_bytes;
1641
1656
        ins_rec = page_rec_get_next(rec);
1642
1657
        next_rec = page_rec_get_next(ins_rec);
1643
1658
 
1644
 
        offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
 
1659
        offsets = rec_get_offsets(ins_rec, index, offsets,
1645
1660
                                  ULINT_UNDEFINED, &heap);
1646
 
        ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);
 
1661
        ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
1647
1662
 
1648
1663
        if (!page_rec_is_supremum(next_rec)) {
1649
 
                offsets = rec_get_offsets(next_rec, cursor->index, offsets,
 
1664
                offsets = rec_get_offsets(next_rec, index, offsets,
1650
1665
                                          n_fields + (n_bytes > 0), &heap);
1651
1666
                next_fold = rec_fold(next_rec, offsets, n_fields,
1652
 
                                     n_bytes, index_id);
 
1667
                                     n_bytes, index->id);
1653
1668
        }
1654
1669
 
1655
1670
        if (!page_rec_is_infimum(rec)) {
1656
 
                offsets = rec_get_offsets(rec, cursor->index, offsets,
 
1671
                offsets = rec_get_offsets(rec, index, offsets,
1657
1672
                                          n_fields + (n_bytes > 0), &heap);
1658
 
                fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
 
1673
                fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
1659
1674
        } else {
1660
1675
                if (left_side) {
1661
1676
 
1663
1678
 
1664
1679
                        locked = TRUE;
1665
1680
 
 
1681
                        if (!btr_search_enabled) {
 
1682
                                goto function_exit;
 
1683
                        }
 
1684
 
1666
1685
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1667
1686
                }
1668
1687
 
1676
1695
                        rw_lock_x_lock(&btr_search_latch);
1677
1696
 
1678
1697
                        locked = TRUE;
 
1698
 
 
1699
                        if (!btr_search_enabled) {
 
1700
                                goto function_exit;
 
1701
                        }
1679
1702
                }
1680
1703
 
1681
1704
                if (!left_side) {
1694
1717
                                rw_lock_x_lock(&btr_search_latch);
1695
1718
 
1696
1719
                                locked = TRUE;
 
1720
 
 
1721
                                if (!btr_search_enabled) {
 
1722
                                        goto function_exit;
 
1723
                                }
1697
1724
                        }
1698
1725
 
1699
1726
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1709
1736
                        rw_lock_x_lock(&btr_search_latch);
1710
1737
 
1711
1738
                        locked = TRUE;
 
1739
 
 
1740
                        if (!btr_search_enabled) {
 
1741
                                goto function_exit;
 
1742
                        }
1712
1743
                }
1713
1744
 
1714
1745
                if (!left_side) {
1716
1747
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1717
1748
                        /*
1718
1749
                        fputs("Hash insert for ", stderr);
1719
 
                        dict_index_name_print(stderr, cursor->index);
 
1750
                        dict_index_name_print(stderr, index);
1720
1751
                        fprintf(stderr, " fold %lu\n", ins_fold);
1721
1752
                        */
1722
1753
                } else {
1820
1851
                                                  + (block->curr_n_bytes > 0),
1821
1852
                                                  &heap);
1822
1853
 
1823
 
                        if (!block->is_hashed || node->fold
 
1854
                        if (!block->index || node->fold
1824
1855
                            != rec_fold((rec_t*)(node->data),
1825
1856
                                        offsets,
1826
1857
                                        block->curr_n_fields,
1855
1886
                                rec_print_new(stderr, (rec_t*)node->data,
1856
1887
                                              offsets);
1857
1888
                                fprintf(stderr, "\nInnoDB: on that page."
1858
 
                                        " Page mem address %p, is hashed %lu,"
 
1889
                                        " Page mem address %p, is hashed %p,"
1859
1890
                                        " n fields %lu, n bytes %lu\n"
1860
1891
                                        "InnoDB: side %lu\n",
1861
 
                                        (void*) page, (ulong) block->is_hashed,
 
1892
                                        (void*) page, (void*) block->index,
1862
1893
                                        (ulong) block->curr_n_fields,
1863
1894
                                        (ulong) block->curr_n_bytes,
1864
1895
                                        (ulong) block->curr_left_side);