~ubuntu-branches/ubuntu/wily/mysql-5.6/wily

« back to all changes in this revision

Viewing changes to storage/innobase/row/row0merge.cc

  • Committer: Package Import Robot
  • Author(s): Marc Deslauriers
  • Date: 2015-04-16 20:07:10 UTC
  • mto: (1.3.9 vivid-proposed)
  • mto: This revision was merged to the branch mainline in revision 11.
  • Revision ID: package-import@ubuntu.com-20150416200710-pcrsa022082zj46k
Tags: upstream-5.6.24
Import upstream version 5.6.24

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
1
/*****************************************************************************
2
2
 
3
 
Copyright (c) 2005, 2014, Oracle and/or its affiliates. All Rights Reserved.
 
3
Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved.
4
4
 
5
5
This program is free software; you can redistribute it and/or modify it under
6
6
the terms of the GNU General Public License as published by the Free Software
235
235
        mem_heap_free(buf->heap);
236
236
}
237
237
 
238
 
/******************************************************//**
239
 
Insert a data tuple into a sort buffer.
240
 
@return number of rows added, 0 if out of space */
 
238
/** Convert the field data from compact to redundant format.
 
239
@param[in]      row_field       field to copy from
 
240
@param[out]     field           field to copy to
 
241
@param[in]      len             length of the field data
 
242
@param[in]      zip_size        compressed BLOB page size,
 
243
                                zero for uncompressed BLOBs
 
244
@param[in,out]  heap            memory heap where to allocate data when
 
245
                                converting to ROW_FORMAT=REDUNDANT, or NULL
 
246
                                when not to invoke
 
247
                                row_merge_buf_redundant_convert(). */
 
248
static
 
249
void
 
250
row_merge_buf_redundant_convert(
 
251
        const dfield_t*         row_field,
 
252
        dfield_t*               field,
 
253
        ulint                   len,
 
254
        ulint                   zip_size,
 
255
        mem_heap_t*             heap)
 
256
{
 
257
        ut_ad(DATA_MBMINLEN(field->type.mbminmaxlen) == 1);
 
258
        ut_ad(DATA_MBMAXLEN(field->type.mbminmaxlen) > 1);
 
259
 
 
260
        byte*           buf = (byte*) mem_heap_alloc(heap, len);
 
261
        ulint           field_len = row_field->len;
 
262
        ut_ad(field_len <= len);
 
263
 
 
264
        if (row_field->ext) {
 
265
                const byte*     field_data = static_cast<byte*>(
 
266
                        dfield_get_data(row_field));
 
267
                ulint           ext_len;
 
268
 
 
269
                ut_a(field_len >= BTR_EXTERN_FIELD_REF_SIZE);
 
270
                ut_a(memcmp(field_data + field_len - BTR_EXTERN_FIELD_REF_SIZE,
 
271
                            field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE));
 
272
 
 
273
                byte*   data = btr_copy_externally_stored_field(
 
274
                        &ext_len, field_data, zip_size, field_len, heap);
 
275
 
 
276
                ut_ad(ext_len < len);
 
277
 
 
278
                memcpy(buf, data, ext_len);
 
279
                field_len = ext_len;
 
280
        } else {
 
281
                memcpy(buf, row_field->data, field_len);
 
282
        }
 
283
 
 
284
        memset(buf + field_len, 0x20, len - field_len);
 
285
 
 
286
        dfield_set_data(field, buf, len);
 
287
}
 
288
 
 
289
/** Insert a data tuple into a sort buffer.
 
290
@param[in,out]  buf             sort buffer
 
291
@param[in]      fts_index       fts index to be created
 
292
@param[in]      old_table       original table
 
293
@param[in,out]  psort_info      parallel sort info
 
294
@param[in]      row             table row
 
295
@param[in]      ext             cache of externally stored
 
296
                                column prefixes, or NULL
 
297
@param[in,out]  doc_id          Doc ID if we are creating
 
298
                                FTS index
 
299
@param[in,out]  conv_heap       memory heap where to allocate data when
 
300
                                converting to ROW_FORMAT=REDUNDANT, or NULL
 
301
                                when not to invoke
 
302
                                row_merge_buf_redundant_convert()
 
303
@param[in,out]  exceed_page     set if the record size exceeds the page size
 
304
                                when converting to ROW_FORMAT=REDUNDANT
 
305
@return number of rows added, 0 if out of space */
241
306
static
242
307
ulint
243
308
row_merge_buf_add(
244
 
/*==============*/
245
 
        row_merge_buf_t*        buf,    /*!< in/out: sort buffer */
246
 
        dict_index_t*           fts_index,/*!< in: fts index to be created */
247
 
        const dict_table_t*     old_table,/*!< in: original table */
248
 
        fts_psort_t*            psort_info, /*!< in: parallel sort info */
249
 
        const dtuple_t*         row,    /*!< in: table row */
250
 
        const row_ext_t*        ext,    /*!< in: cache of externally stored
251
 
                                        column prefixes, or NULL */
252
 
        doc_id_t*               doc_id) /*!< in/out: Doc ID if we are
253
 
                                        creating FTS index */
 
309
        row_merge_buf_t*        buf,
 
310
        dict_index_t*           fts_index,
 
311
        const dict_table_t*     old_table,
 
312
        fts_psort_t*            psort_info,
 
313
        const dtuple_t*         row,
 
314
        const row_ext_t*        ext,
 
315
        doc_id_t*               doc_id,
 
316
        mem_heap_t*             conv_heap,
 
317
        bool*                   exceed_page)
254
318
{
255
319
        ulint                   i;
256
320
        const dict_index_t*     index;
400
464
                                n_row_added = 1;
401
465
                                continue;
402
466
                        }
 
467
 
 
468
                        if (field->len != UNIV_SQL_NULL
 
469
                            && col->mtype == DATA_MYSQL
 
470
                            && col->len != field->len) {
 
471
 
 
472
                                if (conv_heap != NULL) {
 
473
                                        row_merge_buf_redundant_convert(
 
474
                                                row_field, field, col->len,
 
475
                                                dict_table_zip_size(old_table),
 
476
                                                conv_heap);
 
477
                                } else {
 
478
                                        /* Field length mismatch should not
 
479
                                        happen when rebuilding redundant row
 
480
                                        format table. */
 
481
                                        ut_ad(dict_table_is_comp(index->table));
 
482
                                }
 
483
                        }
403
484
                }
404
485
 
405
486
                len = dfield_get_len(field);
508
589
        of extra_size. */
509
590
        data_size += (extra_size + 1) + ((extra_size + 1) >= 0x80);
510
591
 
 
592
        /* Record size can exceed page size while converting to
 
593
        redundant row format. But there is assert
 
594
        ut_ad(size < UNIV_PAGE_SIZE) in rec_offs_data_size().
 
595
        It may hit the assert before attempting to insert the row. */
 
596
        if (conv_heap != NULL && data_size > UNIV_PAGE_SIZE) {
 
597
                *exceed_page = true;
 
598
        }
 
599
 
511
600
        ut_ad(data_size < srv_sort_buf_size);
512
601
 
513
602
        /* Reserve one byte for the end marker of row_merge_block_t. */
527
616
                dfield_dup(field++, buf->heap);
528
617
        } while (--n_fields);
529
618
 
 
619
        if (conv_heap != NULL) {
 
620
                mem_heap_empty(conv_heap);
 
621
        }
 
622
 
530
623
        DBUG_RETURN(n_row_added);
531
624
}
532
625
 
1208
1301
        os_event_t              fts_parallel_sort_event = NULL;
1209
1302
        ibool                   fts_pll_sort = FALSE;
1210
1303
        ib_int64_t              sig_count = 0;
 
1304
        mem_heap_t*             conv_heap = NULL;
1211
1305
        DBUG_ENTER("row_merge_read_clustered_index");
1212
1306
 
1213
1307
        ut_ad((old_table == new_table) == !col_map);
1303
1397
 
1304
1398
        row_heap = mem_heap_create(sizeof(mrec_buf_t));
1305
1399
 
 
1400
        if (dict_table_is_comp(old_table)
 
1401
            && !dict_table_is_comp(new_table)) {
 
1402
                conv_heap = mem_heap_create(sizeof(mrec_buf_t));
 
1403
        }
 
1404
 
1306
1405
        /* Scan the clustered index. */
1307
1406
        for (;;) {
1308
1407
                const rec_t*    rec;
1581
1680
                        row_merge_buf_t*        buf     = merge_buf[i];
1582
1681
                        merge_file_t*           file    = &files[i];
1583
1682
                        ulint                   rows_added = 0;
 
1683
                        bool                    exceed_page = false;
1584
1684
 
1585
1685
                        if (UNIV_LIKELY
1586
1686
                            (row && (rows_added = row_merge_buf_add(
1587
1687
                                        buf, fts_index, old_table,
1588
 
                                        psort_info, row, ext, &doc_id)))) {
 
1688
                                        psort_info, row, ext, &doc_id,
 
1689
                                        conv_heap, &exceed_page)))) {
1589
1690
 
1590
1691
                                /* If we are creating FTS index,
1591
1692
                                a single row can generate more
1592
1693
                                records for tokenized word */
1593
1694
                                file->n_rec += rows_added;
 
1695
 
 
1696
                                if (exceed_page) {
 
1697
                                        err = DB_TOO_BIG_RECORD;
 
1698
                                        break;
 
1699
                                }
 
1700
 
1594
1701
                                if (doc_id > max_doc_id) {
1595
1702
                                        max_doc_id = doc_id;
1596
1703
                                }
1691
1798
                                    (!(rows_added = row_merge_buf_add(
1692
1799
                                                buf, fts_index, old_table,
1693
1800
                                                psort_info, row, ext,
1694
 
                                                &doc_id)))) {
 
1801
                                                &doc_id, conv_heap,
 
1802
                                                &exceed_page)))) {
1695
1803
                                        /* An empty buffer should have enough
1696
1804
                                        room for at least one record. */
1697
1805
                                        ut_error;
1698
1806
                                }
1699
1807
 
 
1808
                                if (exceed_page) {
 
1809
                                        err = DB_TOO_BIG_RECORD;
 
1810
                                        break;
 
1811
                                }
 
1812
 
1700
1813
                                file->n_rec += rows_added;
1701
1814
                        }
1702
1815
                }
1721
1834
        }
1722
1835
 
1723
1836
all_done:
 
1837
        if (conv_heap != NULL) {
 
1838
                mem_heap_free(conv_heap);
 
1839
        }
 
1840
 
1724
1841
#ifdef FTS_INTERNAL_DIAG_PRINT
1725
1842
        DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Scan Table\n");
1726
1843
#endif