1
/* Copyright (C) 2001 MySQL AB
3
This program is free software; you can redistribute it and/or modify
4
it under the terms of the GNU General Public License as published by
5
the Free Software Foundation; version 2 of the License.
7
This program is distributed in the hope that it will be useful,
8
but WITHOUT ANY WARRANTY; without even the implied warranty of
9
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
GNU General Public License for more details.
12
You should have received a copy of the GNU General Public License
13
along with this program; if not, write to the Free Software
14
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
17
Function to handle quick removal of duplicates
18
This code is used when doing multi-table deletes to find the rows in
19
reference tables that needs to be deleted.
21
The basic idea is as follows:
23
Store first all strings in a binary tree, ignoring duplicates.
24
When the tree uses more memory than 'max_heap_table_size',
25
write the tree (in sorted order) out to disk and start with a new tree.
26
When all data has been generated, merge the trees (removing any found
29
The unique entries will be returned in sort order, to ensure that we do the
30
deletes in disk order.
39
#include "drizzled/sql_sort.h"
40
#include "drizzled/session.h"
41
#include "drizzled/sql_list.h"
42
#include "drizzled/internal/iocache.h"
44
#if defined(CMATH_NAMESPACE)
45
using namespace CMATH_NAMESPACE;
53
int unique_write_to_file(unsigned char* key, uint32_t,
57
Use unique->size (size of element stored in the tree) and not
58
unique->tree.size_of_element. The latter is different from unique->size
59
when tree implementation chooses to store pointer to key in TREE_ELEMENT
60
(instead of storing the element itself there)
62
return my_b_write(unique->file, key, unique->size) ? 1 : 0;
65
int unique_write_to_ptrs(unsigned char* key,
66
uint32_t, Unique *unique)
68
memcpy(unique->record_pointers, key, unique->size);
69
unique->record_pointers+=unique->size;
73
Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
74
uint32_t size_arg, size_t max_in_memory_size_arg)
75
: max_in_memory_size(max_in_memory_size_arg),
76
file(static_cast<internal::IO_CACHE *>(memory::sql_calloc(sizeof(internal::IO_CACHE)))),
81
init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, false,
82
NULL, comp_func_fixed_arg);
83
/* If the following fails, the next add will also fail */
84
my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
86
If you change the following, change it in get_max_elements function, too.
88
max_elements= (ulong) (max_in_memory_size /
89
ALIGN_SIZE(sizeof(TREE_ELEMENT)+size));
90
open_cached_file(file, drizzle_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
99
Stirling's approximate formula is used:
101
n! ~= sqrt(2*M_PI*n) * (n/M_E)^n
103
Derivation of formula used for calculations is as follows:
105
log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) =
107
= (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2).
110
/*
  Approximate log2(x!) via Stirling's formula:

    x! ~= sqrt(2*M_PI*x) * (x/M_E)^x

  hence

    log2(x!) = log(x!)/log(2)
             = log(sqrt(2*M_PI*x)*(x/M_E)^x) / log(2)
             = (log(2*M_PI*x)/2 + x*log(x/M_E)) / log(2)

  Used for estimating the number of key comparisons needed to build a
  binary tree of a given size.  Valid for x > 0.
*/
inline double log2_n_fact(double x)
{
  return (log(2*M_PI*x)/2 + x*log(x/M_E)) / M_LN2;
}
117
Calculate cost of merge_buffers function call for given sequence of
118
input stream lengths and store the number of rows in result stream in *last.
121
get_merge_buffers_cost()
122
buff_elems Array of #s of elements in buffers
123
elem_size Size of element stored in buffer
124
first Pointer to first merged element size
125
last Pointer to last merged element size
128
Cost of merge_buffers operation in disk seeks.
131
It is assumed that no rows are eliminated during merge.
132
The cost is calculated as
134
cost(read_and_write) + cost(merge_comparisons).
136
All bytes in the sequences is read and written back during merge so cost
137
of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write)
139
For comparisons cost calculations we assume that all merged sequences have
140
the same length, so each of total_buf_size elements will be added to a sort
141
heap with (n_buffers-1) elements. This gives the comparison cost:
143
total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID;
146
static double get_merge_buffers_cost(uint32_t *, uint32_t elem_size,
147
uint32_t *first, uint32_t *last)
149
uint32_t total_buf_elems= 0;
150
for (uint32_t *pbuf= first; pbuf <= last; pbuf++)
151
total_buf_elems+= *pbuf;
152
*last= total_buf_elems;
154
int n_buffers= last - first + 1;
156
/* Using log2(n)=log(n)/log(2) formula */
157
return 2*((double)total_buf_elems*elem_size) / IO_SIZE +
158
total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2);
163
Calculate cost of merging buffers into one in Unique::get, i.e. calculate
164
how long (in terms of disk seeks) the two calls
165
merge_many_buffs(...);
170
get_merge_many_buffs_cost()
171
buffer buffer space for temporary data, at least
172
Unique::get_cost_calc_buff_size bytes
173
maxbuffer # of full buffers
174
max_n_elems # of elements in first maxbuffer buffers
175
last_n_elems # of elements in last buffer
176
elem_size size of buffer element
179
maxbuffer+1 buffers are merged, where first maxbuffer buffers contain
180
max_n_elems elements each and last buffer contains last_n_elems elements.
182
The current implementation does a dumb simulation of merge_many_buffs
186
Cost of merge in disk seeks.
189
static double get_merge_many_buffs_cost(uint32_t *buffer,
190
uint32_t maxbuffer, uint32_t max_n_elems,
191
uint32_t last_n_elems, int elem_size)
194
double total_cost= 0.0;
195
uint32_t *buff_elems= buffer; /* #s of elements in each of merged sequences */
198
Set initial state: first maxbuffer sequences contain max_n_elems elements
199
each, last sequence contains last_n_elems elements.
201
for (i = 0; i < (int)maxbuffer; i++)
202
buff_elems[i]= max_n_elems;
203
buff_elems[maxbuffer]= last_n_elems;
206
Do it exactly as merge_many_buff function does, calling
207
get_merge_buffers_cost to get cost of merge_buffers.
209
if (maxbuffer >= MERGEBUFF2)
211
while (maxbuffer >= MERGEBUFF2)
213
uint32_t lastbuff= 0;
214
for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF)
216
total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
218
buff_elems + i + MERGEBUFF-1);
221
total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
223
buff_elems + maxbuffer);
228
/* Simulate final merge_buff call. */
229
total_cost += get_merge_buffers_cost(buff_elems, elem_size,
230
buff_elems, buff_elems + maxbuffer);
236
Calculate cost of using Unique for processing nkeys elements of size
237
key_size using max_in_memory_size memory.
240
Unique::get_use_cost()
241
buffer space for temporary data, use Unique::get_cost_calc_buff_size
242
to get # bytes needed.
243
nkeys #of elements in Unique
244
key_size size of each elements in bytes
245
max_in_memory_size amount of memory Unique will be allowed to use
252
cost(create_trees) + (see #1)
253
cost(merge) + (see #2)
254
cost(read_result) (see #3)
256
1. Cost of trees creation
257
For each Unique::put operation there will be 2*log2(n+1) elements
258
comparisons, where n runs from 1 tree_size (we assume that all added
259
elements are different). Together this gives:
261
n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!)
263
then cost(tree_creation) = n_compares*ROWID_COMPARE_COST;
265
Total cost of creating trees:
266
(n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost.
268
Approximate value of log2(N!) is calculated by log2_n_fact function.
271
If only one tree is created by Unique no merging will be necessary.
272
Otherwise, we model execution of merge_many_buff function and count
273
#of merges. (The reason behind this is that number of buffers is small,
274
while size of buffers is big and we don't want to lose precision with
277
3. If only one tree is created by Unique no disk io will happen.
278
Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume
279
these will be random seeks.
282
double Unique::get_use_cost(uint32_t *buffer, uint32_t nkeys, uint32_t key_size,
283
size_t max_in_memory_size)
285
ulong max_elements_in_tree;
286
ulong last_tree_elems;
287
int n_full_trees; /* number of trees in unique - 1 */
290
max_elements_in_tree= ((ulong) max_in_memory_size /
291
ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
293
n_full_trees= nkeys / max_elements_in_tree;
294
last_tree_elems= nkeys % max_elements_in_tree;
296
/* Calculate cost of creating trees */
297
result= 2*log2_n_fact(last_tree_elems + 1.0);
299
result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0);
300
result /= TIME_FOR_COMPARE_ROWID;
307
There is more than one tree and merging is necessary.
308
First, add cost of writing all trees to disk, assuming that all disk
309
writes are sequential.
311
result += DISK_SEEK_BASE_COST * n_full_trees *
312
ceil(((double) key_size)*max_elements_in_tree / IO_SIZE);
313
result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE);
316
double merge_cost= get_merge_many_buffs_cost(buffer, n_full_trees,
317
max_elements_in_tree,
318
last_tree_elems, key_size);
319
if (merge_cost < 0.0)
322
result += merge_cost;
324
Add cost of reading the resulting sequence, assuming there were no
327
result += ceil((double)key_size*nkeys/IO_SIZE);
334
close_cached_file(file);
336
delete_dynamic(&file_ptrs);
340
/* Write tree to disk; clear tree */
344
elements+= tree.elements_in_tree;
345
file_ptr.count=tree.elements_in_tree;
346
file_ptr.file_pos=my_b_tell(file);
348
if (tree_walk(&tree, (tree_walk_action) unique_write_to_file,
349
(void*) this, left_root_right) ||
350
insert_dynamic(&file_ptrs, (unsigned char*) &file_ptr))
358
Clear the tree and the file.
359
You must call reset() if you want to reuse Unique after walk().
367
If elements != 0, some trees were stored in the file (see how
368
flush() works). Note, that we can not count on my_b_tell(&file) == 0
369
here, because it can return 0 right after walk(), and walk() does not
370
reset any Unique member.
374
reset_dynamic(&file_ptrs);
375
reinit_io_cache(file, internal::WRITE_CACHE, 0L, 0, 1);
381
The comparison function, passed to queue_init() in merge_walk() and in
382
merge_buffers() when the latter is called from Uniques::get() must
383
use comparison function of Uniques::tree, but compare members of struct
391
static int buffpek_compare(void *arg, unsigned char *key_ptr1, unsigned char *key_ptr2)
393
BUFFPEK_COMPARE_CONTEXT *ctx= (BUFFPEK_COMPARE_CONTEXT *) arg;
394
return ctx->key_compare(ctx->key_compare_arg,
395
*((unsigned char **) key_ptr1), *((unsigned char **)key_ptr2));
403
The comparison function object, passed to a priority_queue in merge_walk()
404
as its sort function parameter.
407
class buffpek_compare_functor
409
qsort_cmp2 key_compare;
410
void *key_compare_arg;
412
buffpek_compare_functor(qsort_cmp2 in_key_compare, void *in_compare_arg)
413
: key_compare(in_key_compare), key_compare_arg(in_compare_arg) { }
414
inline bool operator()(const BUFFPEK *i, const BUFFPEK *j)
416
return key_compare(key_compare_arg,
424
Function is very similar to merge_buffers, but instead of writing sorted
425
unique keys to the output file, it invokes walk_action for each key.
426
This saves I/O if you need to pass through all unique keys only once.
430
All params are 'IN' (but see comment for begin, end):
431
merge_buffer buffer to perform cached piece-by-piece loading
432
of trees; initially the buffer is empty
433
merge_buffer_size size of merge_buffer. Must be aligned with
435
key_length size of tree element; key_length * (end - begin)
436
must be less or equal than merge_buffer_size.
437
begin pointer to BUFFPEK struct for the first tree.
438
end pointer to BUFFPEK struct for the last tree;
439
end > begin and [begin, end) form a consecutive
440
range. BUFFPEKs structs in that range are used and
441
overwritten in merge_walk().
442
walk_action element visitor. Action is called for each unique
444
walk_action_arg argument to walk action. Passed to it on each call.
445
compare elements comparison function
446
compare_arg comparison function argument
447
file file with all trees dumped. Trees in the file
448
must contain sorted unique values. Cache must be
449
initialized in read mode.
455
static bool merge_walk(unsigned char *merge_buffer, ulong merge_buffer_size,
456
uint32_t key_length, BUFFPEK *begin, BUFFPEK *end,
457
tree_walk_action walk_action, void *walk_action_arg,
458
qsort_cmp2 compare, void *compare_arg,
459
internal::IO_CACHE *file)
462
merge_buffer_size < (ulong) (key_length * (end - begin + 1)))
464
priority_queue<BUFFPEK *, vector<BUFFPEK *>, buffpek_compare_functor >
465
queue(buffpek_compare_functor(compare, compare_arg));
466
/* we need space for one key when a piece of merge buffer is re-read */
467
merge_buffer_size-= key_length;
468
unsigned char *save_key_buff= merge_buffer + merge_buffer_size;
469
uint32_t max_key_count_per_piece= (uint32_t) (merge_buffer_size/(end-begin) /
471
/* if piece_size is aligned reuse_freed_buffer will always hit */
472
uint32_t piece_size= max_key_count_per_piece * key_length;
473
uint32_t bytes_read; /* to hold return value of read_to_buffer */
477
Invariant: queue must contain top element from each tree, until a tree
478
is not completely walked through.
479
Here we're forcing the invariant, inserting one element from each tree
482
for (top= begin; top != end; ++top)
484
top->base= merge_buffer + (top - begin) * piece_size;
485
top->max_keys= max_key_count_per_piece;
486
bytes_read= read_to_buffer(file, top, key_length);
487
if (bytes_read == (uint32_t) (-1))
493
while (queue.size() > 1)
496
Every iteration one element is removed from the queue, and one is
497
inserted by the rules of the invariant. If two adjacent elements on
498
the top of the queue are not equal, biggest one is unique, because all
499
elements in each tree are unique. Action is applied only to unique
502
void *old_key= top->key;
504
read next key from the cache or from the file and push it to the
505
queue; this gives new top.
507
top->key+= key_length;
508
if (--top->mem_count)
513
else /* next piece should be read */
515
/* save old_key not to overwrite it in read_to_buffer */
516
memcpy(save_key_buff, old_key, key_length);
517
old_key= save_key_buff;
518
bytes_read= read_to_buffer(file, top, key_length);
519
if (bytes_read == (uint32_t) (-1))
521
else if (bytes_read > 0) /* top->key, top->mem_count are reset */
522
{ /* in read_to_buffer */
529
Tree for old 'top' element is empty: remove it from the queue.
535
/* new top has been obtained; if old top is unique, apply the action */
536
if (compare(compare_arg, old_key, top->key))
538
if (walk_action(old_key, 1, walk_action_arg))
543
Applying walk_action to the tail of the last tree: this is safe because
544
either we had only one tree in the beginning, or we work with the
545
last tree in the queue.
551
if (walk_action(top->key, 1, walk_action_arg))
553
top->key+= key_length;
555
while (--top->mem_count);
556
bytes_read= read_to_buffer(file, top, key_length);
557
if (bytes_read == (uint32_t) (-1))
569
Walks consecutively through all unique elements:
570
if all elements are in memory, then it simply invokes 'tree_walk', else
571
all flushed trees are loaded to memory piece-by-piece, pieces are
572
sorted, and action is called for each unique value.
573
Note: so as merging resets file_ptrs state, this method can change
574
internal Unique state to undefined: if you want to reuse Unique after
575
walk() you must call reset() first!
579
action function-visitor, typed in include/my_tree.h
580
function is called for each unique element
581
arg argument for visitor, which is passed to it on each call
587
bool Unique::walk(tree_walk_action action, void *walk_action_arg)
590
unsigned char *merge_buffer;
592
if (elements == 0) /* the whole tree is in memory */
593
return tree_walk(&tree, action, walk_action_arg, left_root_right);
595
/* flush current tree to the file to have some memory for merge buffer */
598
if (flush_io_cache(file) || reinit_io_cache(file, internal::READ_CACHE, 0L, 0, 0))
600
if (!(merge_buffer= (unsigned char *) malloc(max_in_memory_size)))
602
res= merge_walk(merge_buffer, (ulong) max_in_memory_size, size,
603
(BUFFPEK *) file_ptrs.buffer,
604
(BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
605
action, walk_action_arg,
606
tree.compare, tree.custom_arg, file);
607
free((char*) merge_buffer);
612
Modify the Table element so that when one calls init_records()
613
the rows will be read in priority order.
616
bool Unique::get(Table *table)
618
SORTPARAM sort_param;
619
table->sort.found_records=elements+tree.elements_in_tree;
621
if (my_b_tell(file) == 0)
623
/* Whole tree is in memory; Don't use disk if you don't need to */
624
if ((record_pointers=table->sort.record_pointers= (unsigned char*)
625
malloc(size * tree.elements_in_tree)))
627
(void) tree_walk(&tree, (tree_walk_action) unique_write_to_ptrs,
628
this, left_root_right);
632
/* Not enough memory; Save the result to file && free memory used by tree */
636
internal::IO_CACHE *outfile=table->sort.io_cache;
637
BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
638
uint32_t maxbuffer= file_ptrs.elements - 1;
639
unsigned char *sort_buffer;
640
internal::my_off_t save_pos;
643
/* Open cached file if it isn't open */
644
outfile=table->sort.io_cache= new internal::IO_CACHE;
645
memset(outfile, 0, sizeof(internal::IO_CACHE));
647
if (!outfile || (! my_b_inited(outfile) && open_cached_file(outfile,drizzle_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER, MYF(MY_WME))))
649
reinit_io_cache(outfile, internal::WRITE_CACHE, 0L, 0, 0);
651
memset(&sort_param, 0, sizeof(sort_param));
652
sort_param.max_rows= elements;
653
sort_param.sort_form=table;
654
sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
656
sort_param.keys= (uint32_t) (max_in_memory_size / sort_param.sort_length);
657
sort_param.not_killable=1;
659
if (!(sort_buffer=(unsigned char*) malloc((sort_param.keys+1) *
660
sort_param.sort_length)))
662
sort_param.unique_buff= sort_buffer+(sort_param.keys*
663
sort_param.sort_length);
665
sort_param.compare= (qsort2_cmp) buffpek_compare;
666
sort_param.cmp_context.key_compare= tree.compare;
667
sort_param.cmp_context.key_compare_arg= tree.custom_arg;
669
/* Merge the buffers to one file, removing duplicates */
670
if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,file))
672
if (flush_io_cache(file) ||
673
reinit_io_cache(file,internal::READ_CACHE,0L,0,0))
675
if (merge_buffers(&sort_param, file, outfile, sort_buffer, file_ptr,
676
file_ptr, file_ptr+maxbuffer,0))
682
if (flush_io_cache(outfile))
685
/* Setup io_cache for reading */
686
save_pos=outfile->pos_in_file;
687
if (reinit_io_cache(outfile,internal::READ_CACHE,0L,0,0))
689
outfile->end_of_file=save_pos;
693
} /* namespace drizzled */