/* Copyright (C) 2003 MySQL AB

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation        // gcc: Class implementation
#endif

#include "mysql_priv.h"

#if defined(HAVE_ARCHIVE_DB)
#include "ha_archive.h"
/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.

  This example was written as a test case for a customer who needed
  a storage engine without indexes that could compress data very well.
  So, welcome to a completely compressed storage engine. This storage
  engine only does inserts. No replace, deletes, or updates. All reads are
  complete table scans. Compression is done through gzip (bzip compresses
  better, but only marginally; if someone asks I could add support for
  it too, but be aware that it costs a lot more in CPU time than gzip).

  We keep a file pointer open for each instance of ha_archive for each read
  but for writes we keep one open file handle just for that. We flush it
  only if we have a read occur. gzip handles compressing lots of records
  at once much better than doing lots of little records between writes.
  It is possible to not lock on writes, but this would then mean we couldn't
  handle bulk inserts as well (that is, if someone was trying to read at
  the same time, since we would want to flush).
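
  As a rough sketch (simplified from write_row()/rnd_init() below; share is
  the per-table ARCHIVE_SHARE):

    // writer side: append the row and mark the stream dirty
    gzwrite(share->archive_write, buf, table->s->reclength);
    share->dirty= TRUE;

    // reader side: before a scan, flush pending rows once, under the mutex
    if (share->dirty)
    {
      pthread_mutex_lock(&share->mutex);
      gzflush(share->archive_write, Z_SYNC_FLUSH);
      share->dirty= FALSE;
      pthread_mutex_unlock(&share->mutex);
    }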

  A "meta" file is kept alongside the data file. This file serves two purposes.
  The first purpose is to track the number of rows in the table. The second
  purpose is to determine if the table was closed properly or not. When the
  meta file is first opened it is marked as dirty. It is opened when the table
  itself is opened for writing. When the table is closed the new count for rows
  is written to the meta file and the file is marked as clean. If the meta file
  is opened and it is marked as dirty, it is assumed that a crash occurred. At
  this point an error occurs and the user is told to rebuild the file.
  A rebuild scans the rows and rewrites the meta file. If corruption is found
  in the data file then the meta file is not repaired.

  At some point a recovery method for such a drastic case needs to be devised.

  Locks are row level, and you will get a consistent read.

  For performance as far as table scans go it is quite fast. I don't have
  good numbers but locally it has outperformed both InnoDB and MyISAM. For
  InnoDB the question will be if the table can fit into the buffer
  pool. For MyISAM it's a question of how much the file system caches the
  MyISAM file. With enough free memory MyISAM is faster. It's only when the OS
  doesn't have enough memory to cache the entire table that archive turns out
  to be any faster. For writes it is always a bit slower than MyISAM. It has
  no internal limits, though, for row length.

  Example file sizes, MyISAM (packed) vs. Archive:

  Table with 76695844 identical rows:
    29680807 a_archive.ARZ

  Table with 8991478 rows (all of Slashdot's comments):
    1922964506 comment_archive.ARZ
    2944970297 comment_text.MYD

  TODO:
   Add optional bzip support.
   Allow users to set the compression level.
   Add a truncate table command.
   Implement versioning (should be easy).
   Allow for errors; find a way to mark bad rows.
   Talk to the gzip guys, come up with a writable format so that updates
    are doable without switching to a block method.
   Add an optional feature so that rows can be flushed at an interval (which
    will cause less compression but may speed up ordered searches).
   Checkpoint the meta file to allow for faster rebuilds.
   Dirty open (right now the meta file is repaired if a crash occurred).
   Option to allow for dirty reads; this would lower the sync calls, which
    would make inserts a lot faster, but would mean highly arbitrary reads.

  Notes on file formats.
  The meta file is laid out as:
  check       - Just an int of 254 to make sure that the file we are opening
                was never corrupted.
  version     - The current version of the file format.
  rows        - This is an unsigned long long which is the number of rows in
                the data file.
  check point - Reserved for future use.
  dirty       - Status of the file, whether or not its values are the latest.
                This flag is what causes a repair to occur.

  The data file is laid out as:
  check       - Just an int of 254 to make sure that the file we are opening
                was never corrupted.
  version     - The current version of the file format.
  data        - The data is stored in a "row + blobs" format.
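
  As an illustration (a sketch, not code that appears in this file), the two
  headers can be pictured as packed byte layouts; the offsets match those
  used by read_meta_file()/write_meta_file() and read_data_header()/
  write_data_header() below:

    // meta file (.ARM), META_BUFFER_SIZE == 19 bytes
    struct meta_header
    {
      uchar     check;        // byte 0:      ARCHIVE_CHECK_HEADER (254)
      uchar     version;      // byte 1:      ARCHIVE_VERSION
      ulonglong rows;         // bytes 2-9:   row count (int8store format)
      ulonglong check_point;  // bytes 10-17: reserved for future use
      uchar     dirty;        // byte 18:     TRUE if not closed cleanly
    };

    // data file (.ARZ), DATA_BUFFER_SIZE == 2 bytes, followed by the rows
    struct data_header
    {
      uchar check;            // byte 0: ARCHIVE_CHECK_HEADER (254)
      uchar version;          // byte 1: ARCHIVE_VERSION
    };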
*/

/* If the archive storage engine has been inited */
static bool archive_inited= FALSE;
/* Variables for archive share methods */
pthread_mutex_t archive_mutex;
static HASH archive_open_tables;
static z_off_t max_zfile_size;
static int zoffset_size;

/* The file extensions */
#define ARZ ".ARZ"               // The data file
#define ARN ".ARN"               // Files used during an optimize call
#define ARM ".ARM"               // Meta file

/*
  uchar + uchar + ulonglong + ulonglong + uchar
*/
#define META_BUFFER_SIZE 19      // Size of the data used in the meta file
#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption

/*
  Number of rows that will force a bulk insert.
*/
#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2

/* dummy handlerton - only to have something to return from archive_db_init */
handlerton archive_hton = {
  "Archive storage engine",
  0,    /* savepoint size */
  NULL, /* close_connection */
  NULL, /* savepoint */
  NULL, /* rollback to savepoint */
  NULL, /* release savepoint */
  NULL, /* commit_by_xid */
  NULL, /* rollback_by_xid */
  NULL, /* create_cursor_read_view */
  NULL, /* set_cursor_read_view */
  NULL, /* close_cursor_read_view */
};

/*
  Used for the hash table that tracks open tables.
*/
static byte* archive_get_key(ARCHIVE_SHARE *share, uint *length,
                             my_bool not_used __attribute__((unused)))
{
  *length= share->table_name_length;
  return (byte*) share->table_name;
}

/*
  Initialize the archive handler.
*/
bool archive_db_init()
{
  DBUG_ENTER("archive_db_init");
  if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST))
    goto error;
  if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
                (hash_get_key) archive_get_key, 0, 0))
  {
    VOID(pthread_mutex_destroy(&archive_mutex));
    goto error;
  }
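
  /*
    Per zlib's documentation, bits 6-7 of zlibCompileFlags() encode the
    compiled size of z_off_t (0 = 16 bits, 1 = 32, 2 = 64), so
    2 << value recovers the size in bytes.
  */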
  zoffset_size= 2 << ((zlibCompileFlags() >> 6) & 3);
  switch (sizeof(z_off_t)) {
  case 2:
    max_zfile_size= INT_MAX16;
    break;
  case 8:
    max_zfile_size= (z_off_t) LONGLONG_MAX;
    break;
  case 4:
  default:
    max_zfile_size= INT_MAX32;
  }

  archive_inited= TRUE;
  DBUG_RETURN(FALSE);

error:
  have_archive_db= SHOW_OPTION_DISABLED;  // If we couldn't use the handler
  DBUG_RETURN(TRUE);
}

/*
  Release the archive handler.
*/
bool archive_db_end()
{
  hash_free(&archive_open_tables);
  VOID(pthread_mutex_destroy(&archive_mutex));
  archive_inited= FALSE;
  return FALSE;
}

ha_archive::ha_archive(TABLE *table_arg)
  :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  /* The size of the offset value we will use for position() */
  ref_length= zoffset_size;
  DBUG_ASSERT(ref_length <= sizeof(z_off_t));
}

/*
  This method reads the header of a datafile and returns whether or not it
  was successful.
*/
int ha_archive::read_data_header(gzFile file_to_read)
{
  uchar data_buffer[DATA_BUFFER_SIZE];
  DBUG_ENTER("ha_archive::read_data_header");

  if (gzrewind(file_to_read) == -1)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  if (gzread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
    DBUG_RETURN(errno ? errno : -1);

  DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
  DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1]));

  /* Either byte being wrong means the header is not ours */
  if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
      (data_buffer[1] != (uchar)ARCHIVE_VERSION))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  DBUG_RETURN(0);
}

/*
  This method writes out the header of a datafile and returns whether or not
  it was successful.
*/
int ha_archive::write_data_header(gzFile file_to_write)
{
  uchar data_buffer[DATA_BUFFER_SIZE];
  DBUG_ENTER("ha_archive::write_data_header");

  data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
  data_buffer[1]= (uchar)ARCHIVE_VERSION;

  if (gzwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
      DATA_BUFFER_SIZE)
    DBUG_RETURN(errno ? errno : -1);
  DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
  DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1]));

  DBUG_RETURN(0);
}

/*
  This method reads the header of a meta file and returns whether or not it
  was successful.
  *rows will contain the current number of rows in the data file upon success.
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  ulonglong check_point;

  DBUG_ENTER("ha_archive::read_meta_file");

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(errno ? errno : -1);

  /*
    Parse out the meta data; we ignore version at the moment.
  */
  *rows= (ha_rows)uint8korr(meta_buffer + 2);
  check_point= uint8korr(meta_buffer + 10);

  DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
  DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
  DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));

  if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
      ((bool)meta_buffer[18] == TRUE))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}

/*
  This method writes out the header of a meta file and returns whether or not
  it was successful.
  By setting dirty you say whether or not the file represents the actual state
  of the data file.
  Upon ::open() we set to dirty, and upon ::close() we set to clean.
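
  As a sketch of that lifecycle (simplified; see init_archive_writer() and
  free_share() below):

    // when the shared writer is first opened, mark the meta file dirty
    (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
    ...
    // on the last close of the share, record the final row count and
    // mark the meta file clean again
    (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);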
*/
int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  ulonglong check_point= 0;  // Reserved for the future

  DBUG_ENTER("ha_archive::write_meta_file");

  meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
  meta_buffer[1]= (uchar)ARCHIVE_VERSION;
  int8store(meta_buffer + 2, (ulonglong)rows);
  int8store(meta_buffer + 10, check_point);
  *(meta_buffer + 18)= (uchar)dirty;
  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong)rows));
  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
  DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(errno ? errno : -1);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}

/*
  We create the shared memory space that we will use for the open table.
  No matter what, we try to get or create a share. This is so that a repair
  table operation can occur.

  See ha_example.cc for a longer description.
*/
ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
                                     TABLE *table, int *rc)
{
  ARCHIVE_SHARE *share;
  char meta_file_name[FN_REFLEN];
  char *tmp_name;
  uint length;

  DBUG_ENTER("ha_archive::get_share");

  pthread_mutex_lock(&archive_mutex);
  length= (uint) strlen(table_name);

  if (!(share= (ARCHIVE_SHARE*) hash_search(&archive_open_tables,
                                            (byte*) table_name, length)))
  {
    if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                         &share, sizeof(*share),
                         &tmp_name, length + 1,
                         NullS))
    {
      pthread_mutex_unlock(&archive_mutex);
      *rc= HA_ERR_OUT_OF_MEM;
      DBUG_RETURN(NULL);
    }

    share->table_name_length= length;
    share->table_name= tmp_name;
    share->crashed= FALSE;
    share->archive_write_open= FALSE;
    fn_format(share->data_file_name, table_name, "", ARZ,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
    fn_format(meta_file_name, table_name, "", ARM,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
    strmov(share->table_name, table_name);
    /*
      We will use this lock for rows.
    */
    VOID(pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST));
    if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
      share->crashed= TRUE;

    /*
      After we read, we set the file to dirty. When we close, we will do the
      opposite. If the meta file will not open we assume it is crashed and
      leave it up to the user to fix.
    */
    if (read_meta_file(share->meta_file, &share->rows_recorded))
      share->crashed= TRUE;

    VOID(my_hash_insert(&archive_open_tables, (byte*) share));
    thr_lock_init(&share->lock);
  }
  share->use_count++;
  DBUG_PRINT("info", ("archive table %.*s has %d open handles now",
                      share->table_name_length, share->table_name,
                      share->use_count));
  if (share->crashed)
    *rc= HA_ERR_CRASHED_ON_USAGE;
  pthread_mutex_unlock(&archive_mutex);

  DBUG_RETURN(share);
}

/*
  See ha_example.cc for a description.
*/
int ha_archive::free_share(ARCHIVE_SHARE *share)
{
  int rc= 0;
  DBUG_ENTER("ha_archive::free_share");
  DBUG_PRINT("info", ("archive table %.*s has %d open handles on entrance",
                      share->table_name_length, share->table_name,
                      share->use_count));

  pthread_mutex_lock(&archive_mutex);
  if (!--share->use_count)
  {
    hash_delete(&archive_open_tables, (byte*) share);
    thr_lock_delete(&share->lock);
    VOID(pthread_mutex_destroy(&share->mutex));
    if (share->crashed)
      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
    else
      (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
    if (share->archive_write_open)
      if (gzclose(share->archive_write) == Z_ERRNO)
        rc= 1;
    if (my_close(share->meta_file, MYF(0)))
      rc= 1;
    my_free((gptr) share, MYF(0));
  }
  pthread_mutex_unlock(&archive_mutex);

  DBUG_RETURN(rc);
}

int ha_archive::init_archive_writer()
{
  DBUG_ENTER("ha_archive::init_archive_writer");
  (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);

  /*
    It is expensive to open and close the data files, and since you can't
    have a gzip file that can be both read and written, we keep a writer
    open that is shared among all open tables.
  */
  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
  {
    share->crashed= TRUE;
    DBUG_RETURN(1);
  }
  share->archive_write_open= TRUE;
  info(HA_STATUS_TIME);
  share->approx_file_size= (ulong) data_file_length;

  DBUG_RETURN(0);
}

/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  ARM,
  NullS
};

const char **ha_archive::bas_ext() const
{
  return ha_archive_exts;
}

/*
  When opening a file we:
  - create/get our shared structure,
  - open the file we will read from.
*/
int ha_archive::open(const char *name, int mode, uint open_options)
{
  int rc= 0;
  DBUG_ENTER("ha_archive::open");

  DBUG_PRINT("info", ("archive table was opened for crash %s",
                      (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
  share= get_share(name, table, &rc);

  if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR))
  {
    free_share(share);
    DBUG_RETURN(rc);
  }
  else if (rc == HA_ERR_OUT_OF_MEM)
    DBUG_RETURN(rc);

  thr_lock_data_init(&share->lock, &lock, NULL);

  if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
  {
    if (errno == EROFS || errno == EACCES)
      DBUG_RETURN(my_errno= errno);
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  }

  DBUG_PRINT("info", ("archive table was crashed %s",
                      rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
  if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
    DBUG_RETURN(0);

  DBUG_RETURN(rc);
}

/*
  We first close this storage engine's file handle to the archive and
  then remove our reference count to the table (and possibly free it
  as well).
*/
int ha_archive::close(void)
{
  int rc= 0;
  DBUG_ENTER("ha_archive::close");

  /* First close stream */
  if (gzclose(archive) == Z_ERRNO)
    rc= 1;
  /* then also close share */
  rc|= free_share(share);

  DBUG_RETURN(rc);
}

/*
  We create our data file here. The format is pretty simple.
  You can read about the format of the data file above.
  Unlike other storage engines, we do not "pack" our data. Since we
  are about to do a general compression, packing would just be a waste of
  CPU time. If the table has blobs they are written after the row in the
  order of creation.
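
  An illustrative sketch of a single row on disk (see real_write_row()):

    [reclength bytes: the row image, copied straight from the record buffer]
    [blob 1 data][blob 2 data]...   // raw blob contents, in creation order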
*/
int ha_archive::create(const char *name, TABLE *table_arg,
                       HA_CREATE_INFO *create_info)
{
  File create_file;  // Used to create the datafile and the metafile
  char name_buff[FN_REFLEN];
  int error= 0;
  DBUG_ENTER("ha_archive::create");

  if ((create_file= my_create(fn_format(name_buff, name, "", ARM,
                                        MY_REPLACE_EXT | MY_UNPACK_FILENAME), 0,
                              O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
    DBUG_RETURN(my_errno);
  write_meta_file(create_file, 0, FALSE);
  my_close(create_file, MYF(0));

  /*
    We reuse name_buff since it is available.
  */
  if ((create_file= my_create(fn_format(name_buff, name, "", ARZ,
                                        MY_REPLACE_EXT | MY_UNPACK_FILENAME), 0,
                              O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
    DBUG_RETURN(my_errno);
  if ((archive= gzdopen(dup(create_file), "wb")) == NULL)
  {
    error= errno;
    goto error;
  }
  if (write_data_header(archive))
  {
    error= errno;
    goto error2;
  }
  if (gzclose(archive))
  {
    error= errno;
    goto error;
  }
  my_close(create_file, MYF(0));

  DBUG_RETURN(0);

error2:
  /* We already have an error, so ignore results of gzclose. */
  (void)gzclose(archive);
error:
  my_close(create_file, MYF(0));

  /* Return the error number, if we got one */
  DBUG_RETURN(error ? error : -1);
}

/*
  This is where the actual row is written out.
*/
int ha_archive::real_write_row(byte *buf, gzFile writer)
{
  z_off_t written, total_row_length;
  uint *ptr, *end;
  DBUG_ENTER("ha_archive::real_write_row");

  total_row_length= table->s->reclength;
  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields;
       ptr != end; ptr++)
    total_row_length+= ((Field_blob*) table->field[*ptr])->get_length();
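
  /*
    Guard against overflowing zlib's z_off_t file offset: if our cached size
    estimate is close to max_zfile_size, refresh it from the real file
    length, and refuse the write (HA_ERR_RECORD_FILE_FULL) if the row still
    will not fit.
  */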
  if (share->approx_file_size > max_zfile_size - total_row_length)
  {
    info(HA_STATUS_TIME);
    share->approx_file_size= (ulong) data_file_length;
    if (share->approx_file_size > max_zfile_size - total_row_length)
      DBUG_RETURN(HA_ERR_RECORD_FILE_FULL);
  }

  share->approx_file_size+= total_row_length;
  written= gzwrite(writer, buf, table->s->reclength);
  DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu",
                                            (int) written, table->s->reclength));
  if (!delayed_insert || !bulk_insert)
    share->dirty= TRUE;

  if (written != (z_off_t)table->s->reclength)
    DBUG_RETURN(errno ? errno : -1);

  /*
    We should probably mark the table as damaged if the record is written
    but the blob fails.
  */
  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
       ptr != end ; ptr++)
  {
    char *data_ptr;
    uint32 size= ((Field_blob*) table->field[*ptr])->get_length();

    if (size)
    {
      ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
      written= gzwrite(writer, data_ptr, (unsigned)size);
      if (written != (z_off_t)size)
        DBUG_RETURN(errno ? errno : -1);
    }
  }
  DBUG_RETURN(0);
}

/*
  Look at ha_archive::open() for an explanation of the row format.
  Here we just write out the row.

  Wondering about start_bulk_insert()? We don't implement it for
  archive since it optimizes for lots of writes. The only gain
  from implementing start_bulk_insert() is that we could skip
  setting dirty to true each time.
*/
int ha_archive::write_row(byte *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::write_row");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();
  pthread_mutex_lock(&share->mutex);
  if (!share->archive_write_open)
    if (init_archive_writer())
      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /*
    Varchar structures are constant in size but are not cleaned up request
    to request. The following sets all unused space to null to improve
    compression.
  */
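
  /*
    Example (hypothetical values, for illustration): a VARCHAR(10) holding
    "abc" occupies a fixed slot of one length byte plus ten data bytes; only
    "abc" is meaningful, and the seven trailing bytes may hold stale data
    from a previous row. Zeroing them makes identical rows byte-identical,
    which compresses far better.
  */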
  for (Field **field= table->field ; *field ; field++)
  {
    DBUG_PRINT("archive", ("Pack is %d\n", (*field)->pack_length()));
    DBUG_PRINT("archive", ("MyPack is %d\n",
                           (*field)->data_length((char*) buf + (*field)->offset())));
    if ((*field)->real_type() == MYSQL_TYPE_VARCHAR)
    {
      uint actual_length= (*field)->data_length((char*) buf + (*field)->offset());
      uint offset= (*field)->offset() + actual_length +
                   (actual_length > 255 ? 2 : 1);
      DBUG_PRINT("archive", ("Offset is %d -> %d\n", actual_length, offset));

      if ((*field)->pack_length() + (*field)->offset() != offset)
        bzero(buf + offset,
              (size_t)((*field)->pack_length() + (*field)->offset() - offset));
    }
  }

  share->rows_recorded++;
  rc= real_write_row(buf, share->archive_write);
  pthread_mutex_unlock(&share->mutex);

  DBUG_RETURN(rc);
}

/*
  All calls that need to scan the table start with this method. If we are told
  that it is a table scan we rewind the file to the beginning, otherwise
  we assume the position will be set.
*/
int ha_archive::rnd_init(bool scan)
{
  DBUG_ENTER("ha_archive::rnd_init");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /* We rewind the file so that we can read from the beginning if scan */
  if (scan)
  {
    scan_rows= share->rows_recorded;
    DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));

    /*
      If dirty, we lock, and then reset/flush the data.
      I found that just calling gzflush() doesn't always work.
    */
    if (share->dirty == TRUE)
    {
      pthread_mutex_lock(&share->mutex);
      if (share->dirty == TRUE)
      {
        DBUG_PRINT("info", ("archive flushing out rows for scan"));
        gzflush(share->archive_write, Z_SYNC_FLUSH);
        share->dirty= FALSE;
      }
      pthread_mutex_unlock(&share->mutex);
    }

    if (read_data_header(archive))
      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  }

  DBUG_RETURN(0);
}

/*
  This is the method that is used to read a row. It assumes that the row is
  positioned where you want it.
*/
int ha_archive::get_row(gzFile file_to_read, byte *buf)
{
  int read;  // Bytes read, gzread() returns int
  uint *ptr, *end;
  char *last;
  size_t total_blob_length= 0;
  DBUG_ENTER("ha_archive::get_row");

  read= gzread(file_to_read, buf, table->s->reclength);
  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read,
                                     table->s->reclength));

  if (read == Z_STREAM_ERROR)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /* If we read nothing we are at the end of the file */
  if (read == 0)
    DBUG_RETURN(HA_ERR_END_OF_FILE);

  /*
    If the record is the wrong size, the file is probably damaged, unless
    we are dealing with a delayed insert or a bulk insert.
  */
  if ((ulong) read != table->s->reclength)
    DBUG_RETURN(HA_ERR_END_OF_FILE);

  /* Calculate blob length, we use this for our buffer */
  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
       ptr != end ; ptr++)
    total_blob_length+= ((Field_blob*) table->field[*ptr])->get_length();

  /* Adjust our row buffer if we need be */
  buffer.alloc(total_blob_length);
  last= (char *)buffer.ptr();

  /* Loop through our blobs and read them */
  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
       ptr != end ; ptr++)
  {
    size_t size= ((Field_blob*) table->field[*ptr])->get_length();
    if (size)
    {
      read= gzread(file_to_read, last, size);
      if ((size_t) read != size)
        DBUG_RETURN(HA_ERR_END_OF_FILE);
      ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
      last+= size;
    }
  }
  DBUG_RETURN(0);
}

/*
  Called during ORDER BY. Its position is either from being called sequentially
  or by having had ha_archive::rnd_pos() called before it is called.
*/
int ha_archive::rnd_next(byte *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::rnd_next");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  if (!scan_rows)
    DBUG_RETURN(HA_ERR_END_OF_FILE);
  scan_rows--;

  statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
                      &LOCK_status);
  current_position= gztell(archive);
  rc= get_row(archive, buf);

  if (rc != HA_ERR_END_OF_FILE)
    records++;

  DBUG_RETURN(rc);
}

/*
  Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
  each call to ha_archive::rnd_next() if an ordering of the rows is
  needed.
*/
void ha_archive::position(const byte *record)
{
  DBUG_ENTER("ha_archive::position");
  my_store_ptr(ref, ref_length, current_position);
  DBUG_VOID_RETURN;
}

/*
  This is called after a table scan for each row if the results of the
  scan need to be ordered. It will take *pos and use it to move the
  cursor in the file so that the next row that is called is the
  correctly ordered row.
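
  A sketch of the handshake (simplified from position() above and the code
  below):

    position():  my_store_ptr(ref, ref_length, current_position);
    rnd_pos():   current_position= (z_off_t)my_get_ptr(pos, ref_length);
                 gzseek(archive, current_position, SEEK_SET);
                 get_row(archive, buf);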
*/
int ha_archive::rnd_pos(byte *buf, byte *pos)
{
  DBUG_ENTER("ha_archive::rnd_pos");
  statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
                      &LOCK_status);
  current_position= (z_off_t)my_get_ptr(pos, ref_length);
  (void)gzseek(archive, current_position, SEEK_SET);

  DBUG_RETURN(get_row(archive, buf));
}

/*
  This method repairs the meta file. It does this by walking the datafile and
  rewriting the meta file. Currently it does this by calling optimize with
  the extended flag.
*/
int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
  DBUG_ENTER("ha_archive::repair");
  check_opt->flags= T_EXTEND;
  int rc= optimize(thd, check_opt);

  if (rc)
    DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);

  share->crashed= FALSE;
  DBUG_RETURN(0);
}

/*
  The table can become fragmented if data was inserted, read, and then
  inserted again. What we do is open up the file and recompress it completely.
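
  Two passes are possible (a sketch of the code below):
    T_EXTEND: get_row() each row from the old file and real_write_row() it
              into the new file, recounting the rows and dropping any dead
              (partially written) rows.
    default:  gzread() raw blocks from the old file and gzwrite() them into
              the new file.
  Either way the new file then replaces the old one and the read descriptor
  is reopened.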
*/
int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
  DBUG_ENTER("ha_archive::optimize");
  int rc;
  gzFile writer;
  char writer_filename[FN_REFLEN];

  /* Open up the writer if we haven't yet */
  if (!share->archive_write_open)
    init_archive_writer();

  /* Flush any waiting data */
  gzflush(share->archive_write, Z_SYNC_FLUSH);

  /* Lets create a file to contain the new data */
  fn_format(writer_filename, share->table_name, "", ARN,
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);

  if ((writer= gzopen(writer_filename, "wb")) == NULL)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /*
    An extended rebuild is a lot more effort. We open up each row and
    re-record it. Any dead rows are removed (aka rows that may have been
    partially recorded).
  */
  if (check_opt->flags == T_EXTEND)
  {
    byte *buf;

    /*
      First we create a buffer that we can use for reading rows, and can pass
      to get_row().
    */
    if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
    {
      rc= HA_ERR_OUT_OF_MEM;
      goto error;
    }

    /*
      Now we will rewind the archive file so that we are positioned at the
      start of the file.
    */
    rc= read_data_header(archive);

    /*
      Assuming no error from rewinding the archive file, we now write out the
      new header for our data file.
    */
    if (!rc)
      rc= write_data_header(writer);

    /*
      On success of writing out the new header, we now fetch each row and
      insert it into the new archive file.
    */
    if (!rc)
    {
      share->rows_recorded= 0;
      while (!(rc= get_row(archive, buf)))
      {
        real_write_row(buf, writer);
        share->rows_recorded++;
      }
    }
    DBUG_PRINT("info", ("recovered %lu archive rows",
                        (ulong) share->rows_recorded));

    my_free((char*)buf, MYF(0));
    if (rc && rc != HA_ERR_END_OF_FILE)
      goto error;
  }
  else
  {
    /*
      The quick method is to just read the data raw, and then compress it
      directly.
    */
    int read;  // Bytes read, gzread() returns int
    char block[IO_SIZE];
    if (gzrewind(archive) == -1)
    {
      rc= HA_ERR_CRASHED_ON_USAGE;
      goto error;
    }

    while ((read= gzread(archive, block, IO_SIZE)))
      gzwrite(writer, block, read);
  }

  gzflush(writer, Z_SYNC_FLUSH);
  share->dirty= FALSE;
  gzclose(share->archive_write);
  share->archive_write= writer;

  my_rename(writer_filename, share->data_file_name, MYF(0));

  /*
    Now we need to reopen our read descriptor since it has changed.
  */
  if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
  {
    rc= HA_ERR_CRASHED_ON_USAGE;
    goto error;
  }

  DBUG_RETURN(0);

error:
  gzclose(writer);
  DBUG_RETURN(rc);
}

/*
  Below is an example of how to setup row level locking.
*/
THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)
{
  if (lock_type == TL_WRITE_DELAYED)
    delayed_insert= TRUE;
  else
    delayed_insert= FALSE;

  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
  {
    /*
      Here is where we get into the guts of a row level lock.
      If we are not doing a LOCK TABLE or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers.
    */

    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
         lock_type <= TL_WRITE) && !thd->in_lock_tables
        && !thd->tablespace_op)
      lock_type= TL_WRITE_ALLOW_WRITE;

    /*
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
      to t2. Convert the lock to a normal read lock to allow
      concurrent inserts to t2.
    */

    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
      lock_type= TL_READ;

    lock.type= lock_type;
  }

  *to++= &lock;

  return to;
}

/*
  Hints for the optimizer; see ha_tina for more information.
*/
int ha_archive::info(uint flag)
{
  DBUG_ENTER("ha_archive::info");
  /*
    This should be an accurate number now, though bulk and delayed inserts
    can cause the number to be inaccurate.
  */
  records= share->rows_recorded;

  /* Costs quite a bit more to get all information */
  if (flag & HA_STATUS_TIME)
  {
    MY_STAT file_stat;  // Stat information for the data file

    VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));

    mean_rec_length= table->s->reclength + buffer.alloced_length();
    data_file_length= file_stat.st_size;
    create_time= file_stat.st_ctime;
    update_time= file_stat.st_mtime;
    max_data_file_length= share->rows_recorded * mean_rec_length;
  }
  index_file_length= 0;

  DBUG_RETURN(0);
}

/*
  This method tells us that a bulk insert operation is about to occur. We set
  a flag which will keep write_row from saying that its data is dirty. This in
  turn will keep selects from causing a sync to occur.
  Basically, yet another optimization to keep compression working well.
*/
void ha_archive::start_bulk_insert(ha_rows rows)
{
  DBUG_ENTER("ha_archive::start_bulk_insert");
  if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
    bulk_insert= TRUE;
  DBUG_VOID_RETURN;
}

/*
  The other side of start_bulk_insert() is end_bulk_insert(). Here we turn
  off the bulk insert flag, and set the share dirty so that the next select
  will call sync for us.
*/
int ha_archive::end_bulk_insert()
{
  DBUG_ENTER("ha_archive::end_bulk_insert");
  bulk_insert= FALSE;
  share->dirty= TRUE;
  DBUG_RETURN(0);
}

/*
  We cancel a truncate command. The only way to delete an archive table is
  to drop it. This is done for security reasons. In a later version we will
  enable this by allowing the user to select a different row format.
*/
int ha_archive::delete_all_rows()
{
  DBUG_ENTER("ha_archive::delete_all_rows");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}

/*
  We just return state if asked.
*/
bool ha_archive::is_crashed() const
{
  DBUG_ENTER("ha_archive::is_crashed");
  DBUG_RETURN(share->crashed);
}

/*
  Simple scan of the tables to make sure everything is ok.
*/
int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
{
  int rc= 0;
  byte *buf;
  const char *old_proc_info= thd->proc_info;
  ha_rows count= share->rows_recorded;
  DBUG_ENTER("ha_archive::check");

  thd->proc_info= "Checking table";
  /* Flush any waiting data */
  gzflush(share->archive_write, Z_SYNC_FLUSH);

  /*
    First we create a buffer that we can use for reading rows, and can pass
    to get_row().
  */
  if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
    rc= HA_ERR_OUT_OF_MEM;

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  if (!rc)
    read_data_header(archive);

  if (!rc)
    while (!(rc= get_row(archive, buf)))
      count--;

  my_free((char*)buf, MYF(0));

  thd->proc_info= old_proc_info;

  if ((rc && rc != HA_ERR_END_OF_FILE) || count)
  {
    share->crashed= FALSE;
    DBUG_RETURN(HA_ADMIN_CORRUPT);
  }

  DBUG_RETURN(HA_ADMIN_OK);
}

/*
  Check and repair the table if needed.
*/
bool ha_archive::check_and_repair(THD *thd)
{
  HA_CHECK_OPT check_opt;
  DBUG_ENTER("ha_archive::check_and_repair");

  DBUG_RETURN(repair(thd, &check_opt));
}

#endif /* HAVE_ARCHIVE_DB */