Commit 7e32ee20 authored by unknown


This patch adds a new field to the meta file for tracking flushes (this is being kept so that we can later do stats and determine how often the compression buffer is being foiled). This should keep things all nice and compatible between versions.
Also added a flush table test. Found one possible bug in OPTIMIZE TABLE which has never been reported, but which I think could occur on a file system that ran out of disk space.
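As a rough illustration of the kind of statistic the new counter could eventually feed (purely hypothetical, not part of this patch), the forced-flush count could be related to the number of rows recorded:

  /* Hypothetical later use of the counter (not in this patch):
     forced flushes per 1000 rows recorded in the meta file. */
  static double flushes_per_k_rows(ulonglong forced_flushes, ha_rows rows)
  {
    return rows ? (double) forced_flushes * 1000.0 / (double) rows : 0.0;
  }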


mysql-test/r/archive.result:
  New results
mysql-test/t/archive.test:
  Added test for flush tables, which will also let me confirm DELAYED INSERT.
sql/ha_archive.cc:
  Adding new stat
sql/ha_archive.h:
  Just extending SHARE
parent b5a64ee1

mysql-test/r/archive.result: (diff collapsed)

mysql-test/t/archive.test:
@@ -1344,9 +1344,11 @@ CHECK TABLE t2;
SELECT * FROM t2;
# Just test syntax, we will never know if the output is right or wrong
# Must be the last test
# We won't know exactly about what is going on internally,
# but we will see if the row makes it in!!
INSERT DELAYED INTO t2 VALUES (4,011403,37,'intercepted','audiology','tinily','');
FLUSH TABLE t2;
SELECT * FROM t2;
# Adding test for alter table
ALTER TABLE t2 DROP COLUMN fld6;

sql/ha_archive.cc:
@@ -126,10 +126,10 @@ static HASH archive_open_tables;
#define ARN ".ARN" // Files used during an optimize call
#define ARM ".ARM" // Meta file
/*
uchar + uchar + ulonglong + ulonglong + ulonglong + uchar
uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
+ sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
+ sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
/*
uchar + uchar
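To make the layout behind META_BUFFER_SIZE easier to follow, the meta file after this patch holds the fields below, in this order (a sketch derived from this diff; the real code serializes field by field with int8store()/uint8korr() rather than writing a struct):

  struct archive_meta_layout_sketch       /* illustrative only */
  {
    uchar     check_header;               /* ARCHIVE_CHECK_HEADER */
    uchar     version;
    ulonglong rows;                       /* rows recorded in the data file */
    ulonglong check_point;
    ulonglong auto_increment;
    ulonglong forced_flushes;             /* new field added by this patch */
    uchar     dirty;                      /* set on open, cleared on clean close */
  };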
@@ -313,7 +313,8 @@ int ha_archive::write_data_header(azio_stream *file_to_write)
*rows will contain the current number of rows in the data file upon success.
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment)
ulonglong *auto_increment,
ulonglong *forced_flushes)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
@@ -336,12 +337,15 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ptr+= sizeof(ulonglong); // Move past check_point
*auto_increment= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past auto_increment
*forced_flushes= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past forced_flush
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
@@ -359,7 +363,9 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
Upon ::open() we set to dirty, and upon ::close() we set to clean.
*/
int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment, bool dirty)
ulonglong auto_increment,
ulonglong forced_flushes,
bool dirty)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
@@ -377,6 +383,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ptr += sizeof(ulonglong);
int8store(ptr, auto_increment);
ptr += sizeof(ulonglong);
int8store(ptr, forced_flushes);
ptr += sizeof(ulonglong);
*ptr= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
(uint)ARCHIVE_CHECK_HEADER));
@@ -386,6 +394,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
auto_increment));
DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
forced_flushes));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
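The ::open()/::close() remark above describes a simple crash-detection handshake, which the get_share()/free_share() hunks further down implement; condensed and lightly simplified from this diff (not new code):

  /* On share open: rewrite the meta file with the dirty flag set, so an
     unclean shutdown leaves it marked dirty on disk. */
  (void)write_meta_file(share->meta_file, share->rows_recorded,
                        share->auto_increment_value,
                        share->forced_flushes,
                        TRUE);                          /* dirty */

  /* On share close: count the final flush, then write the flag back as
     clean, unless the table is already marked crashed. */
  share->forced_flushes++;
  (void)write_meta_file(share->meta_file, share->rows_recorded,
                        share->auto_increment_value,
                        share->forced_flushes,
                        share->crashed ? TRUE : FALSE); /* keep crashed state */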
@@ -451,11 +461,14 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
leave it up to the user to fix.
*/
if (read_meta_file(share->meta_file, &share->rows_recorded,
&share->auto_increment_value))
&share->auto_increment_value,
&share->forced_flushes))
share->crashed= TRUE;
else
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value, TRUE);
share->auto_increment_value,
share->forced_flushes,
TRUE);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
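The comment above (cut short by the diff) motivates keeping a single compressed writer open per share; the consequence, visible in several hunks below, is that any operation needing current data must force a flush of that writer, which is exactly the event the new counter records. A condensed sketch of the recurring pattern, using the same share fields (not a new helper in this patch):

  /* Sketch: make buffered writes visible to readers and count the forced
     flush; the real code inlines this in write_row, index_read_idx,
     rnd_init, optimize and check, not always with all of these steps. */
  static void force_writer_flush(ARCHIVE_SHARE *share)
  {
    pthread_mutex_lock(&share->mutex);
    azflush(&(share->archive_write), Z_SYNC_FLUSH);
    share->forced_flushes++;
    share->dirty= FALSE;
    pthread_mutex_unlock(&share->mutex);
  }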
@@ -500,12 +513,18 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
hash_delete(&archive_open_tables, (byte*) share);
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
if (share->crashed)
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value, TRUE);
else
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value, FALSE);
/*
We need to make sure we don't reset the crashed state.
If we open a crashed file, we need to close it as crashed unless
it has been repaired.
Since we will close the data down after this, we go on and count
the flush on close;
*/
share->forced_flushes++;
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
share->crashed ? TRUE : FALSE);
if (azclose(&(share->archive_write)))
rc= 1;
if (my_close(share->meta_file, MYF(0)))
@@ -657,7 +676,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
}
}
write_meta_file(create_file, 0, auto_increment_value, FALSE);
write_meta_file(create_file, 0, auto_increment_value, 0, FALSE);
my_close(create_file,MYF(0));
/*
@@ -800,6 +819,7 @@ int ha_archive::write_row(byte *buf)
data
*/
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/*
Set the position of the local read thread to the beginning position.
*/
@@ -897,6 +917,7 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
*/
pthread_mutex_lock(&share->mutex);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
pthread_mutex_unlock(&share->mutex);
/*
@@ -974,6 +995,7 @@ int ha_archive::rnd_init(bool scan)
{
DBUG_PRINT("info", ("archive flushing out rows for scan"));
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
@@ -1149,6 +1171,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/* Let's create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
@@ -1233,13 +1256,15 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
goto error;
}
while ((read= azread(&archive, block, IO_SIZE)))
while ((read= azread(&archive, block, IO_SIZE)) > 0)
azwrite(&writer, block, read);
}
azclose(&writer);
share->dirty= FALSE;
share->forced_flushes= 0;
azclose(&(share->archive_write));
DBUG_PRINT("info", ("Reopening archive data file"));
if (!(azopen(&(share->archive_write), share->data_file_name,
O_WRONLY|O_APPEND|O_BINARY)))
{
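The loop-guard change above (testing azread() > 0 instead of merely non-zero) appears to be the OPTIMIZE TABLE issue mentioned in the commit message: assuming azread() can return a negative value on error, for instance when the file system runs out of space, a plain truth test would treat that error as a byte count. In sketch form:

  /* Old form: while ((read= azread(&archive, block, IO_SIZE)))
     A negative error return (assumed possible on a full file system) is
     non-zero, so azwrite() would be called with a bogus length. */

  /* New form: stop on zero (end of data) as well as on a negative error. */
  while ((read= azread(&archive, block, IO_SIZE)) > 0)
    azwrite(&writer, block, read);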
@@ -1421,6 +1446,7 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
thd->proc_info= "Checking table";
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/*
First we create a buffer that we can use for reading rows, and can pass

sql/ha_archive.h:
@@ -39,6 +39,8 @@ typedef struct st_archive_share {
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
ulonglong auto_increment_value;
ulonglong forced_flushes;
ulonglong mean_rec_length;
} ARCHIVE_SHARE;
/*
@@ -98,9 +100,13 @@ class ha_archive: public handler
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
int get_row(azio_stream *file_to_read, byte *buf);
int read_meta_file(File meta_file, ha_rows *rows, ulonglong *auto_increment);
int read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment,
ulonglong *forced_flushes);
int write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment, bool dirty);
ulonglong auto_increment,
ulonglong forced_flushes,
bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
int free_share(ARCHIVE_SHARE *share);
bool auto_repair() const { return 1; } // For the moment we just do this