Commit ffd9a08e authored by unknown's avatar unknown

Fixed a few pieces around support for data directory.


mysql-test/r/archive.result:
  Adding test case for data directory support in create table.
mysql-test/t/archive.test:
  Added test for "data directory" support in archive.
sql/ha_archive.cc:
  Updated comments, added printable bits for support of "data directory"
sql/ha_archive.h:
  Added real_path to share (will come in handy in later code)
parent 682435de
...@@ -13809,4 +13809,11 @@ alter table t1 add unique key (i, v); ...@@ -13809,4 +13809,11 @@ alter table t1 add unique key (i, v);
select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn'); select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
i v i v
4 3r4f 4 3r4f
alter table t1 data directory="$MYSQLTEST_VARDIR/tmp";
select * from t1;
i v
1 def
2 abc
4 3r4f
5 lmn
drop table t1, t2, t4, t5; drop table t1, t2, t4, t5;
...@@ -1486,6 +1486,9 @@ select * from t1; ...@@ -1486,6 +1486,9 @@ select * from t1;
alter table t1 add unique key (i, v); alter table t1 add unique key (i, v);
select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn'); select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
alter table t1 data directory="$MYSQLTEST_VARDIR/tmp";
select * from t1;
# #
# Cleanup, test is over # Cleanup, test is over
# #
......
...@@ -63,8 +63,7 @@ ...@@ -63,8 +63,7 @@
pool. For MyISAM its a question of how much the file system caches the pool. For MyISAM its a question of how much the file system caches the
MyISAM file. With enough free memory MyISAM is faster. Its only when the OS MyISAM file. With enough free memory MyISAM is faster. Its only when the OS
doesn't have enough memory to cache entire table that archive turns out doesn't have enough memory to cache entire table that archive turns out
to be any faster. For writes it is always a bit slower then MyISAM. It has no to be any faster.
internal limits though for row length.
Examples between MyISAM (packed) and Archive. Examples between MyISAM (packed) and Archive.
...@@ -81,11 +80,8 @@ ...@@ -81,11 +80,8 @@
TODO: TODO:
Add bzip optional support. Add bzip optional support.
Allow users to set compression level. Allow users to set compression level.
Add truncate table command.
Implement versioning, should be easy. Implement versioning, should be easy.
Allow for errors, find a way to mark bad rows. Allow for errors, find a way to mark bad rows.
Talk to the azip guys, come up with a writable format so that updates are doable
without switching to a block method.
Add optional feature so that rows can be flushed at interval (which will cause less Add optional feature so that rows can be flushed at interval (which will cause less
compression but may speed up ordered searches). compression but may speed up ordered searches).
Checkpoint the meta file to allow for faster rebuilds. Checkpoint the meta file to allow for faster rebuilds.
...@@ -126,10 +122,12 @@ static HASH archive_open_tables; ...@@ -126,10 +122,12 @@ static HASH archive_open_tables;
#define ARN ".ARN" // Files used during an optimize call #define ARN ".ARN" // Files used during an optimize call
#define ARM ".ARM" // Meta file #define ARM ".ARM" // Meta file
/* /*
uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + FN_REFLEN
+ uchar
*/ */
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \ #define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
+ sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + FN_REFLEN \
+ sizeof(uchar)
/* /*
uchar + uchar uchar + uchar
...@@ -317,7 +315,8 @@ int ha_archive::write_data_header(azio_stream *file_to_write) ...@@ -317,7 +315,8 @@ int ha_archive::write_data_header(azio_stream *file_to_write)
*/ */
int ha_archive::read_meta_file(File meta_file, ha_rows *rows, int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment, ulonglong *auto_increment,
ulonglong *forced_flushes) ulonglong *forced_flushes,
char *real_path)
{ {
uchar meta_buffer[META_BUFFER_SIZE]; uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer; uchar *ptr= meta_buffer;
...@@ -342,6 +341,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, ...@@ -342,6 +341,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ptr+= sizeof(ulonglong); // Move past auto_increment ptr+= sizeof(ulonglong); // Move past auto_increment
*forced_flushes= uint8korr(ptr); *forced_flushes= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past forced_flush ptr+= sizeof(ulonglong); // Move past forced_flush
memmove(real_path, ptr, FN_REFLEN);
ptr+= FN_REFLEN; // Move past the possible location of the file
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
...@@ -349,6 +350,7 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, ...@@ -349,6 +350,7 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point)); DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment)); DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes)); DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) || if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
...@@ -368,6 +370,7 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, ...@@ -368,6 +370,7 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
int ha_archive::write_meta_file(File meta_file, ha_rows rows, int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment, ulonglong auto_increment,
ulonglong forced_flushes, ulonglong forced_flushes,
char *real_path,
bool dirty) bool dirty)
{ {
uchar meta_buffer[META_BUFFER_SIZE]; uchar meta_buffer[META_BUFFER_SIZE];
...@@ -388,6 +391,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, ...@@ -388,6 +391,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ptr += sizeof(ulonglong); ptr += sizeof(ulonglong);
int8store(ptr, forced_flushes); int8store(ptr, forced_flushes);
ptr += sizeof(ulonglong); ptr += sizeof(ulonglong);
// No matter what, we pad with nulls
if (real_path)
strncpy((char *)ptr, real_path, FN_REFLEN);
else
bzero(ptr, FN_REFLEN);
ptr += FN_REFLEN;
*ptr= (uchar)dirty; *ptr= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
(uint)ARCHIVE_CHECK_HEADER)); (uint)ARCHIVE_CHECK_HEADER));
...@@ -399,6 +408,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, ...@@ -399,6 +408,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
auto_increment)); auto_increment));
DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu", DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
forced_flushes)); forced_flushes));
DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
real_path));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
...@@ -448,8 +459,12 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, ...@@ -448,8 +459,12 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
share->table_name_length= length; share->table_name_length= length;
share->table_name= tmp_name; share->table_name= tmp_name;
share->crashed= FALSE; share->crashed= FALSE;
fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); fn_format(share->data_file_name, table_name, "",
fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name, table_name, "", ARM,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
DBUG_PRINT("info", ("archive opening (1) up write at %s",
share->data_file_name));
strmov(share->table_name,table_name); strmov(share->table_name,table_name);
/* /*
We will use this lock for rows. We will use this lock for rows.
...@@ -457,6 +472,8 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, ...@@ -457,6 +472,8 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST)); VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
share->crashed= TRUE; share->crashed= TRUE;
DBUG_PRINT("info", ("archive opening (1) up write at %s",
share->data_file_name));
/* /*
After we read, we set the file to dirty. When we close, we will do the After we read, we set the file to dirty. When we close, we will do the
...@@ -465,13 +482,21 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, ...@@ -465,13 +482,21 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
*/ */
if (read_meta_file(share->meta_file, &share->rows_recorded, if (read_meta_file(share->meta_file, &share->rows_recorded,
&share->auto_increment_value, &share->auto_increment_value,
&share->forced_flushes)) &share->forced_flushes,
share->real_path))
share->crashed= TRUE; share->crashed= TRUE;
else else
(void)write_meta_file(share->meta_file, share->rows_recorded, (void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value, share->auto_increment_value,
share->forced_flushes, share->forced_flushes,
share->real_path,
TRUE); TRUE);
/*
Since we may now have a real_path, we will use it instead if it exists.
*/
if (*share->real_path)
fn_format(share->data_file_name, share->real_path, "", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
/* /*
It is expensive to open and close the data files and since you can't have It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open a gzip file that can be both read and written we keep a writer open
...@@ -527,6 +552,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share) ...@@ -527,6 +552,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
(void)write_meta_file(share->meta_file, share->rows_recorded, (void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value, share->auto_increment_value,
share->forced_flushes, share->forced_flushes,
share->real_path,
share->crashed ? TRUE :FALSE); share->crashed ? TRUE :FALSE);
if (azclose(&(share->archive_write))) if (azclose(&(share->archive_write)))
rc= 1; rc= 1;
...@@ -566,7 +592,7 @@ int ha_archive::open(const char *name, int mode, uint open_options) ...@@ -566,7 +592,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)
int rc= 0; int rc= 0;
DBUG_ENTER("ha_archive::open"); DBUG_ENTER("ha_archive::open");
DBUG_PRINT("info", ("archive table was opened for crash %s", DBUG_PRINT("info", ("archive table was opened for crash: %s",
(open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no")); (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
share= get_share(name, table, &rc); share= get_share(name, table, &rc);
...@@ -582,6 +608,7 @@ int ha_archive::open(const char *name, int mode, uint open_options) ...@@ -582,6 +608,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)
thr_lock_data_init(&share->lock,&lock,NULL); thr_lock_data_init(&share->lock,&lock,NULL);
DBUG_PRINT("info", ("archive data_file_name %s", share->data_file_name));
if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))) if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
{ {
if (errno == EROFS || errno == EACCES) if (errno == EROFS || errno == EACCES)
...@@ -679,19 +706,41 @@ int ha_archive::create(const char *name, TABLE *table_arg, ...@@ -679,19 +706,41 @@ int ha_archive::create(const char *name, TABLE *table_arg,
} }
} }
write_meta_file(create_file, 0, auto_increment_value, 0, FALSE); write_meta_file(create_file, 0, auto_increment_value, 0,
(char *)create_info->data_file_name,
FALSE);
my_close(create_file,MYF(0)); my_close(create_file,MYF(0));
/* /*
We reuse name_buff since it is available. We reuse name_buff since it is available.
*/ */
if ((create_file= my_create(fn_format(name_buff,name,"",ARZ, if (create_info->data_file_name)
{
char linkname[FN_REFLEN];
DBUG_PRINT("info", ("archive will create stream file %s",
create_info->data_file_name));
fn_format(name_buff, create_info->data_file_name, "", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(linkname, name, "", ARZ,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
if ((create_file= my_create_with_symlink(linkname, name_buff, 0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
}
else
{
if ((create_file= my_create(fn_format(name_buff, name,"", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{ {
error= my_errno; error= my_errno;
goto error; goto error;
} }
}
if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY)) if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
{ {
error= errno; error= errno;
...@@ -1348,8 +1397,10 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info) ...@@ -1348,8 +1397,10 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
ha_archive::info(HA_STATUS_AUTO | HA_STATUS_CONST); ha_archive::info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{ {
create_info->auto_increment_value=auto_increment_value; create_info->auto_increment_value= auto_increment_value;
} }
if (*share->real_path)
create_info->data_file_name= share->real_path;
} }
......
...@@ -41,6 +41,7 @@ typedef struct st_archive_share { ...@@ -41,6 +41,7 @@ typedef struct st_archive_share {
ulonglong auto_increment_value; ulonglong auto_increment_value;
ulonglong forced_flushes; ulonglong forced_flushes;
ulonglong mean_rec_length; ulonglong mean_rec_length;
char real_path[FN_REFLEN];
} ARCHIVE_SHARE; } ARCHIVE_SHARE;
/* /*
...@@ -102,10 +103,12 @@ class ha_archive: public handler ...@@ -102,10 +103,12 @@ class ha_archive: public handler
int get_row(azio_stream *file_to_read, byte *buf); int get_row(azio_stream *file_to_read, byte *buf);
int read_meta_file(File meta_file, ha_rows *rows, int read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment, ulonglong *auto_increment,
ulonglong *forced_flushes); ulonglong *forced_flushes,
char *real_path);
int write_meta_file(File meta_file, ha_rows rows, int write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment, ulonglong auto_increment,
ulonglong forced_flushes, ulonglong forced_flushes,
char *real_path,
bool dirty); bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc); ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
int free_share(ARCHIVE_SHARE *share); int free_share(ARCHIVE_SHARE *share);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment