Commit 50dd013c authored by brian@zim.(none)

Merge baker@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into  zim.(none):/home/brian/mysql/archive-5.1
parents 5088f5b3 de44001a
@@ -4,13 +4,13 @@ Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
 Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
 ) ENGINE=archive;
 INSERT INTO t1 VALUES (9410,9412);
-select period from t1;
+select period FROM t1;
 period
 9410
-select * from t1;
+select * FROM t1;
 Period Varor_period
 9410 9412
-select t1.* from t1;
+select t1.* FROM t1;
 Period Varor_period
 9410 9412
 CREATE TABLE t2 (
@@ -22,13 +22,13 @@ fld4 char(35) DEFAULT '' NOT NULL,
 fld5 char(35) DEFAULT '' NOT NULL,
 fld6 char(4) DEFAULT '' NOT NULL
 ) ENGINE=archive;
-select t2.fld3 from t2 where companynr = 58 and fld3 like "%imaginable%";
+select t2.fld3 FROM t2 where companynr = 58 and fld3 like "%imaginable%";
 fld3
 imaginable
-select fld3 from t2 where fld3 like "%cultivation" ;
+select fld3 FROM t2 where fld3 like "%cultivation" ;
 fld3
 cultivation
-select t2.fld3,companynr from t2 where companynr = 57+1 order by fld3;
+select t2.fld3,companynr FROM t2 where companynr = 57+1 order by fld3;
 fld3 companynr
 concoct 58
 druggists 58
@@ -53,7 +53,7 @@ synergy 58
 thanking 58
 tying 58
 unlocks 58
-select fld3,companynr from t2 where companynr = 58 order by fld3;
+select fld3,companynr FROM t2 where companynr = 58 order by fld3;
 fld3 companynr
 concoct 58
 druggists 58
@@ -78,7 +78,7 @@ synergy 58
 thanking 58
 tying 58
 unlocks 58
-select fld3 from t2 order by fld3 desc limit 10;
+select fld3 FROM t2 order by fld3 desc limit 10;
 fld3
 youthfulness
 yelped
@@ -90,49 +90,49 @@ Winsett
 Willy
 willed
 wildcats
-select fld3 from t2 order by fld3 desc limit 5;
+select fld3 FROM t2 order by fld3 desc limit 5;
 fld3
 youthfulness
 yelped
 Wotan
 workers
 Witt
-select fld3 from t2 order by fld3 desc limit 5,5;
+select fld3 FROM t2 order by fld3 desc limit 5,5;
 fld3
 witchcraft
 Winsett
 Willy
 willed
 wildcats
-select t2.fld3 from t2 where fld3 = 'honeysuckle';
+select t2.fld3 FROM t2 where fld3 = 'honeysuckle';
 fld3
 honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckl_';
+select t2.fld3 FROM t2 where fld3 LIKE 'honeysuckl_';
 fld3
 honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'hon_ysuckl_';
+select t2.fld3 FROM t2 where fld3 LIKE 'hon_ysuckl_';
 fld3
 honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckle%';
+select t2.fld3 FROM t2 where fld3 LIKE 'honeysuckle%';
 fld3
 honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'h%le';
+select t2.fld3 FROM t2 where fld3 LIKE 'h%le';
 fld3
 honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckle_';
+select t2.fld3 FROM t2 where fld3 LIKE 'honeysuckle_';
 fld3
-select t2.fld3 from t2 where fld3 LIKE 'don_t_find_me_please%';
+select t2.fld3 FROM t2 where fld3 LIKE 'don_t_find_me_please%';
 fld3
-select t2.fld3 from t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
+select t2.fld3 FROM t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
 fld3
 honeysuckle
 honoring
-select fld1,fld3 from t2 where fld3="Colombo" or fld3 = "nondecreasing" order by fld3;
+select fld1,fld3 FROM t2 where fld3="Colombo" or fld3 = "nondecreasing" order by fld3;
 fld1 fld3
 148504 Colombo
 068305 Colombo
 000000 nondecreasing
-select fld1,fld3 from t2 where companynr = 37 and fld3 like 'f%';
+select fld1,fld3 FROM t2 where companynr = 37 and fld3 like 'f%';
 fld1 fld3
 012001 flanking
 013602 foldout
@@ -165,37 +165,37 @@ fld1 fld3
 232102 forgivably
 238007 filial
 238008 fixedly
-select fld3 from t2 where fld3 like "L%" and fld3 = "ok";
+select fld3 FROM t2 where fld3 like "L%" and fld3 = "ok";
 fld3
-select fld3 from t2 where (fld3 like "C%" and fld3 = "Chantilly");
+select fld3 FROM t2 where (fld3 like "C%" and fld3 = "Chantilly");
 fld3
 Chantilly
-select fld1,fld3 from t2 where fld1 like "25050%";
+select fld1,fld3 FROM t2 where fld1 like "25050%";
 fld1 fld3
 250501 poisoning
 250502 Iraqis
 250503 heaving
 250504 population
 250505 bomb
-select fld1,fld3 from t2 where fld1 like "25050_";
+select fld1,fld3 FROM t2 where fld1 like "25050_";
 fld1 fld3
 250501 poisoning
 250502 Iraqis
 250503 heaving
 250504 population
 250505 bomb
-create table t3 engine=archive select * from t2;
+create table t3 engine=archive select * FROM t2;
-select * from t3 where fld3='bonfire';
+select * FROM t3 where fld3='bonfire';
 auto fld1 companynr fld3 fld4 fld5 fld6
 1191 068504 00 bonfire corresponds positively
-select count(*) from t3;
+select count(*) FROM t3;
 count(*)
 1199
 rename table t3 to t4;
-select * from t4 where fld3='bonfire';
+select * FROM t4 where fld3='bonfire';
 auto fld1 companynr fld3 fld4 fld5 fld6
 1191 068504 00 bonfire corresponds positively
-select count(*) from t4;
+select count(*) FROM t4;
 count(*)
 1199
 INSERT INTO t2 VALUES (1,000001,00,'Omaha','teethe','neat','');
@@ -11130,7 +11130,7 @@ t2 CREATE TABLE `t2` (
 `fld4` char(35) NOT NULL default '',
 `fld5` char(35) NOT NULL default ''
 ) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
-SELECT * from t2;
+SELECT * FROM t2;
 auto fld1 companynr fld3 fld4 fld5
 1 000001 00 Omaha teethe neat
 2 011401 37 breaking dreaded Steinberg
@@ -12346,4 +12346,63 @@ auto fld1 companynr fld3 fld4 fld5
 3 011402 37 Romans scholastics jarring
 4 011403 37 intercepted audiology tinily
 4 011403 37 intercepted audiology tinily
-drop table t1, t2, t4;
+CREATE TABLE `t5` (
+`a` int(11) NOT NULL auto_increment,
+b char(12),
+PRIMARY KEY (`a`)
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (32, "foo");
+INSERT INTO t5 VALUES (23, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (3, "foo");
+ERROR 23000: Can't write; duplicate key in table 't5'
+INSERT INTO t5 VALUES (0, "foo");
+SELECT * FROM t5;
+a b
+1 foo
+2 foo
+3 foo
+4 foo
+5 foo
+32 foo
+23 foo
+33 foo
+34 foo
+35 foo
+DROP TABLE t5;
+CREATE TABLE `t5` (
+`a` int(11) NOT NULL auto_increment,
+b char(12),
+KEY (`a`)
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (32, "foo");
+INSERT INTO t5 VALUES (23, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (3, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+SELECT * FROM t5;
+a b
+1 foo
+2 foo
+3 foo
+4 foo
+5 foo
+32 foo
+23 foo
+33 foo
+34 foo
+3 foo
+35 foo
+drop table t1, t2, t4, t5;
 #
 # Simple test for archive example
-# Taken from the select test
+# Taken FROM the select test
 #
 -- source include/have_archive.inc
@@ -15,9 +15,9 @@ CREATE TABLE t1 (
 INSERT INTO t1 VALUES (9410,9412);
-select period from t1;
-select * from t1;
-select t1.* from t1;
+select period FROM t1;
+select * FROM t1;
+select t1.* FROM t1;
 #
 # Create test table
@@ -1243,64 +1243,64 @@ INSERT INTO t2 VALUES (1193,000000,00,'nondecreasing','implant','thrillingly',''
 # Search with a key
 #
-select t2.fld3 from t2 where companynr = 58 and fld3 like "%imaginable%";
-select fld3 from t2 where fld3 like "%cultivation" ;
+select t2.fld3 FROM t2 where companynr = 58 and fld3 like "%imaginable%";
+select fld3 FROM t2 where fld3 like "%cultivation" ;
 #
 # Search with a key using sorting and limit the same time
 #
-select t2.fld3,companynr from t2 where companynr = 57+1 order by fld3;
-select fld3,companynr from t2 where companynr = 58 order by fld3;
-select fld3 from t2 order by fld3 desc limit 10;
-select fld3 from t2 order by fld3 desc limit 5;
-select fld3 from t2 order by fld3 desc limit 5,5;
+select t2.fld3,companynr FROM t2 where companynr = 57+1 order by fld3;
+select fld3,companynr FROM t2 where companynr = 58 order by fld3;
+select fld3 FROM t2 order by fld3 desc limit 10;
+select fld3 FROM t2 order by fld3 desc limit 5;
+select fld3 FROM t2 order by fld3 desc limit 5,5;
 #
 # Search with a key having a constant with each unique key.
 # The table is read directly with read-next on fld3
 #
-select t2.fld3 from t2 where fld3 = 'honeysuckle';
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckl_';
-select t2.fld3 from t2 where fld3 LIKE 'hon_ysuckl_';
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckle%';
-select t2.fld3 from t2 where fld3 LIKE 'h%le';
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckle_';
-select t2.fld3 from t2 where fld3 LIKE 'don_t_find_me_please%';
+select t2.fld3 FROM t2 where fld3 = 'honeysuckle';
+select t2.fld3 FROM t2 where fld3 LIKE 'honeysuckl_';
+select t2.fld3 FROM t2 where fld3 LIKE 'hon_ysuckl_';
+select t2.fld3 FROM t2 where fld3 LIKE 'honeysuckle%';
+select t2.fld3 FROM t2 where fld3 LIKE 'h%le';
+select t2.fld3 FROM t2 where fld3 LIKE 'honeysuckle_';
+select t2.fld3 FROM t2 where fld3 LIKE 'don_t_find_me_please%';
 #
 # Test sorting with a used key (there is no need for sorting)
 #
-select t2.fld3 from t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
-select fld1,fld3 from t2 where fld3="Colombo" or fld3 = "nondecreasing" order by fld3;
+select t2.fld3 FROM t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
+select fld1,fld3 FROM t2 where fld3="Colombo" or fld3 = "nondecreasing" order by fld3;
 #
 # Search with a key with LIKE constant
 # If the like starts with a certain letter key will be used.
 #
-select fld1,fld3 from t2 where companynr = 37 and fld3 like 'f%';
-select fld3 from t2 where fld3 like "L%" and fld3 = "ok";
-select fld3 from t2 where (fld3 like "C%" and fld3 = "Chantilly");
-select fld1,fld3 from t2 where fld1 like "25050%";
-select fld1,fld3 from t2 where fld1 like "25050_";
+select fld1,fld3 FROM t2 where companynr = 37 and fld3 like 'f%';
+select fld3 FROM t2 where fld3 like "L%" and fld3 = "ok";
+select fld3 FROM t2 where (fld3 like "C%" and fld3 = "Chantilly");
+select fld1,fld3 FROM t2 where fld1 like "25050%";
+select fld1,fld3 FROM t2 where fld1 like "25050_";
 #
 # Test rename of table
 #
-create table t3 engine=archive select * from t2;
-select * from t3 where fld3='bonfire';
-select count(*) from t3;
+create table t3 engine=archive select * FROM t2;
+select * FROM t3 where fld3='bonfire';
+select count(*) FROM t3;
 # Clean up path in error message
 --replace_result $MYSQL_TEST_DIR . /var/master-data/ /
 rename table t3 to t4;
-select * from t4 where fld3='bonfire';
-select count(*) from t4;
+select * FROM t4 where fld3='bonfire';
+select count(*) FROM t4;
 # End of 4.1 tests
@@ -1351,10 +1351,58 @@ INSERT DELAYED INTO t2 VALUES (4,011403,37,'intercepted','audiology','tinily',''
 # Adding test for alter table
 ALTER TABLE t2 DROP COLUMN fld6;
 SHOW CREATE TABLE t2;
-SELECT * from t2;
+SELECT * FROM t2;
+# Adding tests for autoincrement
+# First the simple stuff
+CREATE TABLE `t5` (
+`a` int(11) NOT NULL auto_increment,
+b char(12),
+PRIMARY KEY (`a`)
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (32, "foo");
+INSERT INTO t5 VALUES (23, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+--error 1022
+INSERT INTO t5 VALUES (3, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+SELECT * FROM t5;
+DROP TABLE t5;
+CREATE TABLE `t5` (
+`a` int(11) NOT NULL auto_increment,
+b char(12),
+KEY (`a`)
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (32, "foo");
+INSERT INTO t5 VALUES (23, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+INSERT INTO t5 VALUES (3, "foo");
+INSERT INTO t5 VALUES (0, "foo");
+SELECT * FROM t5;
 #
 # Cleanup, test is over
 #
-drop table t1, t2, t4;
+--disable_warnings
+drop table t1, t2, t4, t5;
+--enable_warnings
@@ -253,7 +253,15 @@ class Field
     ptr-=row_offset;
     return tmp;
   }
+  inline longlong val_int(char *new_ptr)
+  {
+    char *old_ptr= ptr;
+    longlong return_value;
+    ptr= new_ptr;
+    return_value= val_int();
+    ptr= old_ptr;
+    return return_value;
+  }
   inline String *val_str(String *str, char *new_ptr)
   {
     char *old_ptr= ptr;
...
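The new Field::val_int(char *new_ptr) overload above mirrors the existing val_str(String*, char*) overload directly below it: it temporarily repoints the field at another record buffer, evaluates the value, and restores the original pointer. The archive handler relies on it to read the auto-increment column out of rows held in a scratch buffer (the field->val_int((char*)(buf + field->offset())) calls further down). A minimal standalone sketch of the same save/restore idiom, with invented names and a plain memcpy standing in for the real Field machinery:

#include <cassert>
#include <cstring>

// Illustrative stand-in for a Field-like object: it normally evaluates the
// record its cached ptr points at, but can be asked to evaluate another buffer.
struct IntFieldSketch
{
  char *ptr;                          // points into the "current" record

  long long val_int() const           // normal path: read through ptr
  {
    long long v;
    std::memcpy(&v, ptr, sizeof(v));
    return v;
  }

  long long val_int(char *new_ptr)    // overload: evaluate an arbitrary buffer
  {
    char *old_ptr= ptr;
    ptr= new_ptr;
    long long v= val_int();
    ptr= old_ptr;                     // restore, exactly as the patch does
    return v;
  }
};

int main()
{
  long long a= 7, b= 42;
  IntFieldSketch f= { reinterpret_cast<char*>(&a) };
  assert(f.val_int() == 7);
  assert(f.val_int(reinterpret_cast<char*>(&b)) == 42);
  assert(f.val_int() == 7);           // original pointer was restored
  return 0;
}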
@@ -104,6 +104,7 @@
   rows - This is an unsigned long long which is the number of rows in the data
          file.
   check point - Reserved for future use
+  auto increment - MAX value for autoincrement
   dirty - Status of the file, whether or not its values are the latest. This
           flag is what causes a repair to occur
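For orientation: after this commit the meta file is a fixed-size record of 27 bytes (assuming the usual 8-byte ulonglong): a check byte, a version byte, the row count, the reserved check point, the new auto-increment high-water mark, and the dirty flag. The constants below are only an illustration derived from the field list above and from the new META_BUFFER_SIZE definition further down; the names are invented, not taken from the tree.

#include <cstddef>

typedef unsigned char uchar;
typedef unsigned long long ulonglong;

// Illustrative offsets into the buffer that read_meta_file()/write_meta_file()
// walk with their ptr arithmetic.
static const size_t META_OFF_CHECK_HEADER= 0;                         // uchar
static const size_t META_OFF_VERSION=      1;                         // uchar
static const size_t META_OFF_ROWS=         2;                         // ulonglong
static const size_t META_OFF_CHECK_POINT=  2 + sizeof(ulonglong);     // ulonglong
static const size_t META_OFF_AUTO_INC=     2 + 2 * sizeof(ulonglong); // ulonglong, new in this commit
static const size_t META_OFF_DIRTY=        2 + 3 * sizeof(ulonglong); // uchar
static const size_t META_TOTAL_SIZE=       META_OFF_DIRTY + 1;        // 27 bytes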
@@ -125,9 +126,11 @@ static HASH archive_open_tables;
 #define ARN ".ARN" // Files used during an optimize call
 #define ARM ".ARM" // Meta file
 /*
-  uchar + uchar + ulonglong + ulonglong + uchar
+  uchar + uchar + ulonglong + ulonglong + ulonglong + uchar
 */
-#define META_BUFFER_SIZE 19 // Size of the data used in the meta file
+#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
+  + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
 /*
   uchar + uchar
 */
@@ -300,9 +303,11 @@ int ha_archive::write_data_header(azio_stream *file_to_write)
   This method reads the header of a meta file and returns whether or not it was successful.
   *rows will contain the current number of rows in the data file upon success.
 */
-int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
+int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
+                               ulonglong *auto_increment)
 {
   uchar meta_buffer[META_BUFFER_SIZE];
+  uchar *ptr= meta_buffer;
   ulonglong check_point;
   DBUG_ENTER("ha_archive::read_meta_file");
@@ -314,17 +319,24 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
   /*
     Parse out the meta data, we ignore version at the moment
   */
-  *rows= (ha_rows)uint8korr(meta_buffer + 2);
-  check_point= uint8korr(meta_buffer + 10);
+  ptr+= sizeof(uchar)*2; // Move past header
+  *rows= (ha_rows)uint8korr(ptr);
+  ptr+= sizeof(ulonglong); // Move past rows
+  check_point= uint8korr(ptr);
+  ptr+= sizeof(ulonglong); // Move past check_point
+  *auto_increment= uint8korr(ptr);
+  ptr+= sizeof(ulonglong); // Move past auto_increment
   DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
   DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
   if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
-      ((bool)meta_buffer[18] == TRUE))
+      ((bool)(*ptr)== TRUE))
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   my_sync(meta_file, MYF(MY_WME));
@@ -337,22 +349,34 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
   By setting dirty you say whether or not the file represents the actual state of the data file.
   Upon ::open() we set to dirty, and upon ::close() we set to clean.
 */
-int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
+int ha_archive::write_meta_file(File meta_file, ha_rows rows,
+                                ulonglong auto_increment, bool dirty)
 {
   uchar meta_buffer[META_BUFFER_SIZE];
+  uchar *ptr= meta_buffer;
   ulonglong check_point= 0; //Reserved for the future
   DBUG_ENTER("ha_archive::write_meta_file");
-  meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
-  meta_buffer[1]= (uchar)ARCHIVE_VERSION;
-  int8store(meta_buffer + 2, (ulonglong)rows);
-  int8store(meta_buffer + 10, check_point);
-  *(meta_buffer + 18)= (uchar)dirty;
-  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
+  *ptr= (uchar)ARCHIVE_CHECK_HEADER;
+  ptr += sizeof(uchar);
+  *ptr= (uchar)ARCHIVE_VERSION;
+  ptr += sizeof(uchar);
+  int8store(ptr, (ulonglong)rows);
+  ptr += sizeof(ulonglong);
+  int8store(ptr, check_point);
+  ptr += sizeof(ulonglong);
+  int8store(ptr, auto_increment);
+  ptr += sizeof(ulonglong);
+  *ptr= (uchar)dirty;
+  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
+                                             (uint)ARCHIVE_CHECK_HEADER));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
+                                             (uint)ARCHIVE_VERSION));
   DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
   DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
+                                             auto_increment));
   DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
   VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
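As a sanity check on that layout, here is a small self-contained round-trip in the same spirit as write_meta_file()/read_meta_file(): pack the fields into a 27-byte buffer, then read them back. The store8()/load8() helpers are little-endian stand-ins for MySQL's int8store()/uint8korr() macros, and the concrete values (check byte, 1199 rows, auto-increment 35) are only illustrative.

#include <cassert>

typedef unsigned char uchar;
typedef unsigned long long ulonglong;

// Little-endian 8-byte store/load, standing in for int8store()/uint8korr().
static void store8(uchar *p, ulonglong v)
{
  for (int i= 0; i < 8; i++)
    p[i]= (uchar)(v >> (8 * i));
}

static ulonglong load8(const uchar *p)
{
  ulonglong v= 0;
  for (int i= 0; i < 8; i++)
    v|= (ulonglong)p[i] << (8 * i);
  return v;
}

int main()
{
  uchar meta[27];
  uchar *ptr= meta;
  *ptr++= 0xfe;                    // check header (illustrative value)
  *ptr++= 2;                       // ARCHIVE_VERSION after this commit
  store8(ptr, 1199); ptr+= 8;      // rows
  store8(ptr, 0);    ptr+= 8;      // check point (reserved)
  store8(ptr, 35);   ptr+= 8;      // auto_increment high-water mark
  *ptr= 0;                         // dirty flag

  ptr= meta + 2;                   // skip check byte and version
  assert(load8(ptr) == 1199); ptr+= 8;
  assert(load8(ptr) == 0);    ptr+= 8;
  assert(load8(ptr) == 35);   ptr+= 8;
  assert(*ptr == 0);
  return 0;
}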
@@ -414,17 +438,19 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
     opposite. If the meta file will not open we assume it is crashed and
     leave it up to the user to fix.
   */
-  if (read_meta_file(share->meta_file, &share->rows_recorded))
+  if (read_meta_file(share->meta_file, &share->rows_recorded,
+                     &share->auto_increment_value))
     share->crashed= TRUE;
   else
-    (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+    (void)write_meta_file(share->meta_file, share->rows_recorded,
+                          share->auto_increment_value, TRUE);
   /*
     It is expensive to open and close the data files and since you can't have
     a gzip file that can be both read and written we keep a writer open
     that is shared amoung all open tables.
   */
-  if (!(azopen(&(share->archive_write), share->data_file_name, O_WRONLY|O_APPEND|O_BINARY)))
+  if (!(azopen(&(share->archive_write), share->data_file_name,
+               O_WRONLY|O_APPEND|O_BINARY)))
   {
     DBUG_PRINT("info", ("Could not open archive write file"));
     share->crashed= TRUE;
@@ -452,7 +478,8 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
   hash_delete(&archive_open_tables, (byte*) share);
   thr_lock_delete(&share->lock);
   VOID(pthread_mutex_destroy(&share->mutex));
-  (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
+  (void)write_meta_file(share->meta_file, share->rows_recorded,
+                        share->auto_increment_value, FALSE);
   if (azclose(&(share->archive_write)))
     rc= 1;
   if (my_close(share->meta_file, MYF(0)))
@@ -561,7 +588,26 @@ int ha_archive::create(const char *name, TABLE *table_arg,
     error= my_errno;
     goto error;
   }
-  write_meta_file(create_file, 0, FALSE);
+  for (uint key= 0; key < table_arg->s->keys; key++)
+  {
+    KEY *pos= table_arg->key_info+key;
+    KEY_PART_INFO *key_part= pos->key_part;
+    KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
+    for (; key_part != key_part_end; key_part++)
+    {
+      Field *field= key_part->field;
+      if (!(field->flags & AUTO_INCREMENT_FLAG))
+      {
+        error= -1;
+        goto error;
+      }
+    }
+  }
+  write_meta_file(create_file, 0, 0, FALSE);
   my_close(create_file,MYF(0));
   /*
@@ -614,7 +660,8 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
   DBUG_ENTER("ha_archive::real_write_row");
   written= azwrite(writer, buf, table->s->reclength);
-  DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
+  DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d",
+                                            written, table->s->reclength));
   if (!delayed_insert || !bulk_insert)
     share->dirty= TRUE;
@@ -655,6 +702,8 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
 int ha_archive::write_row(byte *buf)
 {
   int rc;
+  byte *read_buf= NULL;
+  ulonglong temp_auto;
   DBUG_ENTER("ha_archive::write_row");
   if (share->crashed)
@@ -664,13 +713,165 @@ int ha_archive::write_row(byte *buf)
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
+  if (table->next_number_field)
+  {
+    KEY *mkey= &table->s->key_info[0]; // We only support one key right now
+    update_auto_increment();
+    temp_auto= table->next_number_field->val_int();
+    /*
+      Bad news, this will cause a search for the unique value which is very
+      expensive since we will have to do a table scan which will lock up
+      all other writers during this period. This could perhaps be optimized
+      in the future.
+    */
+    if (temp_auto == share->auto_increment_value &&
+        mkey->flags & HA_NOSAME)
+    {
+      rc= HA_ERR_FOUND_DUPP_KEY;
+      goto error;
+    }
+    if (temp_auto < share->auto_increment_value &&
+        mkey->flags & HA_NOSAME)
+    {
+      /*
+        First we create a buffer that we can use for reading rows, and can pass
+        to get_row().
+      */
+      if (!(read_buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+      {
+        rc= HA_ERR_OUT_OF_MEM;
+        goto error;
+      }
+      /*
+        All of the buffer must be written out or we won't see all of the
+        data
+      */
+      azflush(&(share->archive_write), Z_SYNC_FLUSH);
+      /*
+        Set the position of the local read thread to the beginning postion.
+      */
+      if (read_data_header(&archive))
+      {
+        rc= HA_ERR_CRASHED_ON_USAGE;
+        goto error;
+      }
+      /*
+        Now we read and check all of the rows.
+        if (!memcmp(table->next_number_field->ptr, mfield->ptr, mfield->max_length()))
+        if ((longlong)temp_auto ==
+            mfield->val_int((char*)(read_buf + mfield->offset())))
+      */
+      Field *mfield= table->next_number_field;
+      while (!(get_row(&archive, read_buf)))
+      {
+        if (!memcmp(read_buf + mfield->offset(), table->next_number_field->ptr,
+                    mfield->max_length()))
+        {
+          rc= HA_ERR_FOUND_DUPP_KEY;
+          goto error;
+        }
+      }
+    }
+    else
+    {
+      if (temp_auto > share->auto_increment_value)
+        auto_increment_value= share->auto_increment_value= temp_auto;
+    }
+  }
+  /*
+    Notice that the global auto_increment has been increased.
+    In case of a failed row write, we will never try to reuse the value.
+  */
   share->rows_recorded++;
   rc= real_write_row(buf, &(share->archive_write));
+error:
   pthread_mutex_unlock(&share->mutex);
+  if (read_buf)
+    my_free(read_buf, MYF(0));
   DBUG_RETURN(rc);
 }
+ulonglong ha_archive::get_auto_increment()
+{
+  return share->auto_increment_value + 1;
+}
+/* Initialized at each key walk (called multiple times unlike rnd_init()) */
+int ha_archive::index_init(uint keynr, bool sorted)
+{
+  DBUG_ENTER("ha_archive::index_init");
+  active_index= keynr;
+  DBUG_RETURN(0);
+}
+/*
+  No indexes, so if we get a request for an index search since we tell
+  the optimizer that we have unique indexes, we scan
+*/
+int ha_archive::index_read(byte *buf, const byte *key,
+                           uint key_len, enum ha_rkey_function find_flag)
+{
+  int rc;
+  DBUG_ENTER("ha_archive::index_read");
+  rc= index_read_idx(buf, active_index, key, key_len, find_flag);
+  DBUG_RETURN(rc);
+}
+int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
+                               uint key_len, enum ha_rkey_function find_flag)
+{
+  int rc= 0;
+  bool found= 0;
+  KEY *mkey= &table->s->key_info[index];
+  uint k_offset= mkey->key_part->offset;
+  DBUG_ENTER("ha_archive::index_read_idx");
+  /*
+    All of the buffer must be written out or we won't see all of the
+    data
+  */
+  pthread_mutex_lock(&share->mutex);
+  azflush(&(share->archive_write), Z_SYNC_FLUSH);
+  pthread_mutex_unlock(&share->mutex);
+  /*
+    Set the position of the local read thread to the beginning postion.
+  */
+  if (read_data_header(&archive))
+  {
+    rc= HA_ERR_CRASHED_ON_USAGE;
+    goto error;
+  }
+  while (!(get_row(&archive, buf)))
+  {
+    if (!memcmp(key, buf+k_offset, key_len))
+    {
+      found= 1;
+      break;
+    }
+  }
+  if (found)
+    DBUG_RETURN(0);
+error:
+  DBUG_RETURN(rc ? rc : HA_ERR_END_OF_FILE);
+}
 /*
   All calls that need to scan the table start with this method. If we are told
   that it is a table scan we rewind the file to the beginning, otherwise
@@ -726,7 +927,8 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
   DBUG_ENTER("ha_archive::get_row");
   read= azread(file_to_read, buf, table->s->reclength);
-  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));
+  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read,
+                                     table->s->reclength));
   if (read == Z_STREAM_ERROR)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -912,9 +1114,18 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   if (!rc)
   {
     share->rows_recorded= 0;
+    auto_increment_value= share->auto_increment_value= 0;
     while (!(rc= get_row(&archive, buf)))
     {
       real_write_row(buf, &writer);
+      if (table->found_next_number_field)
+      {
+        Field *field= table->found_next_number_field;
+        if (share->auto_increment_value <
+            field->val_int((char*)(buf + field->offset())))
+          auto_increment_value= share->auto_increment_value=
+            field->val_int((char*)(buf + field->offset()));
+      }
       share->rows_recorded++;
     }
   }
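Taken together, the auto-increment bookkeeping in this patch is deliberately simple: the share caches the highest value ever stored, get_auto_increment() hands out cache + 1, write_row() raises the cache when a larger explicit value arrives and, for a PRIMARY/UNIQUE key, falls back to a full scan to reject duplicates, and optimize() above rebuilds the high-water mark while rewriting the data file. The toy model below mirrors that logic (a std::set stands in for the table scan; all names are invented) and replays the PRIMARY KEY sequence from the new archive.test case:

#include <cassert>
#include <set>

struct ArchiveShareSketch
{
  unsigned long long auto_increment_value;   // high-water mark, as in ARCHIVE_SHARE
  std::set<unsigned long long> rows;         // stands in for scanning the data file
};

static unsigned long long get_auto_increment(const ArchiveShareSketch &s)
{
  return s.auto_increment_value + 1;         // what the handler reports
}

// Returns false on a duplicate-key error, true on success.
static bool write_row(ArchiveShareSketch &s, unsigned long long value,
                      bool unique_key)
{
  if (value == 0)                            // let the engine pick the next value
    value= get_auto_increment(s);
  if (unique_key)
  {
    if (value == s.auto_increment_value)
      return false;                          // fast path in write_row()
    if (value < s.auto_increment_value && s.rows.count(value))
      return false;                          // what the expensive scan detects
  }
  if (value > s.auto_increment_value)
    s.auto_increment_value= value;           // raise the high-water mark
  s.rows.insert(value);
  return true;
}

int main()
{
  // Mirrors the PRIMARY KEY block added to archive.test / archive.result.
  ArchiveShareSketch s= { 0, {} };
  for (int i= 0; i < 5; i++)
    assert(write_row(s, 0, true));           // rows 1..5
  assert(write_row(s, 32, true));
  assert(write_row(s, 23, true));
  assert(write_row(s, 0, true));             // 33
  assert(write_row(s, 0, true));             // 34
  assert(!write_row(s, 3, true));            // ERROR 23000: duplicate key
  assert(write_row(s, 0, true));             // 35
  assert(s.auto_increment_value == 35);
  return 0;
}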
@@ -1028,6 +1239,9 @@ void ha_archive::info(uint flag)
   delete_length= 0;
   index_file_length=0;
+  if (flag & HA_STATUS_AUTO)
+    auto_increment_value= share->auto_increment_value;
   DBUG_VOID_RETURN;
 }
...
@@ -18,6 +18,7 @@
 #pragma interface /* gcc class implementation */
 #endif
+#include <values.h>
 #include <zlib.h>
 #include "../storage/archive/azlib.h"
@@ -38,13 +39,14 @@ typedef struct st_archive_share {
   bool dirty; /* Flag for if a flush should occur */
   bool crashed; /* Meta file is crashed */
   ha_rows rows_recorded; /* Number of rows in tables */
+  ulonglong auto_increment_value;
 } ARCHIVE_SHARE;
 /*
   Version for file format.
   1 - Initial Version
 */
-#define ARCHIVE_VERSION 1
+#define ARCHIVE_VERSION 2
 class ha_archive: public handler
 {
@@ -68,13 +70,22 @@ class ha_archive: public handler
   const char **bas_ext() const;
   ulong table_flags() const
   {
-    return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
+    return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT |
            HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY);
   }
   ulong index_flags(uint idx, uint part, bool all_parts) const
   {
-    return 0;
+    return HA_ONLY_WHOLE_INDEX;
   }
+  ulonglong get_auto_increment();
+  uint max_supported_keys() const { return 1; }
+  uint max_supported_key_length() const { return sizeof(ulonglong); }
+  uint max_supported_key_part_length() const { return sizeof(ulonglong); }
+  int index_init(uint keynr, bool sorted);
+  virtual int index_read(byte * buf, const byte * key,
+                         uint key_len, enum ha_rkey_function find_flag);
+  virtual int index_read_idx(byte * buf, uint index, const byte * key,
+                             uint key_len, enum ha_rkey_function find_flag);
   int open(const char *name, int mode, uint test_if_locked);
   int close(void);
   int write_row(byte * buf);
@@ -84,8 +95,9 @@ class ha_archive: public handler
   int rnd_next(byte *buf);
   int rnd_pos(byte * buf, byte *pos);
   int get_row(azio_stream *file_to_read, byte *buf);
-  int read_meta_file(File meta_file, ha_rows *rows);
-  int write_meta_file(File meta_file, ha_rows rows, bool dirty);
+  int read_meta_file(File meta_file, ha_rows *rows, ulonglong *auto_increment);
+  int write_meta_file(File meta_file, ha_rows rows,
+                      ulonglong auto_increment, bool dirty);
   ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table);
   int free_share(ARCHIVE_SHARE *share);
   bool auto_repair() const { return 1; } // For the moment we just do this
...