Commit b4098ead authored by unknown

Add support for the Gemini table handler. Monty has checked and approved.

Fix bug when a read returns an error


acconfig.h:
  Add Gemini to configure
acinclude.m4:
  Add Gemini to configure
include/my_base.h:
  Add error codes for table handlers
mysql-test/t/select.test:
  Force temporary tables to MyISAM
sql-bench/server-cfg.sh:
  Allow Gemini to run sql-bench
sql/Makefile.am:
  Add Gemini to configure
sql/handler.cc:
  Add support for Gemini table handler
sql/handler.h:
  Add support for Gemini table handler
sql/lex.h:
  Add support for Gemini table handler
sql/mysqld.cc:
  Add support for Gemini table handler
sql/opt_range.cc:
  Fix bug when a read returns an error
sql/records.cc:
  Fix bug when a read returns an error
sql/sql_class.cc:
  Add support for Gemini table handler
sql/sql_class.h:
  Add support for Gemini table handler
sql/sql_lex.h:
  Add support for Gemini table handler
sql/sql_rename.cc:
  Add commit for table rename
sql/sql_table.cc:
  Add commit for table rename
BitKeeper/etc/logging_ok:
  Logging to logging@openlogging.org accepted
parent 4b56b0ee
monty@tik.mysql.fi
mikef@nslinux.bedford.progress.com
@@ -117,6 +117,9 @@
/* POSIX readdir_r */
#undef HAVE_READDIR_R
/* Have Gemini db installed */
#undef HAVE_GEMINI_DB
/* POSIX sigwait */
#undef HAVE_SIGWAIT
@@ -941,6 +941,92 @@ dnl ---------------------------------------------------------------------------
dnl END OF MYSQL_CHECK_INNOBASE SECTION
dnl ---------------------------------------------------------------------------
dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_CHECK_GEMINI
dnl Sets HAVE_GEMINI_DB if --with-gemini is used
dnl ---------------------------------------------------------------------------
AC_DEFUN([MYSQL_CHECK_GEMINI], [
AC_ARG_WITH([gemini],
[\
--with-gemini[=DIR] Use Gemini DB located in DIR],
[gemini="$withval"],
[gemini=no])
AC_MSG_CHECKING([for Gemini DB])
dnl SORT OUT THE SUPPLIED ARGUMENTS TO DETERMINE WHAT TO DO
dnl echo "DBG_GEM1: gemini='$gemini'"
have_gemini_db=no
gemini_includes=
gemini_libs=
case "$gemini" in
no )
AC_MSG_RESULT([Not using Gemini DB])
;;
yes | default | *)
have_gemini_db="yes"
gemini_includes="-I../gemini/incl -I../gemini"
gemini_libs="\
../gemini/api/libapi.a\
../gemini/db/libdb.a\
../gemini/dbut/libdbut.a\
../gemini/vst/libvst.a"
AC_MSG_RESULT([Using Gemini DB])
;;
esac
AC_SUBST(gemini_includes)
AC_SUBST(gemini_libs)
])
dnl ---------------------------------------------------------------------------
dnl END OF MYSQL_CHECK_GEMINI SECTION
dnl ---------------------------------------------------------------------------
dnl ---------------------------------------------------------------------------
dnl Got this from the GNU tar 1.13.11 distribution
dnl by Paul Eggert <eggert@twinsun.com>
@@ -211,6 +211,8 @@ enum ha_base_keytype {
#define HA_ERR_WRONG_TABLE_DEF 143
#define HA_ERR_CRASHED_ON_REPAIR 144 /* Last (automatic?) repair failed */
#define HA_ERR_CRASHED_ON_USAGE 145 /* Table must be repaired */
#define HA_ERR_LOCK_WAIT_TIMEOUT 146
#define HA_ERR_LOCK_TABLE_FULL 147
/* Other constants */
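For context: the two new codes are what the Gemini handler returns when the DSM storage layer reports a lock wait timeout (DSM_S_RQSTREJ) or a full lock table (DSM_S_LKTBFULL); see rnd_next() and findRow() in ha_gemini.cc below. A minimal self-contained sketch of that mapping (the helper name and the numeric DSM_S_* values here are illustrative placeholders; the real constants come from dsmpub.h):

#include <cstdio>

#define HA_ERR_LOCK_WAIT_TIMEOUT 146   /* added to include/my_base.h by this change */
#define HA_ERR_LOCK_TABLE_FULL   147
/* Placeholder values for illustration only; the real constants live in dsmpub.h. */
#define DSM_S_RQSTREJ  -210
#define DSM_S_LKTBFULL -220

/* Map a DSM status code to a handler error code, mirroring ha_gemini.cc. */
static int map_dsm_status(int dsm_status)
{
  switch (dsm_status)
  {
  case 0:              return 0;
  case DSM_S_RQSTREJ:  return HA_ERR_LOCK_WAIT_TIMEOUT;  /* lock wait timed out */
  case DSM_S_LKTBFULL: return HA_ERR_LOCK_TABLE_FULL;    /* lock table exhausted */
  default:             return dsm_status;                /* pass other codes through */
  }
}

int main()
{
  std::printf("%d\n", map_dsm_status(DSM_S_RQSTREJ));    /* prints 146 */
  return 0;
}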
-- require r/have_gemini.require
show variables like "have_gemini";
id code name
1 1 Tim
2 1 Monty
3 2 David
4 2 Erik
5 3 Sasha
6 3 Jeremy
7 4 Matt
id code name
2 1 Monty
3 2 David
4 2 Erik
5 3 Sasha
6 3 Jeremy
7 4 Matt
8 1 Sinisa
id code name
3 2 David
4 2 Erik
5 3 Sasha
6 3 Jeremy
7 4 Matt
8 1 Sinisa
12 1 Ralph
id parent_id level
8 102 2
9 102 2
15 102 2
id parent_id level
1001 100 0
1003 101 1
1004 101 1
1008 102 2
1009 102 2
1017 103 2
1022 104 2
1024 104 2
1028 105 2
1029 105 2
1030 105 2
1031 106 2
1032 106 2
1033 106 2
1203 107 2
1202 107 2
1020 103 2
1157 100 0
1193 105 2
1040 107 2
1002 101 1
1015 102 2
1006 101 1
1034 106 2
1035 106 2
1016 103 2
1007 101 1
1036 107 2
1018 103 2
1026 105 2
1027 105 2
1183 104 2
1038 107 2
1025 105 2
1037 107 2
1021 104 2
1019 103 2
1005 101 1
1179 105 2
id parent_id level
1001 100 0
1003 101 1
1004 101 1
1008 102 2
1010 102 2
1017 103 2
1023 104 2
1024 104 2
1028 105 2
1029 105 2
1030 105 2
1031 106 2
1032 106 2
1033 106 2
1204 107 2
1203 107 2
1020 103 2
1158 100 0
1194 105 2
1041 107 2
1002 101 1
1015 102 2
1006 101 1
1034 106 2
1035 106 2
1016 103 2
1007 101 1
1036 107 2
1018 103 2
1026 105 2
1027 105 2
1184 104 2
1039 107 2
1025 105 2
1038 107 2
1022 104 2
1019 103 2
1005 101 1
1180 105 2
id parent_id level
1008 102 2
1010 102 2
1015 102 2
table type possible_keys key key_len ref rows Extra
t1 ref level level 1 const 6 where used; Using index
table type possible_keys key key_len ref rows Extra
t1 ref level level 1 const 6 where used
table type possible_keys key key_len ref rows Extra
t1 ref level level 1 const 6 where used
level id
1 1003
1 1004
1 1002
1 1006
1 1007
1 1005
level id parent_id
1 1003 101
1 1004 101
1 1002 101
1 1006 101
1 1007 101
1 1005 101
gesuchnr benutzer_id
1 1
2 1
a
2
user_id name phone ref_email detail
10292 sanjeev 29153373 sansh777@hotmail.com xxx
10292 shirish 2333604 shirish@yahoo.com ddsds
10292 sonali 323232 sonali@bolly.com filmstar
user_id name phone ref_email detail
10292 sanjeev 29153373 sansh777@hotmail.com xxx
10292 shirish 2333604 shirish@yahoo.com ddsds
10292 sonali 323232 sonali@bolly.com filmstar
user_id name phone ref_email detail
10292 sanjeev 29153373 sansh777@hotmail.com xxx
10292 shirish 2333604 shirish@yahoo.com ddsds
10292 sonali 323232 sonali@bolly.com filmstar
10293 shirish 2333604 shirish@yahoo.com ddsds
user_id name phone ref_email detail
10293 shirish 2333604 shirish@yahoo.com ddsds
user_id name phone ref_email detail
10291 sanjeev 29153373 sansh777@hotmail.com xxx
a b
1 3
2 3
3 3
a b
1 3
2 3
3 3
a b
a b
1 3
2 3
3 3
a b
1 3
2 3
3 3
id ggid email passwd
1 test1 xxx
id ggid email passwd
1 test1 xxx
id ggid email passwd
2 test2 yyy
id parent_id level
8 102 2
9 102 2
15 102 2
id parent_id level
1001 100 0
1003 101 1
1004 101 1
1008 102 2
1024 102 2
1017 103 2
1022 104 2
1024 104 2
1028 105 2
1029 105 2
1030 105 2
1031 106 2
1032 106 2
1033 106 2
1203 107 2
1202 107 2
1020 103 2
1157 100 0
1193 105 2
1040 107 2
1002 101 1
1015 102 2
1006 101 1
1034 106 2
1035 106 2
1016 103 2
1007 101 1
1036 107 2
1018 103 2
1026 105 2
1027 105 2
1183 104 2
1038 107 2
1025 105 2
1037 107 2
1021 104 2
1019 103 2
1005 101 1
1179 105 2
id parent_id level
1002 100 0
1004 101 1
1005 101 1
1009 102 2
1025 102 2
1018 103 2
1023 104 2
1025 104 2
1029 105 2
1030 105 2
1031 105 2
1032 106 2
1033 106 2
1034 106 2
1204 107 2
1203 107 2
1021 103 2
1158 100 0
1194 105 2
1041 107 2
1003 101 1
1016 102 2
1007 101 1
1035 106 2
1036 106 2
1017 103 2
1008 101 1
1037 107 2
1019 103 2
1027 105 2
1028 105 2
1184 104 2
1039 107 2
1026 105 2
1038 107 2
1022 104 2
1020 103 2
1006 101 1
1180 105 2
id parent_id level
1009 102 2
1025 102 2
1016 102 2
table type possible_keys key key_len ref rows Extra
t1 ref level level 1 const 6 where used; Using index
level id
1 1004
1 1005
1 1003
1 1007
1 1008
1 1006
level id parent_id
1 1004 101
1 1005 101
1 1003 101
1 1007 101
1 1008 101
1 1006 101
level id
1 1003
1 1004
1 1005
1 1006
1 1007
1 1008
id parent_id level
1002 100 0
1009 102 2
1025 102 2
1018 103 2
1023 104 2
1025 104 2
1029 105 2
1030 105 2
1031 105 2
1032 106 2
1033 106 2
1034 106 2
1204 107 2
1203 107 2
1021 103 2
1158 100 0
1194 105 2
1041 107 2
1016 102 2
1035 106 2
1036 106 2
1017 103 2
1037 107 2
1019 103 2
1027 105 2
1028 105 2
1184 104 2
1039 107 2
1026 105 2
1038 107 2
1022 104 2
1020 103 2
1180 105 2
count(*)
1
a
1
2
3
test for rollback
test for rollback
n after rollback
4 after rollback
n after commit
4 after commit
5 after commit
n after commit
4 after commit
5 after commit
6 after commit
n
4
5
6
7
afterbegin_id afterbegin_nom
1 first
2 hamdouni
afterrollback_id afterrollback_nom
1 first
afterautocommit0_id afterautocommit0_nom
1 first
3 mysql
afterrollback_id afterrollback_nom
1 first
id val
id val
pippo 12
id val
ID NAME
1 Jochen
_userid
marc@anyware.co.uk
_userid
marc@anyware.co.uk
Variable_name Value
have_gemini YES
-- source include/have_gemini.inc
#
# Small basic test with ignore
#
drop table if exists t1;
create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) type=gemini;
insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt');
select id, code, name from t1 order by id;
update ignore t1 set id = 8, name = 'Sinisa' where id < 3;
select id, code, name from t1 order by id;
update ignore t1 set id = id + 10, name = 'Ralph' where id < 4;
select id, code, name from t1 order by id;
drop table t1;
#
# A bit bigger test
#
CREATE TABLE t1 (
id int(11) NOT NULL auto_increment,
parent_id int(11) DEFAULT '0' NOT NULL,
level tinyint(4) DEFAULT '0' NOT NULL,
PRIMARY KEY (id),
KEY parent_id (parent_id),
KEY level (level)
) type=gemini;
INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1),(179,5,2);
update t1 set parent_id=parent_id+100;
select * from t1 where parent_id=102;
update t1 set id=id+1000;
!$1062 update t1 set id=1024 where id=1009;
select * from t1;
update ignore t1 set id=id+1; # This will change all rows
select * from t1;
update ignore t1 set id=1023 where id=1010;
select * from t1 where parent_id=102;
explain select level from t1 where level=1;
explain select level,id from t1 where level=1;
explain select level,id,parent_id from t1 where level=1;
select level,id from t1 where level=1;
select level,id,parent_id from t1 where level=1;
drop table t1;
#
# Test replace
#
CREATE TABLE t1 (
gesuchnr int(11) DEFAULT '0' NOT NULL,
benutzer_id int(11) DEFAULT '0' NOT NULL,
PRIMARY KEY (gesuchnr,benutzer_id)
) type=gemini;
replace into t1 (gesuchnr,benutzer_id) values (2,1);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
select * from t1;
drop table t1;
#
# test delete using hidden_primary_key
#
create table t1 (a int) type=gemini;
insert into t1 values (1), (2);
delete from t1 where a = 1;
select * from t1;
drop table t1;
#
# Test auto_increment on sub key
#
#create table t1 (a char(10) not null, b int not null auto_increment, primary key(a,b)) type=gemini;
#insert into t1 values ("a",1),("b",2),("a",2),("c",1);
#insert into t1 values ("a",NULL),("b",NULL),("c",NULL),("e",NULL);
#insert into t1 (a) values ("a"),("b"),("c"),("d");
#insert into t1 (a) values ('k'),('d');
#insert into t1 (a) values ("a");
#insert into t1 values ("d",last_insert_id());
#select * from t1;
#drop table t1;
#
# Test when reading on part of unique key
#
CREATE TABLE t1 (
user_id int(10) DEFAULT '0' NOT NULL,
name varchar(100),
phone varchar(100),
ref_email varchar(100) DEFAULT '' NOT NULL,
detail varchar(200),
PRIMARY KEY (user_id,ref_email)
)type=gemini;
INSERT INTO t1 VALUES (10292,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10292,'shirish','2333604','shirish@yahoo.com','ddsds'),(10292,'sonali','323232','sonali@bolly.com','filmstar');
select * from t1 where user_id=10292;
INSERT INTO t1 VALUES (10291,'sanjeev','29153373','sansh777@hotmail.com','xxx'),(10293,'shirish','2333604','shirish@yahoo.com','ddsds');
select * from t1 where user_id=10292;
select * from t1 where user_id>=10292;
select * from t1 where user_id>10292;
select * from t1 where user_id<10292;
drop table t1;
#
# Test that keys are created in right order
# - Needs ANALYZE TABLE to work - MikeF 2/12/01
#
#CREATE TABLE t1 (a int not null, b int not null,c int not null,
#key(a),primary key(a,b), unique(c),key(a),unique(b)) type = gemini;
#show index from t1;
#drop table t1;
#
# Test of ALTER TABLE and gemini tables
#
#create table t1 (col1 int not null, col2 char(4) not null, primary key(col1));
#alter table t1 type=gemini;
#insert into t1 values ('1','1'),('5','2'),('2','3'),('3','4'),('4','4');
#select * from t1;
#update t1 set col2='7' where col1='4';
#select * from t1;
#alter table t1 add co3 int not null;
#select * from t1;
#update t1 set col2='9' where col1='2';
#select * from t1;
#drop table t1;
#
# INSERT INTO gemini tables
#
create table t1 (a int not null , b int, primary key (a)) type = gemini;
create table t2 (a int not null , b int, primary key (a)) type = myisam;
insert into t1 VALUES (1,3) , (2,3), (3,3);
select * from t1;
insert into t2 select * from t1;
select * from t2;
delete from t1 where b = 3;
select * from t1;
insert into t1 select * from t2;
select * from t1;
select * from t2;
drop table t1,t2;
#
# Search on unique key
#
CREATE TABLE t1 (
id int(11) NOT NULL auto_increment,
ggid varchar(32) binary DEFAULT '' NOT NULL,
email varchar(64) DEFAULT '' NOT NULL,
passwd varchar(32) binary DEFAULT '' NOT NULL,
PRIMARY KEY (id),
UNIQUE ggid (ggid)
) TYPE=gemini;
insert into t1 (ggid,passwd) values ('test1','xxx');
insert into t1 (ggid,passwd) values ('test2','yyy');
select * from t1 where ggid='test1';
select * from t1 where passwd='xxx';
select * from t1 where id=2;
drop table t1;
#
# ORDER BY on not primary key
#
#CREATE TABLE t1 (
# user_name varchar(12),
#password text,
#subscribed char(1),
#user_id int(11) DEFAULT '0' NOT NULL,
#quota bigint(20),
#weight double,
#access_date date,
#access_time time,
#approved datetime,
#dummy_primary_key int(11) NOT NULL auto_increment,
#PRIMARY KEY (dummy_primary_key)
#) TYPE=gemini;
#INSERT INTO t1 VALUES ('user_0','somepassword','N',0,0,0,'2000-09-07','23:06:59','2000-09-07 23:06:59',1);
#INSERT INTO t1 VALUES ('user_1','somepassword','Y',1,1,1,'2000-09-07','23:06:59','2000-09-07 23:06:59',2);
#INSERT INTO t1 VALUES ('user_2','somepassword','N',2,2,1.4142135623731,'2000-09-07','23:06:59','2000-09-07 23:06:59',3);
#INSERT INTO t1 VALUES ('user_3','somepassword','Y',3,3,1.7320508075689,'2000-09-07','23:06:59','2000-09-07 23:06:59',4);
#INSERT INTO t1 VALUES ('user_4','somepassword','N',4,4,2,'2000-09-07','23:06:59','2000-09-07 23:06:59',5);
#select user_name, password , subscribed, user_id, quota, weight, access_date, access_time, approved, dummy_primary_key from t1 order by user_name;
#drop table t1;
#
# Testing of tables without primary keys
#
CREATE TABLE t1 (
id int(11) NOT NULL auto_increment,
parent_id int(11) DEFAULT '0' NOT NULL,
level tinyint(4) DEFAULT '0' NOT NULL,
KEY (id),
KEY parent_id (parent_id),
KEY level (level)
) type=gemini;
INSERT INTO t1 VALUES (1,0,0),(3,1,1),(4,1,1),(8,2,2),(9,2,2),(17,3,2),(22,4,2),(24,4,2),(28,5,2),(29,5,2),(30,5,2),(31,6,2),(32,6,2),(33,6,2),(203,7,2),(202,7,2),(20,3,2),(157,0,0),(193,5,2),(40,7,2),(2,1,1),(15,2,2),(6,1,1),(34,6,2),(35,6,2),(16,3,2),(7,1,1),(36,7,2),(18,3,2),(26,5,2),(27,5,2),(183,4,2),(38,7,2),(25,5,2),(37,7,2),(21,4,2),(19,3,2),(5,1,1);
INSERT INTO t1 values (179,5,2);
update t1 set parent_id=parent_id+100;
select * from t1 where parent_id=102;
update t1 set id=id+1000;
update t1 set id=1024 where id=1009;
select * from t1;
update ignore t1 set id=id+1; # This will change all rows
select * from t1;
update ignore t1 set id=1023 where id=1010;
select * from t1 where parent_id=102;
explain select level from t1 where level=1;
select level,id from t1 where level=1;
select level,id,parent_id from t1 where level=1;
select level,id from t1 where level=1 order by id;
delete from t1 where level=1;
select * from t1;
drop table t1;
#
# Test of index only reads
#
CREATE TABLE t1 (
sca_code char(6) NOT NULL,
cat_code char(6) NOT NULL,
sca_desc varchar(50),
lan_code char(2) NOT NULL,
sca_pic varchar(100),
sca_sdesc varchar(50),
sca_sch_desc varchar(16),
PRIMARY KEY (sca_code, cat_code, lan_code)
) type = gemini ;
INSERT INTO t1 ( sca_code, cat_code, sca_desc, lan_code, sca_pic, sca_sdesc, sca_sch_desc) VALUES ( 'PD', 'J', 'PENDANT', 'EN', NULL, NULL, 'PENDANT'),( 'RI', 'J', 'RING', 'EN', NULL, NULL, 'RING');
select count(*) from t1 where sca_code = 'PD';
drop table t1;
#
# Test of opening table twice
#
CREATE TABLE t1 (a int not null, primary key (a)) type=gemini;
insert into t1 values(1),(2),(3);
select t1.a from t1 natural join t1 as t2 order by t1.a;
drop table t1;
#
# Test rollback
#
select "test for rollback";
create table t1 (n int not null primary key) type=gemini;
set autocommit=0;
insert into t1 values (4);
commit;
insert into t1 values (5);
rollback;
select n, "after rollback" from t1;
insert into t1 values (5);
commit;
select n, "after commit" from t1;
commit;
insert into t1 values (6);
!$1062 insert into t1 values (4);
commit;
select n, "after commit" from t1;
set autocommit=1;
insert into t1 values (7);
!$1062 insert into t1 values (4);
select n from t1;
# nop
rollback;
drop table t1;
#
# Testing transactions
#
create table t1 ( id int NOT NULL PRIMARY KEY, nom varchar(64)) type=gemini;
insert into t1 values(1,'first');
begin;
insert into t1 values(2,'hamdouni');
select id as afterbegin_id,nom as afterbegin_nom from t1;
rollback;
select id as afterrollback_id,nom as afterrollback_nom from t1;
set autocommit=0;
insert into t1 values(3,'mysql');
select id as afterautocommit0_id,nom as afterautocommit0_nom from t1;
rollback;
select id as afterrollback_id,nom as afterrollback_nom from t1;
set autocommit=1;
drop table t1;
#
# Simple not autocommit test
#
CREATE TABLE t1 (id char(8) not null primary key, val int not null) type=gemini;
insert into t1 values ('pippo', 12);
!$1062 insert into t1 values ('pippo', 12); # Gives error
delete from t1;
delete from t1 where id = 'pippo';
select * from t1;
insert into t1 values ('pippo', 12);
set autocommit=0;
delete from t1;
rollback;
select * from t1;
delete from t1;
commit;
select * from t1;
drop table t1;
set autocommit=1;
#
# The following simple tests failed at some point
#
CREATE TABLE t1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR(64)) TYPE=gemini;
INSERT INTO t1 VALUES (1, 'Jochen');
select * from t1;
drop table t1;
CREATE TABLE t1 ( _userid VARCHAR(60) NOT NULL PRIMARY KEY) TYPE=gemini;
set autocommit=0;
INSERT INTO t1 SET _userid='marc@anyware.co.uk';
COMMIT;
SELECT * FROM t1;
SELECT _userid FROM t1 WHERE _userid='marc@anyware.co.uk';
drop table t1;
set autocommit=1;
@@ -1377,7 +1377,7 @@ INSERT INTO t3 (period,name,companynr,price,price2) VALUES (1008,"tucked",311,23
INSERT INTO t3 (period,name,companynr,price,price2) VALUES (1009,"gems",447,2374834,9872392);
INSERT INTO t3 (period,name,companynr,price,price2) VALUES (1010,"clinker",512,786542,76234234);
create temporary table tmp select * from t3;
create temporary table tmp type = myisam select * from t3;
insert into t3 select * from tmp;
insert into tmp select * from t3;
@@ -199,6 +199,11 @@ sub new
{
$limits{'max_text_size'} = 8000; # Limit in Innobase
}
if (defined($main::opt_create_options) &&
$main::opt_create_options =~ /type=gemini/i)
{
$limits{'working_blobs'} = 0; # Blobs not implemented yet
}
return $self;
}
@@ -21,7 +21,7 @@ MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
INCLUDES = @MT_INCLUDES@ \
@bdb_includes@ @innobase_includes@ \
@bdb_includes@ @innobase_includes@ @gemini_includes@ \
-I$(srcdir)/../include \
-I$(srcdir)/../regex \
-I$(srcdir) -I../include -I.. -I.
@@ -40,7 +40,7 @@ LDADD = ../isam/libnisam.a \
../regex/libregex.a \
../strings/libmystrings.a
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@bdb_libs@ @innobase_libs@ \
@bdb_libs@ @innobase_libs@ @gemini_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS)
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_strfunc.h item_timefunc.h item_uniq.h \
@@ -50,7 +50,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
field.h handler.h \
ha_isammrg.h ha_isam.h ha_myisammrg.h\
ha_heap.h ha_myisam.h ha_berkeley.h ha_innobase.h \
opt_range.h opt_ft.h \
ha_gemini.h opt_range.h opt_ft.h \
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
lex.h lex_symbol.h sql_acl.h sql_crypt.h md5.h \
log_event.h mini_client.h sql_repl.h slave.h
@@ -71,7 +71,7 @@ mysqld_SOURCES = sql_lex.cc \
records.cc filesort.cc handler.cc \
ha_isam.cc ha_isammrg.cc ha_heap.cc \
ha_myisam.cc ha_myisammrg.cc ha_berkeley.cc \
ha_innobase.cc \
ha_innobase.cc ha_gemini.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
/* Copyright (C) 2000 NuSphere Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef __GNUC__
#pragma implementation // gcc: Class implementation
#endif
#include <string.h>
#include "mysql_priv.h"
#include "my_pthread.h"
#ifdef HAVE_GEMINI_DB
#include "dbconfig.h"
#include "dsmpub.h"
#include "recpub.h"
#include "vststat.h"
#include <m_ctype.h>
#include <myisampack.h>
#include <assert.h>
#include <hash.h>
#include <stdarg.h>
#include "geminikey.h"
#include "ha_gemini.h"
const char *ha_gemini_ext=".gmd";
const char *ha_gemini_idx_ext=".gmi";
bool gemini_skip=0;
long gemini_options = 0;
long gemini_buffer_cache;
long gemini_io_threads;
long gemini_log_cluster_size;
long gemini_locktablesize;
long gemini_lock_wait_timeout;
long gemini_spin_retries;
long gemini_connection_limit;
const char gemini_dbname[] = "gemini";
dsmContext_t *pfirstContext = NULL;
ulong gemini_recovery_options = GEMINI_RECOVERY_FULL;
/* bits in gemini_recovery_options */
const char *gemini_recovery_names[] =
{ "FULL", "NONE", "FORCE" };
TYPELIB gemini_recovery_typelib= {array_elements(gemini_recovery_names),"",
gemini_recovery_names};
const int start_of_name = 2; /* Name passed as ./<db>/<table-name>
and we're not interested in the ./ */
static const int keyBufSize = MYMAXKEYSIZE * 2;
static int gemini_tx_begin(THD *thd);
static void print_msg(THD *thd, const char *table_name, const char *op_name,
const char *msg_type, const char *fmt, ...);
static int gemini_helper_threads(dsmContext_t *pContext);
pthread_handler_decl(gemini_watchdog,arg );
pthread_handler_decl(gemini_rl_writer,arg );
pthread_handler_decl(gemini_apw,arg);
/* General functions */
bool gemini_init(void)
{
dsmStatus_t rc = 0;
char pmsgsfile[MAXPATHN];
DBUG_ENTER("gemini_init");
/* If datadir isn't set, bail out */
if (*mysql_real_data_home == '\0')
{
goto badret;
}
/* Gotta connect to the database regardless of the operation */
rc = dsmContextCreate(&pfirstContext);
if( rc != 0 )
{
printf("dsmContextCreate failed %ld\n",rc);
goto badret;
}
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_DBNAME,
strlen(gemini_dbname), (TEXT *)gemini_dbname);
if( rc != 0 )
{
printf("Dbname tag failed %ld\n", rc);
goto badret;
}
fn_format(pmsgsfile, GEM_MSGS_FILE, language, ".db", 2 | 4);
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_MSGS_FILE,
strlen(pmsgsfile), (TEXT *)pmsgsfile);
if( rc != 0 )
{
printf("MSGS_DIR tag failed %ld\n", rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_STARTUP);
if ( rc != 0 )
{
printf("ACCESS TAG set failed %ld\n",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_ENV, DSM_SQL_ENGINE);
if( rc != 0 )
{
printf("ACCESS_ENV set failed %ld",rc);
goto badret;
}
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_DATADIR,
strlen(mysql_real_data_home),
(TEXT *)mysql_real_data_home);
if( rc != 0 )
{
printf("Datadir tag failed %ld\n", rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_MAX_USERS,
gemini_connection_limit);
if(rc != 0)
{
printf("MAX_USERS tag set failed %ld",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_DEFAULT_LOCK_TIMEOUT,
gemini_lock_wait_timeout);
if(rc != 0)
{
printf("MAX_LOCK_ENTRIES tag set failed %ld",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_MAX_LOCK_ENTRIES,
gemini_locktablesize);
if(rc != 0)
{
printf("MAX_LOCK_ENTRIES tag set failed %ld",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_SPIN_AMOUNT,
gemini_spin_retries);
if(rc != 0)
{
printf("SPIN_AMOUNT tag set failed %ld",rc);
goto badret;
}
/* blocksize is hardcoded to 8K. Buffer cache is in bytes
need to convert this to 8K blocks */
gemini_buffer_cache = gemini_buffer_cache / 8192;
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_DB_BUFFERS,
gemini_buffer_cache);
if(rc != 0)
{
printf("DB_BUFFERS tag set failed %ld",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_FLUSH_AT_COMMIT,
((gemini_options & GEMOPT_FLUSH_LOG) ? 1 : 0));
if(rc != 0)
{
printf("FLush_Log_At_Commit tag set failed %ld",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_DIRECT_IO,
((gemini_options & GEMOPT_UNBUFFERED_IO) ? 1 : 0));
if(rc != 0)
{
printf("DIRECT_IO tag set failed %ld",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_CRASH_PROTECTION,
((gemini_recovery_options & GEMINI_RECOVERY_FULL) ? 1 : 0));
if(rc != 0)
{
printf("CRASH_PROTECTION tag set failed %ld",rc);
goto badret;
}
/* cluster size will come in bytes, need to convert it to
16 K units. */
gemini_log_cluster_size = (gemini_log_cluster_size + 16383) / 16384;
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_BI_CLUSTER_SIZE,
gemini_log_cluster_size);
if(rc != 0)
{
printf("CRASH_PROTECTION tag set failed %ld",rc);
goto badret;
}
rc = dsmUserConnect(pfirstContext,(TEXT *)"Multi-user",
DSM_DB_OPENDB | DSM_DB_OPENFILE);
if( rc != 0 )
{
printf("dsmUserConnect failed rc = %ld\n",rc);
goto badret;
}
/* Set access to shared for subsequent user connects */
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_SHARED);
rc = gemini_helper_threads(pfirstContext);
DBUG_RETURN(0);
badret:
gemini_skip = 1;
DBUG_RETURN(0);
}
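/* Start the Gemini helper threads: a watchdog thread always, and, when
   gemini_io_threads is non-zero, one recovery-log writer plus
   gemini_io_threads - 1 asynchronous page writer threads. */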
static int gemini_helper_threads(dsmContext_t *pContext)
{
int rc = 0;
pthread_t hThread;
DBUG_ENTER("gemini_helper_threads");
rc = pthread_create (&hThread, 0, gemini_watchdog, (void *)pContext);
if (rc)
{
printf("Can't create gemini watchdog thread");
goto done;
}
if(!gemini_io_threads)
goto done;
rc = pthread_create(&hThread, 0, gemini_rl_writer, (void *)pContext);
if(rc)
{
printf("Can't create gemini recovery log writer thread");
goto done;
}
for( int i = gemini_io_threads - 1;i;i--)
{
rc = pthread_create(&hThread, 0, gemini_apw, (void *)pContext);
if(rc)
{
printf("Can't create gemini page writer thread");
goto done;
}
}
done:
DBUG_RETURN(rc);
}
pthread_handler_decl(gemini_watchdog,arg )
{
int rc = 0;
dsmContext_t *pcontext = (dsmContext_t *)arg;
dsmContext_t *pmyContext = NULL;
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
printf("dsmContextCopy failed for watchdog %d\n",rc);
return 0;
}
rc = dsmUserConnect(pmyContext,NULL,0);
if( rc != 0 )
{
printf("dsmUserConnect failed for watchdog %d\n",rc);
return 0;
}
my_thread_init();
pthread_detach_this_thread();
while(rc == 0)
{
rc = dsmDatabaseProcessEvents(pmyContext);
if(!rc)
rc = dsmWatchdog(pmyContext);
sleep(1);
}
rc = dsmUserDisconnect(pmyContext,0);
my_thread_end();
return 0;
}
pthread_handler_decl(gemini_rl_writer,arg )
{
int rc = 0;
dsmContext_t *pcontext = (dsmContext_t *)arg;
dsmContext_t *pmyContext = NULL;
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
printf("dsmContextCopy failed for recovery log writer %d\n",rc);
return 0;
}
rc = dsmUserConnect(pmyContext,NULL,0);
if( rc != 0 )
{
printf("dsmUserConnect failed for recovery log writer %d\n",rc);
return 0;
}
my_thread_init();
pthread_detach_this_thread();
while(rc == 0)
{
rc = dsmRLwriter(pmyContext);
}
rc = dsmUserDisconnect(pmyContext,0);
my_thread_end();
return 0;
}
pthread_handler_decl(gemini_apw,arg )
{
int rc = 0;
dsmContext_t *pcontext = (dsmContext_t *)arg;
dsmContext_t *pmyContext = NULL;
my_thread_init();
pthread_detach_this_thread();
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
printf("dsmContextCopy failed for gemini page writer %d\n",rc);
my_thread_end();
return 0;
}
rc = dsmUserConnect(pmyContext,NULL,0);
if( rc != 0 )
{
printf("dsmUserConnect failed for gemini page writer %d\n",rc);
my_thread_end();
return 0;
}
while(rc == 0)
{
rc = dsmAPW(pmyContext);
}
rc = dsmUserDisconnect(pmyContext,0);
my_thread_end();
return 0;
}
int gemini_set_option_long(int optid, long optval)
{
dsmStatus_t rc = 0;
switch (optid)
{
case GEM_OPTID_SPIN_RETRIES:
/* If we don't have a context yet, skip the set and just save the
** value in gemini_spin_retries for a later gemini_init(). This
** may not ever happen, but we're covered if it does.
*/
if (pfirstContext)
{
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_SPIN_AMOUNT,
optval);
}
if (rc)
{
printf("SPIN_AMOUNT tag set failed %ld",rc);
}
else
{
gemini_spin_retries = optval;
}
break;
}
return rc;
}
static int gemini_connect(THD *thd)
{
DBUG_ENTER("gemini_connect");
dsmStatus_t rc;
rc = dsmContextCopy(pfirstContext,(dsmContext_t **)&thd->gemini.context,
DSMCONTEXTDB);
if( rc != 0 )
{
printf("dsmContextCopy failed %ld\n",rc);
return(rc);
}
rc = dsmUserConnect((dsmContext_t *)thd->gemini.context,NULL,0);
if( rc != 0 )
{
printf("dsmUserConnect failed %ld\n",rc);
return(rc);
}
rc = (dsmStatus_t)gemini_tx_begin(thd);
DBUG_RETURN(rc);
}
void gemini_disconnect(THD *thd)
{
dsmStatus_t rc;
if(thd->gemini.context)
{
rc = dsmUserDisconnect((dsmContext_t *)thd->gemini.context,0);
}
return;
}
bool gemini_end(void)
{
dsmStatus_t rc;
THD *thd;
DBUG_ENTER("gemini_end");
if(pfirstContext)
{
rc = dsmShutdownSet(pfirstContext, DSM_SHUTDOWN_NORMAL);
sleep(2);
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_STARTUP);
rc = dsmShutdown(pfirstContext, DSMNICEBIT,DSMNICEBIT);
}
DBUG_RETURN(0);
}
bool gemini_flush_logs()
{
DBUG_ENTER("gemini_flush_logs");
DBUG_RETURN(0);
}
static int gemini_tx_begin(THD *thd)
{
dsmStatus_t rc;
DBUG_ENTER("gemini_tx_begin");
thd->gemini.savepoint = 1;
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,DSMTXN_START,0,NULL);
if(!rc)
thd->gemini.needSavepoint = 1;
thd->gemini.tx_isolation = thd->tx_isolation;
DBUG_PRINT("trans",("beginning transaction"));
DBUG_RETURN(rc);
}
int gemini_commit(THD *thd)
{
dsmStatus_t rc;
LONG txNumber = 0;
DBUG_ENTER("gemini_commit");
if(!thd->gemini.context)
DBUG_RETURN(0);
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
0,DSMTXN_COMMIT,0,NULL);
if(!rc)
rc = gemini_tx_begin(thd);
thd->gemini.lock_count = 0;
DBUG_PRINT("trans",("ending transaction"));
DBUG_RETURN(rc);
}
int gemini_rollback(THD *thd)
{
dsmStatus_t rc;
LONG txNumber;
DBUG_ENTER("gemini_rollback");
DBUG_PRINT("trans",("aborting transaction"));
if(!thd->gemini.context)
DBUG_RETURN(0);
thd->gemini.savepoint = 0;
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,DSMTXN_ABORT,0,NULL);
if(!rc)
rc = gemini_tx_begin(thd);
thd->gemini.lock_count = 0;
DBUG_RETURN(rc);
}
int gemini_rollback_to_savepoint(THD *thd)
{
dsmStatus_t rc = 0;
DBUG_ENTER("gemini_rollback_to_savepoint");
if(thd->gemini.savepoint > 1)
{
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,DSMTXN_UNSAVE,0,NULL);
}
DBUG_RETURN(rc);
}
/* gemDataType - translates from mysql data type constant to gemini
key services data type constant */
int gemDataType ( int mysqlType )
{
switch (mysqlType)
{
case FIELD_TYPE_LONG:
case FIELD_TYPE_TINY:
case FIELD_TYPE_SHORT:
case FIELD_TYPE_TIMESTAMP:
case FIELD_TYPE_LONGLONG:
case FIELD_TYPE_INT24:
case FIELD_TYPE_DATE:
case FIELD_TYPE_TIME:
case FIELD_TYPE_DATETIME:
case FIELD_TYPE_YEAR:
case FIELD_TYPE_NEWDATE:
case FIELD_TYPE_ENUM:
case FIELD_TYPE_SET:
return GEM_INT;
case FIELD_TYPE_DECIMAL:
return GEM_DECIMAL;
case FIELD_TYPE_FLOAT:
return GEM_FLOAT;
case FIELD_TYPE_DOUBLE:
return GEM_DOUBLE;
case FIELD_TYPE_TINY_BLOB:
return GEM_TINYBLOB;
case FIELD_TYPE_MEDIUM_BLOB:
return GEM_MEDIUMBLOB;
case FIELD_TYPE_LONG_BLOB:
return GEM_LONGBLOB;
case FIELD_TYPE_BLOB:
return GEM_BLOB;
case FIELD_TYPE_VAR_STRING:
case FIELD_TYPE_STRING:
return GEM_CHAR;
}
return -1;
}
/*****************************************************************************
** Gemini tables
*****************************************************************************/
const char **ha_gemini::bas_ext() const
{ static const char *ext[]= { ha_gemini_ext, ha_gemini_idx_ext, NullS };
return ext;
}
int ha_gemini::open(const char *name, int mode, uint test_if_locked)
{
dsmObject_t tableId = 0;
THD *thd;
char name_buff[FN_REFLEN];
char tabname_buff[FN_REFLEN];
char dbname_buff[FN_REFLEN];
unsigned i,nameLen;
LONG txNumber;
dsmStatus_t rc;
DBUG_ENTER("ha_gemini::open");
thd = current_thd;
thr_lock_init(&alock);
thr_lock_data_init(&alock,&lock,(void*)0);
ref_length = sizeof(dsmRecid_t);
if(thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
rc = gemini_connect(thd);
if(rc)
return rc;
}
if (!(rec_buff=my_malloc(table->rec_buff_length,
MYF(MY_WME))))
{
DBUG_RETURN(1);
}
/* separate out the name of the table and the database (a VST must be
** created in the mysql database)
*/
rc = gemini_parse_table_name(name, dbname_buff, tabname_buff);
if (rc == 0)
{
if (strcmp(dbname_buff, "mysql") == 0)
{
tableId = gemini_is_vst(tabname_buff);
}
}
sprintf(name_buff, "%s.%s", dbname_buff, tabname_buff);
/* if it's not a VST, get the table number the regular way */
if (!tableId)
{
rc = dsmObjectNameToNum((dsmContext_t *)thd->gemini.context,
(dsmText_t *)name_buff,
&tableId);
}
tableNumber = tableId;
if(!rc)
rc = index_open(name_buff);
fixed_length_row=!(table->db_create_options & HA_OPTION_PACK_RECORD);
key_read = 0;
using_ignore = 0;
/* Get the gemini table status -- we want to know if the table
crashed while being in the midst of a repair operation */
rc = dsmTableStatus((dsmContext_t *)thd->gemini.context,
tableNumber,&tableStatus);
if(tableStatus)
tableStatus = HA_ERR_CRASHED;
DBUG_RETURN (rc);
}
/* Look up and store the object numbers for the indexes on this table */
int ha_gemini::index_open(char *tableName)
{
dsmStatus_t rc = 0;
int nameLen;
DBUG_ENTER("ha_gemini::index_open");
if(table->keys)
{
THD *thd = current_thd;
dsmObject_t objectNumber;
if (!(pindexNumbers=(dsmIndex_t *)my_malloc(table->keys*sizeof(dsmIndex_t),
MYF(MY_WME))))
{
DBUG_RETURN(1);
}
nameLen = strlen(tableName);
tableName[nameLen] = '.';
nameLen++;
for( uint i = 0; i < table->keys && !rc; i++)
{
strcpy(&tableName[nameLen],table->key_info[i].name);
rc = dsmObjectNameToNum((dsmContext_t *)thd->gemini.context,
(dsmText_t *)tableName,
&objectNumber);
pindexNumbers[i] = objectNumber;
}
}
else
pindexNumbers = 0;
DBUG_RETURN(rc);
}
int ha_gemini::close(void)
{
DBUG_ENTER("ha_gemini::close");
thr_lock_delete(&alock);
my_free(rec_buff,MYF(MY_ALLOW_ZERO_PTR));
rec_buff = 0;
my_free((char *)pindexNumbers,MYF(MY_ALLOW_ZERO_PTR));
pindexNumbers = 0;
DBUG_RETURN(0);
}
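/* Insert one row.  When INSERT ... IGNORE is in effect (using_ignore) a
   savepoint is taken first so that a duplicate-key failure on the index
   entries can be undone with DSMTXN_UNSAVE without aborting the whole
   transaction. */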
int ha_gemini::write_row(byte * record)
{
int error = 0;
dsmRecord_t dsmRecord;
THD *thd;
DBUG_ENTER("write_row");
if(tableStatus)
DBUG_RETURN(tableStatus);
thd = current_thd;
statistic_increment(ha_write_count,&LOCK_status);
if (table->time_stamp)
update_timestamp(record+table->time_stamp-1);
if(thd->gemini.needSavepoint || using_ignore)
{
thd->gemini.savepoint++;
error = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
DBUG_RETURN(error);
thd->gemini.needSavepoint = 0;
}
if (table->next_number_field && record == table->record[0])
{
if(thd->next_insert_id)
{
ULONG64 nr;
/* A set insert-id statement, so set the auto-increment value if this
value is higher than its current value */
error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
tableNumber, (ULONG64 *)&nr);
if(thd->next_insert_id > nr)
{
error = dsmTableAutoIncrementSet((dsmContext_t *)thd->gemini.context,tableNumber,
(ULONG64)thd->next_insert_id);
}
}
update_auto_increment();
}
dsmRecord.table = tableNumber;
dsmRecord.maxLength = table->reclength;
if ((error=pack_row((byte **)&dsmRecord.pbuffer, (int *)&dsmRecord.recLength,
record)))
DBUG_RETURN(error);
error = dsmRecordCreate((dsmContext_t *)thd->gemini.context,
&dsmRecord,0);
if(!error)
{
error = handleIndexEntries(record, dsmRecord.recid,KEY_CREATE);
if(error == HA_ERR_FOUND_DUPP_KEY && using_ignore)
{
dsmStatus_t rc;
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,DSMTXN_UNSAVE,0,NULL);
thd->gemini.needSavepoint = 1;
}
}
DBUG_RETURN(error);
}
longlong ha_gemini::get_auto_increment()
{
longlong nr;
int error;
THD *thd=current_thd;
error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
tableNumber, (ULONG64 *)&nr);
return nr;
}
/* Put or delete index entries for a row */
int ha_gemini::handleIndexEntries(const byte * record, dsmRecid_t recid,
enum_key_string_options option)
{
dsmStatus_t rc = 0;
DBUG_ENTER("handleIndexEntries");
for (uint i = 0; i < table->keys && rc == 0; i++)
{
rc = handleIndexEntry(record, recid,option, i);
}
DBUG_RETURN(rc);
}
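/* Build the Gemini key string for index 'keynr' from 'record', then create,
   delete or check the corresponding index entry for 'recid'.  A duplicate on
   create (DSM_S_IXDUPKEY) is mapped to HA_ERR_FOUND_DUPP_KEY and last_dup_key
   is set for error reporting. */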
int ha_gemini::handleIndexEntry(const byte * record, dsmRecid_t recid,
enum_key_string_options option,uint keynr)
{
dsmStatus_t rc = 0;
KEY *key_info;
int keyStringLen;
bool thereIsAnull;
THD *thd;
AUTOKEY(theKey,keyBufSize);
DBUG_ENTER("handleIndexEntry");
thd = current_thd;
key_info=table->key_info+keynr;
thereIsAnull = false;
rc = createKeyString(record, key_info, theKey.akey.keystr,
sizeof(theKey.apad),&keyStringLen,
(short)pindexNumbers[keynr],
&thereIsAnull);
if(!rc)
{
theKey.akey.index = pindexNumbers[keynr];
theKey.akey.keycomps = (COUNT)key_info->key_parts;
/* We have to subtract three here since cxKeyPrepare
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
theKey.akey.keyLen = (COUNT)keyStringLen - 3;
theKey.akey.unknown_comp = thereIsAnull;
theKey.akey.word_index = 0;
theKey.akey.descending_key =0;
if(option == KEY_CREATE)
{
rc = dsmKeyCreate((dsmContext_t *)thd->gemini.context, &theKey.akey,
(dsmTable_t)tableNumber, recid, NULL);
if(rc == DSM_S_IXDUPKEY)
{
last_dup_key=keynr;
rc = HA_ERR_FOUND_DUPP_KEY;
}
}
else if(option == KEY_DELETE)
{
rc = dsmKeyDelete((dsmContext_t *)thd->gemini.context, &theKey.akey,
(dsmTable_t)tableNumber, recid, 0, NULL);
}
else
{
/* KEY_CHECK */
dsmCursid_t aCursorId;
int error;
rc = dsmCursorCreate((dsmContext_t *)thd->gemini.context,
(dsmTable_t)tableNumber,
(dsmIndex_t)pindexNumbers[keynr],
&aCursorId,NULL);
rc = dsmCursorFind((dsmContext_t *)thd->gemini.context,
&aCursorId,&theKey.akey,NULL,DSMDBKEY,
DSMFINDFIRST,DSM_LK_SHARE,0,
&lastRowid,0);
error = dsmCursorDelete((dsmContext_t *)thd->gemini.context,
&aCursorId, 0);
}
}
DBUG_RETURN(rc);
}
int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
unsigned char *pkeyBuf, int bufSize,
int *pkeyStringLen,
short geminiIndexNumber,
bool *thereIsAnull)
{
dsmStatus_t rc = 0;
int componentLen;
int fieldType;
int isNull;
KEY_PART_INFO *key_part;
DBUG_ENTER("createKeyString");
rc = gemKeyInit(pkeyBuf,pkeyStringLen, geminiIndexNumber);
for(uint i = 0; i < pkeyinfo->key_parts && rc == 0; i++)
{
unsigned char *pos;
key_part = pkeyinfo->key_part + i;
fieldType = gemDataType(key_part->field->type());
if(fieldType == GEM_CHAR)
{
/* Save the current ptr to the field in case we're building a key
to remove an old key value when an indexed character column
gets updated. */
char *ptr = key_part->field->ptr;
key_part->field->ptr = (char *)record + key_part->offset;
key_part->field->sort_string(rec_buff, key_part->length);
key_part->field->ptr = ptr;
pos = (unsigned char *)rec_buff;
}
else
{
pos = (unsigned char *)record + key_part->offset;
}
isNull = record[key_part->null_offset] & key_part->null_bit;
if(isNull)
*thereIsAnull = true;
rc = gemFieldToIdxComponent(pos,
(unsigned long) key_part->length,
fieldType,
isNull ,
key_part->field->flags & UNSIGNED_FLAG,
pkeyBuf + *pkeyStringLen,
bufSize,
&componentLen);
*pkeyStringLen += componentLen;
}
DBUG_RETURN(rc);
}
int ha_gemini::update_row(const byte * old_record, byte * new_record)
{
int error = 0;
dsmRecord_t dsmRecord;
unsigned long savepoint;
THD *thd = current_thd;
DBUG_ENTER("update_row");
statistic_increment(ha_update_count,&LOCK_status);
if (table->time_stamp)
update_timestamp(new_record+table->time_stamp-1);
if(thd->gemini.needSavepoint || using_ignore)
{
thd->gemini.savepoint++;
error = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
DBUG_RETURN(error);
thd->gemini.needSavepoint = 0;
}
for (uint keynr=0 ; keynr < table->keys ; keynr++)
{
if(key_cmp(keynr,old_record, new_record))
{
error = handleIndexEntry(old_record,lastRowid,KEY_DELETE,keynr);
if(error)
DBUG_RETURN(error);
error = handleIndexEntry(new_record, lastRowid, KEY_CREATE, keynr);
if(error)
{
if (using_ignore && error == HA_ERR_FOUND_DUPP_KEY)
{
dsmStatus_t rc;
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,DSMTXN_UNSAVE,0,NULL);
thd->gemini.needSavepoint = 1;
}
DBUG_RETURN(error);
}
}
}
dsmRecord.table = tableNumber;
dsmRecord.recid = lastRowid;
dsmRecord.maxLength = table->reclength;
if ((error=pack_row((byte **)&dsmRecord.pbuffer, (int *)&dsmRecord.recLength,
new_record)))
{
DBUG_RETURN(error);
}
error = dsmRecordUpdate((dsmContext_t *)thd->gemini.context,
&dsmRecord, 0, NULL);
DBUG_RETURN(error);
}
int ha_gemini::delete_row(const byte * record)
{
int error = 0;
dsmRecord_t dsmRecord;
THD *thd = current_thd;
DBUG_ENTER("delete_row");
statistic_increment(ha_delete_count,&LOCK_status);
if(thd->gemini.needSavepoint)
{
thd->gemini.savepoint++;
error = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
DBUG_RETURN(error);
thd->gemini.needSavepoint = 0;
}
dsmRecord.table = tableNumber;
dsmRecord.recid = lastRowid;
error = handleIndexEntries(record, dsmRecord.recid,KEY_DELETE);
if(!error)
{
error = dsmRecordDelete((dsmContext_t *)thd->gemini.context,
&dsmRecord, 0, NULL);
}
DBUG_RETURN(error);
}
int ha_gemini::index_init(uint keynr)
{
int error = 0;
int keyStringLen;
THD *thd;
DBUG_ENTER("index_init");
thd = current_thd;
lastRowid = 0;
active_index=keynr;
error = dsmCursorCreate((dsmContext_t *)thd->gemini.context,
(dsmTable_t)tableNumber,
(dsmIndex_t)pindexNumbers[keynr],
&cursorId,NULL);
pbracketBase = (dsmKey_t *)my_malloc(sizeof(dsmKey_t) + keyBufSize,
MYF(MY_WME));
if(!pbracketBase)
DBUG_RETURN(1);
pbracketLimit = (dsmKey_t *)my_malloc(sizeof(dsmKey_t) + keyBufSize,MYF(MY_WME));
if(!pbracketLimit)
{
my_free((char *)pbracketLimit,MYF(0));
DBUG_RETURN(1);
}
pbracketBase->index = 0;
pbracketLimit->index = (dsmIndex_t)pindexNumbers[keynr];
pbracketLimit->keycomps = 1;
keyStringLen = 0;
error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
pbracketLimit->index);
/* We have to subtract three here since cxKeyPrepare
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
pbracketLimit->keyLen = (COUNT)keyStringLen - 3;
pbracketBase->descending_key = pbracketLimit->descending_key = 0;
pbracketBase->ksubstr = pbracketLimit->ksubstr = 0;
pfoundKey = (dsmKey_t *)my_malloc(sizeof(dsmKey_t) + keyBufSize,MYF(MY_WME));
if(!pfoundKey)
{
my_free((char *)pbracketLimit,MYF(0));
my_free((char *)pbracketBase,MYF(0));
DBUG_RETURN(1);
}
DBUG_RETURN(error);
}
int ha_gemini::index_end()
{
int error = 0;
THD *thd;
DBUG_ENTER("index_end");
thd = current_thd;
error = dsmCursorDelete((dsmContext_t *)thd->gemini.context,
&cursorId, 0);
if(pbracketLimit)
my_free((char *)pbracketLimit,MYF(0));
if(pbracketBase)
my_free((char *)pbracketBase,MYF(0));
if(pfoundKey)
my_free((char *)pfoundKey,MYF(0));
pbracketLimit = 0;
pbracketBase = 0;
pfoundKey = 0;
DBUG_RETURN(error);
}
/* This is only used to read whole keys */
int ha_gemini::index_read_idx(byte * buf, uint keynr, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
int error = 0;
DBUG_ENTER("index_read_idx");
statistic_increment(ha_read_key_count,&LOCK_status);
error = index_init(keynr);
if (!error)
error = index_read(buf,key,key_len,find_flag);
if(error == HA_ERR_END_OF_FILE)
error = HA_ERR_KEY_NOT_FOUND;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
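/* Convert a MySQL key value (key_ptr/key_length) for index 'keynr' into a
   Gemini key string in 'pkey', one key part at a time; index_read() uses this
   to build the base and limit brackets for a cursor find. */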
int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
const byte *key_ptr, uint key_length)
{
KEY *key_info=table->key_info+keynr;
KEY_PART_INFO *key_part=key_info->key_part;
KEY_PART_INFO *end=key_part+key_info->key_parts;
int rc;
int componentLen;
DBUG_ENTER("pack_key");
rc = gemKeyInit(pkey->keystr,&componentLen,
(short)pindexNumbers[active_index]);
pkey->keyLen = componentLen;
for (; key_part != end && (int) key_length > 0 && !rc; key_part++)
{
uint offset=0;
unsigned char *pos;
int fieldType;
if (key_part->null_bit)
{
offset=1;
if (*key_ptr != 0) // Store 0 if NULL
{
key_length-= key_part->store_length;
key_ptr+= key_part->store_length;
rc = gemFieldToIdxComponent(
(unsigned char *)key_ptr + offset,
(unsigned long) key_part->length,
0,
1 , /* Tells it to build a null component */
key_part->field->flags & UNSIGNED_FLAG,
pkey->keystr + pkey->keyLen,
keyBufSize,
&componentLen);
pkey->keyLen += componentLen;
continue;
}
}
fieldType = gemDataType(key_part->field->type());
if(fieldType == GEM_CHAR)
{
key_part->field->store(key_ptr + offset, key_part->length);
key_part->field->sort_string(rec_buff, key_part->length);
pos = (unsigned char *)rec_buff;
}
else
{
pos = (unsigned char *)key_ptr + offset;
}
rc = gemFieldToIdxComponent(
pos,
(unsigned long) key_part->length,
fieldType,
0 ,
key_part->field->flags & UNSIGNED_FLAG,
pkey->keystr + pkey->keyLen,
keyBufSize,
&componentLen);
key_ptr+=key_part->store_length;
key_length-=key_part->store_length;
pkey->keyLen += componentLen;
}
DBUG_RETURN(rc);
}
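/* Rebuild the indexed columns of 'record' from a Gemini key string, used for
   index-only reads.  Character components cannot be decoded (the index holds
   sort weights, not the original characters), so key_read is switched off in
   that case and the caller fetches the full row instead. */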
void ha_gemini::unpack_key(char *record, dsmKey_t *key, uint index)
{
KEY *key_info=table->key_info+index;
KEY_PART_INFO *key_part= key_info->key_part,
*end=key_part+key_info->key_parts;
int fieldIsNull, fieldType;
int rc = 0;
char unsigned *pos= &key->keystr[7];
for ( ; key_part != end; key_part++)
{
fieldType = gemDataType(key_part->field->type());
if(fieldType == GEM_CHAR)
{
/* Can't get data from character indexes since the sort weights
are in the index and not the characters. */
key_read = 0;
}
rc = gemIdxComponentToField(pos, fieldType,
(unsigned char *)record + key_part->field->offset(),
key_part->field->field_length,
key_part->field->decimals(),
&fieldIsNull);
if(fieldIsNull)
{
record[key_part->null_offset] |= key_part->null_bit;
}
else if (key_part->null_bit)
{
record[key_part->null_offset]&= ~key_part->null_bit;
}
while(*pos++); /* Advance to next field in key by finding */
/* a null byte */
}
}
int ha_gemini::index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
int error = 0;
THD *thd;
int componentLen;
DBUG_ENTER("index_read");
statistic_increment(ha_read_key_count,&LOCK_status);
pbracketBase->index = (short)pindexNumbers[active_index];
pbracketBase->keycomps = 1;
/* It's a greater-than operation, so create a base bracket
from the input key data. */
error = pack_key(active_index, pbracketBase, key, key_len);
if(error)
goto errorReturn;
if(find_flag == HA_READ_AFTER_KEY)
{
/* A greater than operation */
error = gemKeyAddLow(pbracketBase->keystr + pbracketBase->keyLen,
&componentLen);
pbracketBase->keyLen += componentLen;
}
if(find_flag == HA_READ_KEY_EXACT)
{
/* Need to set up a high bracket for an equality operator
Which is a copy of the base bracket plus a hi lim term */
bmove(pbracketLimit,pbracketBase,(size_t)pbracketBase->keyLen + sizeof(dsmKey_t));
error = gemKeyAddHigh(pbracketLimit->keystr + pbracketLimit->keyLen,
&componentLen);
if(error)
goto errorReturn;
pbracketLimit->keyLen += componentLen;
}
else
{
/* Always add a high range -- except for HA_READ_KEY_EXACT this
is all we need for the upper index bracket */
error = gemKeyHigh(pbracketLimit->keystr, &componentLen,
pbracketLimit->index);
pbracketLimit->keyLen = componentLen;
}
/* We have to subtract three here since cxKeyPrepare
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
pbracketBase->keyLen -= 3;
pbracketLimit->keyLen -= 3;
thd = current_thd;
error = findRow(thd, DSMFINDFIRST, buf);
errorReturn:
if (error == DSM_S_ENDLOOP)
error = HA_ERR_KEY_NOT_FOUND;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
int ha_gemini::index_next(byte * buf)
{
THD *thd;
int error = 1;
int keyStringLen=0;
dsmMask_t findMode;
DBUG_ENTER("index_next");
if(tableStatus)
DBUG_RETURN(tableStatus);
thd = current_thd;
if(pbracketBase->index == 0)
{
error = gemKeyLow(pbracketBase->keystr, &keyStringLen,
pbracketLimit->index);
pbracketBase->keyLen = (COUNT)keyStringLen - 3;
pbracketBase->index = pbracketLimit->index;
pbracketBase->keycomps = 1;
findMode = DSMFINDFIRST;
}
else
findMode = DSMFINDNEXT;
error = findRow(thd,findMode,buf);
if (error == DSM_S_ENDLOOP)
error = HA_ERR_END_OF_FILE;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
int ha_gemini::index_next_same(byte * buf, const byte *key, uint keylen)
{
int error = 0;
DBUG_ENTER("index_next_same");
statistic_increment(ha_read_next_count,&LOCK_status);
DBUG_RETURN(index_next(buf));
}
int ha_gemini::index_prev(byte * buf)
{
int error = 0;
THD *thd = current_thd;
DBUG_ENTER("index_prev");
statistic_increment(ha_read_prev_count,&LOCK_status);
error = findRow(thd, DSMFINDPREV, buf);
if (error == DSM_S_ENDLOOP)
error = HA_ERR_END_OF_FILE;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
int ha_gemini::index_first(byte * buf)
{
DBUG_ENTER("index_first");
statistic_increment(ha_read_first_count,&LOCK_status);
DBUG_RETURN(index_next(buf));
}
int ha_gemini::index_last(byte * buf)
{
int error = 0;
THD *thd;
int keyStringLen;
dsmMask_t findMode;
thd = current_thd;
DBUG_ENTER("index_last");
statistic_increment(ha_read_last_count,&LOCK_status);
error = gemKeyLow(pbracketBase->keystr, &keyStringLen,
pbracketLimit->index);
if(error)
goto errorReturn;
pbracketBase->keyLen = (COUNT)keyStringLen - 3;
pbracketBase->index = pbracketLimit->index;
pbracketBase->keycomps = 1;
error = findRow(thd,DSMFINDLAST,buf);
errorReturn:
if (error == DSM_S_ENDLOOP)
error = HA_ERR_END_OF_FILE;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
int ha_gemini::rnd_init(bool scan)
{
THD *thd = current_thd;
lastRowid = 0;
return 0;
}
int ha_gemini::rnd_end()
{
/*
return gem_scan_end();
*/
return 0;
}
int ha_gemini::rnd_next(byte *buf)
{
int error = 0;
dsmRecord_t dsmRecord;
THD *thd;
DBUG_ENTER("rnd_next");
if(tableStatus)
DBUG_RETURN(tableStatus);
thd = current_thd;
if(thd->gemini.tx_isolation == ISO_READ_COMMITTED && !(lockMode & DSM_LK_EXCL)
&& lastRowid)
error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
tableNumber, DSMOBJECT_RECORD, lastRowid,
lockMode | DSM_UNLK_FREE, 0);
statistic_increment(ha_read_rnd_next_count,&LOCK_status);
dsmRecord.table = tableNumber;
dsmRecord.recid = lastRowid;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
dsmRecord.maxLength = table->reclength;
error = dsmTableScan((dsmContext_t *)thd->gemini.context,
&dsmRecord, DSMFINDNEXT, lockMode, 0);
if(!error)
{
lastRowid = dsmRecord.recid;
unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
}
if(!error)
;
else if (error == DSM_S_ENDLOOP)
error = HA_ERR_END_OF_FILE;
else if (error == DSM_S_RQSTREJ)
error = HA_ERR_LOCK_WAIT_TIMEOUT;
else if (error == DSM_S_LKTBFULL)
error = HA_ERR_LOCK_TABLE_FULL;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
int ha_gemini::rnd_pos(byte * buf, byte *pos)
{
int error;
int rc;
THD *thd;
statistic_increment(ha_read_rnd_count,&LOCK_status);
thd = current_thd;
memcpy((void *)&lastRowid,pos,ref_length);
if(thd->gemini.tx_isolation == ISO_READ_COMMITTED && !(lockMode & DSM_LK_EXCL))
{
/* Lock the row */
error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,DSMOBJECT_RECORD,lastRowid,
lockMode, 1, 0);
if ( error )
goto errorReturn;
}
error = fetch_row(thd->gemini.context, buf);
if(thd->gemini.tx_isolation == ISO_READ_COMMITTED && !(lockMode & DSM_LK_EXCL))
{
/* Unlock the row */
rc = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,DSMOBJECT_RECORD,lastRowid,
lockMode | DSM_UNLK_FREE , 0);
}
if(error == DSM_S_RMNOTFND)
error = HA_ERR_RECORD_DELETED;
errorReturn:
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
}
int ha_gemini::fetch_row(void *gemini_context,const byte *buf)
{
dsmStatus_t rc = 0;
dsmRecord_t dsmRecord;
DBUG_ENTER("fetch_row");
dsmRecord.table = tableNumber;
dsmRecord.recid = lastRowid;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
dsmRecord.maxLength = table->reclength;
rc = dsmRecordGet((dsmContext_t *)gemini_context,
&dsmRecord, 0);
if(!rc)
{
unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
}
DBUG_RETURN(rc);
}
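/* Position the cursor within the bracket [pbracketBase, pbracketLimit]
   according to findMode, then either unpack the found key (index-only read)
   or fetch the full row into 'buf'.  DSM lock errors are mapped to
   HA_ERR_LOCK_WAIT_TIMEOUT / HA_ERR_LOCK_TABLE_FULL. */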
int ha_gemini::findRow(THD *thd, dsmMask_t findMode, byte *buf)
{
dsmStatus_t rc;
dsmKey_t *pkey;
DBUG_ENTER("findRow");
if(thd->gemini.tx_isolation == ISO_READ_COMMITTED && !(lockMode & DSM_LK_EXCL)
&& lastRowid)
rc = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
tableNumber, DSMOBJECT_RECORD, lastRowid,
lockMode | DSM_UNLK_FREE, 0);
if( key_read )
pkey = pfoundKey;
else
pkey = 0;
rc = dsmCursorFind((dsmContext_t *)thd->gemini.context,
&cursorId,
pbracketBase,
pbracketLimit,
DSMPARTIAL,
findMode,
lockMode,
NULL,
&lastRowid,
pkey);
if( rc )
goto errorReturn;
if(key_read)
{
unpack_key(buf, pkey, active_index);
}
if(!key_read) /* unpack_key may have turned off key_read */
{
rc = fetch_row((dsmContext_t *)thd->gemini.context,buf);
}
errorReturn:
if(!rc)
;
else if(rc == DSM_S_RQSTREJ)
rc = HA_ERR_LOCK_WAIT_TIMEOUT;
else if (rc == DSM_S_LKTBFULL)
rc = HA_ERR_LOCK_TABLE_FULL;
DBUG_RETURN(rc);
}
void ha_gemini::position(const byte *record)
{
memcpy(ref,&lastRowid,ref_length);
}
void ha_gemini::info(uint flag)
{
DBUG_ENTER("info");
if ((flag & HA_STATUS_VARIABLE))
{
THD *thd = current_thd;
dsmStatus_t error;
ULONG64 rows;
error = dsmRowCount((dsmContext_t *)thd->gemini.context,tableNumber,&rows);
records = (ha_rows)rows;
deleted = 0;
}
else if ((flag & HA_STATUS_CONST))
{
;
}
else if ((flag & HA_STATUS_ERRKEY))
{
errkey=last_dup_key;
}
else if ((flag & HA_STATUS_TIME))
{
;
}
else if ((flag & HA_STATUS_AUTO))
{
;
}
DBUG_VOID_RETURN;
}
int ha_gemini::extra(enum ha_extra_function operation)
{
switch (operation)
{
case HA_EXTRA_RESET:
case HA_EXTRA_RESET_STATE:
key_read=0;
using_ignore=0;
break;
case HA_EXTRA_KEYREAD:
key_read=1; // Query satisfied with key
break;
case HA_EXTRA_NO_KEYREAD:
key_read=0;
break;
case HA_EXTRA_IGNORE_DUP_KEY:
using_ignore=1;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
using_ignore=0;
break;
default:
break;
}
return 0;
}
int ha_gemini::reset(void)
{
key_read=0; // Reset to state after open
return 0;
}
/*
As MySQL will execute an external lock for every new table it uses
we can use this to start the transactions.
*/
int ha_gemini::external_lock(THD *thd, int lock_type)
{
dsmStatus_t rc = 0;
LONG txNumber;
DBUG_ENTER("ha_gemini::external_lock");
if (lock_type != F_UNLCK)
{
if (!thd->gemini.lock_count)
{
thd->gemini.lock_count = 1;
thd->gemini.tx_isolation = thd->tx_isolation;
}
if(thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
rc = gemini_connect(thd);
if(rc)
DBUG_RETURN(rc);
}
/* Set need savepoint flag */
thd->gemini.needSavepoint = 1;
if(rc)
DBUG_RETURN(rc);
if( thd->in_lock_tables || thd->gemini.tx_isolation == ISO_SERIALIZABLE )
{
rc = dsmObjectLock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,DSMOBJECT_TABLE,0,
lockMode, 1, 0);
}
}
else /* lock_type == F_UNLK */
{
/* Commit the tx if we're in auto-commit mode */
if (!(thd->options & OPTION_NOT_AUTO_COMMIT)&&
!(thd->options & OPTION_BEGIN))
gemini_commit(thd);
}
DBUG_RETURN(rc);
}
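/*
  Map the MySQL lock request to a Gemini (DSM) lock mode: READ UNCOMMITTED
  uses no record locks, write locks map to exclusive locks and everything
  else to share locks.  Outside of LOCK TABLES a plain write lock is
  downgraded to TL_WRITE_ALLOW_WRITE so several writers can work on the
  table at once.
*/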
THR_LOCK_DATA **ha_gemini::store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
{
/* If we are not doing a LOCK TABLE, then allow multiple writers */
if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
lock_type <= TL_WRITE) &&
!thd->in_lock_tables)
lock_type = TL_WRITE_ALLOW_WRITE;
lock.type=lock_type;
if(thd->gemini.tx_isolation == ISO_READ_UNCOMMITTED)
lockMode = DSM_LK_NOLOCK;
else if(table->reginfo.lock_type > TL_WRITE_ALLOW_READ)
lockMode = DSM_LK_EXCL;
else
lockMode = DSM_LK_SHARE;
}
*to++= &lock;
return to;
}
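/*
  Create a Gemini table: a storage area and extent for the data file
  (.gmd) and, if the table has keys, a second area and extent for the
  indexes (.gmi), plus one storage object for the table and one per
  index.  VSTs in the mysql database are virtual and get no areas or
  extents.
*/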
int ha_gemini::create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info)
{
THD *thd;
char name_buff[FN_REFLEN];
char dbname_buff[FN_REFLEN];
DBUG_ENTER("ha_gemini::create");
dsmContext_t *pcontext;
dsmStatus_t rc;
dsmArea_t areaNumber;
dsmObject_t tableNumber = 0;
dsmDbkey_t dummy = 0;
unsigned i;
int baseNameLen;
dsmObject_t indexNumber;
/* separate out the name of the table and the database (a VST must be
** created in the mysql database)
*/
rc = gemini_parse_table_name(name, dbname_buff, name_buff);
if (rc == 0)
{
/* If the table is a VST, don't create areas or extents */
if (strcmp(dbname_buff, "mysql") == 0)
{
tableNumber = gemini_is_vst(name_buff);
if (tableNumber)
{
DBUG_RETURN(0);
}
}
}
thd = current_thd;
if(thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
rc = gemini_connect(thd);
if(rc)
DBUG_RETURN(rc);
}
pcontext = (dsmContext_t *)thd->gemini.context;
if(thd->gemini.needSavepoint || using_ignore)
{
thd->gemini.savepoint++;
rc = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (rc)
DBUG_RETURN(rc);
thd->gemini.needSavepoint = 0;
}
fn_format(name_buff, name, "", ha_gemini_ext, 2 | 4);
/* Create a storage area */
rc = dsmAreaNew(pcontext,gemini_blocksize,DSMAREA_TYPE_DATA,
&areaNumber, gemini_recbits,
(dsmText_t *)"gemini_data_area");
if( rc != 0 )
{
printf("dsmAreaNew failed %ld\n",rc);
DBUG_RETURN(rc);
}
/* Create an extent */
/* Don't pass in leading ./ in name_buff */
rc = dsmExtentCreate(pcontext,areaNumber,1,15,5,
(dsmText_t *)&name_buff[start_of_name]);
if( rc != 0 )
{
printf("dsmExtentCreate failed %ld\n",rc);
DBUG_RETURN(rc);
}
/* Create the table storage object */
/* Change slashes in the name to periods */
for( i = 0; i < strlen(name_buff); i++)
if(name_buff[i] == '/' || name_buff[i] == '\\')
name_buff[i] = '.';
/* Get rid of .gmd suffix */
name_buff[strlen(name_buff) - 4] = '\0';
rc = dsmObjectCreate(pcontext, areaNumber, &tableNumber,
DSMOBJECT_MIXTABLE,0,0,0,
(dsmText_t *)&name_buff[start_of_name],
&dummy,&dummy);
if(rc == 0 && form->keys)
{
fn_format(name_buff, name, "", ha_gemini_idx_ext, 2 | 4);
/* Create a storage area */
rc = dsmAreaNew(pcontext,gemini_blocksize,DSMAREA_TYPE_DATA,
&areaNumber, gemini_recbits,
(dsmText_t *)"gemini_index_area");
if( rc != 0 )
{
printf("dsmAreaNew failed %ld\n",rc);
DBUG_RETURN(rc);
}
/* Create an extent */
/* Don't pass in leading ./ in name_buff */
rc = dsmExtentCreate(pcontext,areaNumber,1,15,5,
(dsmText_t *)&name_buff[start_of_name]);
if( rc != 0 )
{
printf("dsmExtentCreate failed %ld\n",rc);
DBUG_RETURN(rc);
}
/* Change slashes in the name to periods */
for( i = 0; i < strlen(name_buff); i++)
if(name_buff[i] == '/' || name_buff[i] == '\\')
name_buff[i] = '.';
/* Get rid of .gmi suffix */
name_buff[strlen(name_buff) - 4] = '\0';
baseNameLen = strlen(name_buff);
name_buff[baseNameLen] = '.';
baseNameLen++;
for( i = 0; i < form->keys; i++)
{
dsmObjectAttr_t indexUnique;
indexNumber = DSMINDEX_INVALID;
/* Create a storage object record for each index */
/* Add the index name so the object name is in the form
<db>.<table>.<index_name> */
strcpy(&name_buff[baseNameLen],table->key_info[i].name);
if(table->key_info[i].flags & HA_NOSAME)
indexUnique = 1;
else
indexUnique = 0;
rc = dsmObjectCreate(pcontext, areaNumber, &indexNumber,
DSMOBJECT_MIXINDEX,indexUnique,tableNumber,
DSMOBJECT_MIXTABLE,
(dsmText_t *)&name_buff[start_of_name],
&dummy,&dummy);
}
}
rc = dsmTableAutoIncrementSet(pcontext,tableNumber,
create_info->auto_increment_value);
/* Get a table lock on this table in case this table is being
created as part of an alter table statement. We don't want
the alter table statement to abort because of a lock table overflow
*/
if (thd->lex.sql_command == SQLCOM_CREATE_INDEX ||
thd->lex.sql_command == SQLCOM_ALTER_TABLE ||
thd->lex.sql_command == SQLCOM_DROP_INDEX)
{
rc = dsmObjectLock(pcontext,
(dsmObject_t)tableNumber,DSMOBJECT_TABLE,0,
DSM_LK_EXCL, 1, 0);
/* and don't commit, so we won't release the table lock on the table
number of the table being altered */
}
else
{
if(!rc)
rc = gemini_commit(thd);
}
DBUG_RETURN(rc);
}
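/*
  Drop a Gemini table: look up its object number from the dotted name,
  delete the associated storage objects, drop the index and data areas
  and their extents, commit, and finally unlink the extent files.
*/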
int ha_gemini::delete_table(const char *pname)
{
THD *thd;
dsmStatus_t rc;
dsmContext_t *pcontext;
unsigned i,nameLen;
dsmArea_t indexArea = 0;
dsmArea_t tableArea = 0;
dsmObjectAttr_t objectAttr;
dsmObject_t associate;
dsmObjectType_t associateType;
dsmDbkey_t block, root;
int need_txn = 0;
dsmObject_t tableNum = 0;
char name_buff[FN_REFLEN];
char dbname_buff[FN_REFLEN];
DBUG_ENTER("ha_gemini::delete_table");
/* separate out the name of the table and the database (a VST must be
** located in the mysql database)
*/
rc = gemini_parse_table_name(pname, dbname_buff, name_buff);
if (rc == 0)
{
/* If the table is a VST, there are no areas or extents to delete */
if (strcmp(dbname_buff, "mysql") == 0)
{
tableNum = gemini_is_vst(name_buff);
if (tableNum)
{
DBUG_RETURN(0);
}
}
}
thd = current_thd;
if(thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
rc = gemini_connect(thd);
if(rc)
{
DBUG_RETURN(rc);
}
}
pcontext = (dsmContext_t *)thd->gemini.context;
bzero(name_buff, FN_REFLEN);
nameLen = strlen(pname);
for( i = start_of_name; i < nameLen; i++)
{
if(pname[i] == '/' || pname[i] == '\\')
name_buff[i-start_of_name] = '.';
else
name_buff[i-start_of_name] = pname[i];
}
rc = dsmObjectNameToNum(pcontext, (dsmText_t *)name_buff,
(dsmObject_t *)&tableNum);
if (rc)
{
printf("Cound not find table number for %s with string %s, %ld\n",
pname,name_buff,rc);
rc = gemini_rollback(thd);
if (rc)
{
printf("Error in rollback %ld\n",rc);
}
DBUG_RETURN(rc);
}
rc = dsmObjectInfo(pcontext, tableNum, DSMOBJECT_MIXTABLE, &tableArea,
&objectAttr, &associate, &associateType, &block, &root);
if (rc)
{
printf("Failed to get area number for table %d, %s, return %ld\n",
tableNum, pname, rc);
rc = gemini_rollback(thd);
if (rc)
{
printf("Error in rollback %ld\n",rc);
}
DBUG_RETURN(rc);
}
indexArea = DSMAREA_INVALID;
/* Delete the index and table storage objects associated with the table */
rc = dsmObjectDeleteAssociate(pcontext, tableNum, &indexArea);
if (rc)
{
printf("Error deleting storage objects for table number %d, return %ld\n",
(int)tableNum, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
printf("Error in rollback %ld\n",rc);
}
DBUG_RETURN(rc);
}
if (indexArea != DSMAREA_INVALID)
{
/* Delete the extents for both Index and Table */
rc = dsmExtentDelete(pcontext, indexArea, 0);
rc = dsmAreaDelete(pcontext, indexArea);
if (rc)
{
printf("Error deleting Index Area %ld, return %ld\n", indexArea, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
printf("Error in rollback %ld\n",rc);
}
DBUG_RETURN(rc);
}
}
rc = dsmExtentDelete(pcontext, tableArea, 0);
rc = dsmAreaDelete(pcontext, tableArea);
if (rc)
{
printf("Error deleting table Area %ld, name %s, return %ld\n",
tableArea, pname, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
printf("Error in rollback %ld\n",rc);
}
DBUG_RETURN(rc);
}
/* Commit the transaction */
rc = gemini_commit(thd);
if (rc)
{
printf("Failed to commit transaction %ld\n",rc);
}
/* now remove all the files that need to be removed and
cause a checkpoint so recovery will work */
rc = dsmExtentUnlink(pcontext);
DBUG_RETURN(0);
}
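/*
  Rename a Gemini table: the dotted object name is changed inside the
  database and the .gmd (and .gmi, if present) extent files are renamed
  on disk.  VSTs cannot be renamed.
*/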
int ha_gemini::rename_table(const char *pfrom, const char *pto)
{
THD *thd;
dsmContext_t *pcontext;
dsmStatus_t rc;
char tabname_buff[FN_REFLEN];
char dbname_buff[FN_REFLEN];
char name_buff[FN_REFLEN];
char newname_buff[FN_REFLEN];
char newextname_buff[FN_REFLEN];
char newidxextname_buff[FN_REFLEN];
unsigned i, nameLen;
dsmObject_t tableNum;
dsmArea_t indexArea = 0;
DBUG_ENTER("ha_gemini::rename_table");
/* don't allow rename of VSTs */
rc = gemini_parse_table_name(pfrom, dbname_buff, name_buff);
if (rc == 0)
{
/* If the table is a VST, don't create areas or extents */
if (strcmp(dbname_buff, "mysql") == 0)
{
if (gemini_is_vst(name_buff))
{
DBUG_RETURN(0);
}
}
}
thd = current_thd;
if (thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
rc = gemini_connect(thd);
if (rc)
{
DBUG_RETURN(rc);
}
}
pcontext = (dsmContext_t *)thd->gemini.context;
/* change the slashes to dots in the old and new names */
nameLen = strlen(pfrom);
for( i = start_of_name; i < nameLen; i++)
{
if(pfrom[i] == '/' || pfrom[i] == '\\')
name_buff[i-start_of_name] = '.';
else
name_buff[i-start_of_name] = pfrom[i];
}
name_buff[i-start_of_name] = '\0';
nameLen = strlen(pto);
for( i = start_of_name; i < nameLen; i++)
{
if(pto[i] == '/' || pto[i] == '\\')
newname_buff[i-start_of_name] = '.';
else
newname_buff[i-start_of_name] = pto[i];
}
newname_buff[i-start_of_name] = '\0';
/* generate new extent names (for table and index extents) */
fn_format(newextname_buff, pto, "", ha_gemini_ext, 2 | 4);
fn_format(newidxextname_buff, pto, "", ha_gemini_idx_ext, 2 | 4);
rc = dsmObjectNameToNum(pcontext, (dsmText_t *)name_buff, &tableNum);
if (rc)
goto errorReturn;
rc = dsmObjectRename(pcontext, tableNum,
(dsmText_t *)newname_buff,
(dsmText_t *)&newidxextname_buff[start_of_name],
(dsmText_t *)&newextname_buff[start_of_name],
&indexArea);
if (rc)
goto errorReturn;
/* rename the physical table and index files (if necessary) */
rc = rename_file_ext(pfrom, pto, ha_gemini_ext);
if (!rc && indexArea)
{
rc = rename_file_ext(pfrom, pto, ha_gemini_idx_ext);
}
errorReturn:
DBUG_RETURN(rc);
}
/*
How many seeks it will take to read through the table.
This is to be comparable to the number returned by records_in_range so
that we can decide if we should scan the table or use keys.
*/
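/*
  Rough illustration (the numbers are only an example): with the default
  gemini_blocksize of 8192 and 100 byte rows about 80 rows fit per block,
  so a table of 8000 rows costs roughly 100 block reads.
*/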
double ha_gemini::scan_time()
{
return records / (gemini_blocksize / table->reclength);
}
int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
{
int error;
int checkStatus = HA_ADMIN_OK;
ha_rows indexCount;
byte *buf = 0, *indexBuf = 0;
int errorCount = 0;
/* Get a shared table lock */
if(thd->gemini.needSavepoint)
{
/* We don't really need a savepoint here but do it anyway
just to keep the savepoint number correct. */
thd->gemini.savepoint++;
error = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
return(error);
thd->gemini.needSavepoint = 0;
}
buf = my_malloc(table->rec_buff_length,MYF(MY_WME));
indexBuf = my_malloc(table->rec_buff_length,MYF(MY_WME));
/* Lock the table */
error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,
DSMOBJECT_TABLE,0,
DSM_LK_SHARE, 1, 0);
if(error)
return error;
info(HA_STATUS_VARIABLE);
/* Scan each index; with the quick option only count the entries, otherwise
also fetch each row and compare it with the data built from the key */
for (uint i = 0; i < table->keys; i++)
{
key_read = 1;
indexCount = 0;
error = index_init(i);
error = index_first(indexBuf);
while(!error)
{
indexCount++;
if(!check_opt->quick)
{
/* Fetch row and compare to data produced from key */
error = fetch_row(thd->gemini.context,buf);
if(!error)
{
if(key_cmp(i,buf,indexBuf))
{
print_msg(thd,table->real_name,"check","error",
"Key does not match row for rowid %d for index %s",
lastRowid,table->key_info[i].name);
checkStatus = HA_ADMIN_CORRUPT;
errorCount++;
if(errorCount > 1000)
goto error_return;
}
else if(error == DSM_S_RMNOTFND)
{
errorCount++;
checkStatus = HA_ADMIN_CORRUPT;
print_msg(thd,table->real_name,"check","error",
"Key does not have a valid row pointer %d for index %s",
lastRowid,table->key_info[i].name);
if(errorCount > 1000)
goto error_return;
error = 0;
}
}
}
if(!error)
error = index_next(indexBuf);
}
if(error == HA_ERR_END_OF_FILE)
{
/* Check count of rows */
if(records != indexCount)
{
/* Number of index entries does not agree with the number of
rows in the table. */
checkStatus = HA_ADMIN_CORRUPT;
print_msg(thd,table->real_name,"check","error",
"Total rows %d does not match total index entries %d for %s",
records, indexCount,
table->key_info[i].name);
}
}
else
{
checkStatus = HA_ADMIN_FAILED;
goto error_return;
}
index_end();
}
if(!check_opt->quick)
{
/* Now scan the table and for each row generate the keys
and find them in the index */
error = fullCheck(thd, buf);
if(error)
checkStatus = error;
}
error_return:
my_free(buf,MYF(MY_ALLOW_ZERO_PTR));
my_free(indexBuf,MYF(MY_ALLOW_ZERO_PTR));
index_end();
key_read = 0;
error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,
DSMOBJECT_TABLE,0,
DSM_LK_SHARE,0);
return checkStatus;
}
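/*
  Second pass of check(): scan the table row by row and verify that each
  row has all of its index entries (handleIndexEntries with KEY_CHECK).
*/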
int ha_gemini::fullCheck(THD *thd,byte *buf)
{
int error;
int errorCount = 0;
int checkStatus = 0;
lastRowid = 0;
while(((error = rnd_next( buf)) != HA_ERR_END_OF_FILE) && errorCount <= 1000)
{
if(!error)
{
error = handleIndexEntries(buf,lastRowid,KEY_CHECK);
if(error)
{
/* Error finding an index entry for a row. */
print_msg(thd,table->real_name,"check","error",
"Unable to find all index entries for row %d",
lastRowid);
errorCount++;
checkStatus = HA_ADMIN_CORRUPT;
error = 0;
}
}
else
{
/* Error reading a row */
print_msg(thd,table->real_name,"check","error",
"Error reading row %d status = %d",
lastRowid,error);
errorCount++;
checkStatus = HA_ADMIN_CORRUPT;
error = 0;
}
}
return checkStatus;
}
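/*
  Rebuild the indexes: reset the index storage, scan the whole table with
  dsmTableScan() and re-create the key entries for every row, ignoring
  duplicate key errors so that as much as possible is repaired.
*/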
int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
int error;
dsmRecord_t dsmRecord;
byte *buf;
if(thd->gemini.needSavepoint)
{
/* We don't really need a savepoint here but do it anyway
just to keep the savepoint number correct. */
thd->gemini.savepoint++;
error = dsmTransaction((dsmContext_t *)thd->gemini.context,
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
return(error);
thd->gemini.needSavepoint = 0;
}
/* Lock the table */
error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,
DSMOBJECT_TABLE,0,
DSM_LK_EXCL, 1, 0);
if(error)
return error;
error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
DSM_TAGCONTEXT_NO_LOGGING,1);
error = dsmTableReset((dsmContext_t *)thd->gemini.context,
(dsmTable_t)tableNumber, table->keys,
pindexNumbers);
buf = my_malloc(table->rec_buff_length,MYF(MY_WME));
dsmRecord.table = tableNumber;
dsmRecord.recid = 0;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
dsmRecord.maxLength = table->reclength;
while(!error)
{
error = dsmTableScan((dsmContext_t *)thd->gemini.context,
&dsmRecord, DSMFINDNEXT, DSM_LK_NOLOCK,
1);
if(!error)
{
unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
error = handleIndexEntries(buf,dsmRecord.recid,KEY_CREATE);
if(error == HA_ERR_FOUND_DUPP_KEY)
{
/* We don't want to stop on duplicate keys -- we're repairing
here so let's get as much repaired as possible. */
error = 0;
}
}
}
error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,
DSMOBJECT_TABLE,0,
DSM_LK_EXCL,0);
my_free(buf,MYF(MY_ALLOW_ZERO_PTR));
error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
DSM_TAGCONTEXT_NO_LOGGING,0);
return error;
}
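/*
  Estimate how many rows fall inside a key range.  A key bracket is built
  from the start and end keys (or from the index low/high values when a
  bound is missing) and dsmIndexRowsInRange() returns an in-range estimate;
  fractional results are scaled by the table row count.
*/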
ha_rows ha_gemini::records_in_range(int keynr,
const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
const byte *end_key,uint end_key_len,
enum ha_rkey_function end_search_flag)
{
int error;
int componentLen;
float pctInrange;
ha_rows rows = 5;
DBUG_ENTER("records_in_range");
error = index_init(keynr);
if(error)
DBUG_RETURN(rows);
pbracketBase->index = (short)pindexNumbers[keynr];
pbracketBase->keycomps = 1;
if(start_key)
{
error = pack_key(keynr, pbracketBase, start_key, start_key_len);
if(start_search_flag == HA_READ_AFTER_KEY)
{
/* A greater than operation */
error = gemKeyAddLow(pbracketBase->keystr + pbracketBase->keyLen,
&componentLen);
pbracketBase->keyLen += componentLen;
}
}
else
{
error = gemKeyLow(pbracketBase->keystr, &componentLen,
pbracketBase->index);
pbracketBase->keyLen = componentLen;
}
pbracketBase->keyLen -= 3;
if(end_key)
{
error = pack_key(keynr, pbracketLimit, end_key, end_key_len);
if(!error && end_search_flag == HA_READ_AFTER_KEY)
{
error = gemKeyAddHigh(pbracketLimit->keystr + pbracketLimit->keyLen,
&componentLen);
pbracketLimit->keyLen += componentLen;
}
}
else
{
error = gemKeyHigh(pbracketLimit->keystr,&componentLen,
pbracketLimit->index);
pbracketLimit->keyLen = componentLen;
}
pbracketLimit->keyLen -= 3;
error = dsmIndexRowsInRange((dsmContext_t *)current_thd->gemini.context,
pbracketBase,pbracketLimit,
&pctInrange);
if(pctInrange >= 1)
rows = (ha_rows)pctInrange;
else
{
rows = (ha_rows)(records * pctInrange);
if(!rows && pctInrange > 0)
rows = 1;
}
index_end();
DBUG_RETURN(rows);
}
/*
Pack a row for storage. If the row is of fixed length, just store the
row 'as is'.
If not, we will generate a packed row suitable for storage.
This will only fail if we don't have enough memory to pack the row, which
may only happen in rows with blobs, as the default row length is
pre-allocated.
*/
int ha_gemini::pack_row(byte **pprow, int *ppackedLength, const byte *record)
{
if (fixed_length_row)
{
*pprow = (byte *)record;
*ppackedLength=(int)table->reclength;
return 0;
}
if (table->blob_fields)
{
return HA_ERR_WRONG_COMMAND;
}
/* Copy null bits */
memcpy(rec_buff, record, table->null_bytes);
byte *ptr=rec_buff + table->null_bytes;
for (Field **field=table->field ; *field ; field++)
ptr=(byte*) (*field)->pack((char*) ptr,record + (*field)->offset());
*pprow=rec_buff;
*ppackedLength= (ptr - rec_buff);
return 0;
}
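/*
  Convert a stored row back to MySQL format.  Fixed length rows from
  ordinary tables are copied as is; VST rows are in Gemini internal format
  and every field is converted with recGetLONG()/recGetBYTES(); packed
  rows are unpacked field by field.
*/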
void ha_gemini::unpack_row(char *record, char *prow)
{
if (fixed_length_row)
{
/* If the table is a VST, the row is in Gemini internal format.
** Convert the fields to MySQL format.
*/
if (RM_IS_VST(tableNumber))
{
int i = 2; /* VST fields are numbered sequentially starting at 2 */
long longValue;
char *fld;
unsigned long unknown;
for (Field **field = table->field; *field; field++, i++)
{
switch ((*field)->type())
{
case FIELD_TYPE_LONG:
case FIELD_TYPE_TINY:
case FIELD_TYPE_SHORT:
case FIELD_TYPE_TIMESTAMP:
case FIELD_TYPE_LONGLONG:
case FIELD_TYPE_INT24:
case FIELD_TYPE_DATE:
case FIELD_TYPE_TIME:
case FIELD_TYPE_DATETIME:
case FIELD_TYPE_YEAR:
case FIELD_TYPE_NEWDATE:
case FIELD_TYPE_ENUM:
case FIELD_TYPE_SET:
recGetLONG((dsmText_t *)prow, i, 0, &longValue, &unknown);
if (unknown)
{
(*field)->set_null();
}
else
{
(*field)->set_notnull();
(*field)->store((longlong)longValue);
}
break;
case FIELD_TYPE_DECIMAL:
case FIELD_TYPE_DOUBLE:
case FIELD_TYPE_TINY_BLOB:
case FIELD_TYPE_MEDIUM_BLOB:
case FIELD_TYPE_LONG_BLOB:
case FIELD_TYPE_BLOB:
case FIELD_TYPE_VAR_STRING:
break;
case FIELD_TYPE_STRING:
svcByteString_t stringFld;
fld = (char *)my_malloc((*field)->field_length, MYF(MY_WME));
stringFld.pbyte = (TEXT *)fld;
stringFld.size = (*field)->field_length;
recGetBYTES((dsmText_t *)prow, i, 0, &stringFld, &unknown);
if (unknown)
{
(*field)->set_null();
}
else
{
(*field)->set_notnull();
(*field)->store(fld, (*field)->field_length);
}
my_free(fld, MYF(MY_ALLOW_ZERO_PTR));
break;
default:
break;
}
}
}
else
{
memcpy(record,(char*) prow,table->reclength);
}
}
else
{
/* Copy null bits */
const char *ptr= (const char*) prow;
memcpy(record, ptr, table->null_bytes);
ptr+=table->null_bytes;
for (Field **field=table->field ; *field ; field++)
ptr= (*field)->unpack(record + (*field)->offset(), ptr);
}
}
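/*
  Return 1 if any part of the given key differs between the two row
  images; used by check() to compare a row built from an index entry with
  the row fetched from the table.
*/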
int ha_gemini::key_cmp(uint keynr, const byte * old_row,
const byte * new_row)
{
KEY_PART_INFO *key_part=table->key_info[keynr].key_part;
KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts;
for ( ; key_part != end ; key_part++)
{
if (key_part->null_bit)
{
if ((old_row[key_part->null_offset] & key_part->null_bit) !=
(new_row[key_part->null_offset] & key_part->null_bit))
return 1;
}
if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH))
{
if (key_part->field->cmp_binary(old_row + key_part->offset,
new_row + key_part->offset,
(ulong) key_part->length))
return 1;
}
else
{
if (memcmp(old_row+key_part->offset, new_row+key_part->offset,
key_part->length))
return 1;
}
}
return 0;
}
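/*
  Example (hypothetical path, assuming start_of_name skips the leading
  "./"): "./mydb/mytab.gmd" is split into dbname "mydb" and tabname
  "mytab"; the extension, when present, is not copied into tabname.
*/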
int gemini_parse_table_name(const char *fullname, char *dbname, char *tabname)
{
char *namestart;
char *nameend;
/* separate out the name of the table and the database
*/
namestart = strchr(fullname + start_of_name, '/');
if (!namestart)
{
/* if on Windows, slashes go the other way */
namestart = strchr(fullname + start_of_name, '\\');
}
nameend = strchr(fullname + start_of_name, '.');
/* sometimes fullname has an extension, sometimes it doesn't */
if (!nameend)
{
nameend = (char *)fullname + strlen(fullname);
}
strncpy(dbname, fullname + start_of_name,
(namestart - fullname) - start_of_name);
dbname[(namestart - fullname) - start_of_name] = '\0';
strncpy(tabname, namestart + 1, (nameend - namestart) - 1);
tabname[nameend - namestart - 1] = '\0';
return 0;
}
/* PROGRAM: gemini_is_vst - if the name is the name of a VST, return
* its number
*
* RETURNS: Table number if a match is found
* 0 if not a VST
*/
int
gemini_is_vst(const char *pname) /* IN the name */
{
int tablenum = 0;
for (int i = 0; i < vstnumfils; i++)
{
if (strcmp(pname, vstfil[i].filename) == 0)
{
tablenum = vstfil[i].filnum;
break;
}
}
return tablenum;
}
static void print_msg(THD *thd, const char *table_name, const char *op_name,
const char *msg_type, const char *fmt, ...)
{
String* packet = &thd->packet;
packet->length(0);
char msgbuf[256];
msgbuf[0] = 0;
va_list args;
va_start(args,fmt);
my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
DBUG_PRINT(msg_type,("message: %s",msgbuf));
net_store_data(packet, table_name);
net_store_data(packet, op_name);
net_store_data(packet, msg_type);
net_store_data(packet, msgbuf);
if (my_net_write(&thd->net, (char*)thd->packet.ptr(),
thd->packet.length()))
thd->killed=1;
}
#endif /* HAVE_GEMINI_DB */
/* Copyright (C) 2000 NuSphere Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef __GNUC__
#pragma interface /* gcc class implementation */
#endif
#include "dstd.h"
#include "dsmpub.h"
/* class for the Gemini handler */
enum enum_key_string_options{KEY_CREATE,KEY_DELETE,KEY_CHECK};
#define READ_UNCOMMITED 0
#define READ_COMMITED 1
#define REPEATABLE_READ 2
#define SERIALIZEABLE 3
class ha_gemini: public handler
{
/* define file as an int for now until we have a real file struct */
int file;
uint int_option_flag;
int tableNumber;
dsmIndex_t *pindexNumbers; // dsm object numbers for the indexes on this table
unsigned long lastRowid;
uint last_dup_key;
bool fixed_length_row, key_read, using_ignore;
byte *rec_buff;
dsmKey_t *pbracketBase;
dsmKey_t *pbracketLimit;
dsmKey_t *pfoundKey;
dsmMask_t tableStatus; // Crashed/repair status
int index_open(char *tableName);
int pack_row(byte **prow, int *ppackedLength, const byte *record);
void unpack_row(char *record, char *prow);
int findRow(THD *thd, dsmMask_t findMode, byte *buf);
int fetch_row(void *gemini_context, const byte *buf);
int handleIndexEntries(const byte * record, dsmRecid_t recid,
enum_key_string_options option);
int handleIndexEntry(const byte * record, dsmRecid_t recid,
enum_key_string_options option,uint keynr);
int createKeyString(const byte * record, KEY *pkeyinfo,
unsigned char *pkeyBuf, int bufSize,
int *pkeyStringLen, short geminiIndexNumber,
bool *thereIsAnull);
int fullCheck(THD *thd,byte *buf);
int pack_key( uint keynr, dsmKey_t *pkey,
const byte *key_ptr, uint key_length);
void unpack_key(char *record, dsmKey_t *key, uint index);
int key_cmp(uint keynr, const byte * old_row,
const byte * new_row);
short cursorId; /* cursorId of active index cursor if any */
dsmMask_t lockMode; /* Shared or exclusive */
/* FIXFIX: Not clear why these are needed; the store_lock method is not
fully understood, but the server core dumps without them */
THR_LOCK alock;
THR_LOCK_DATA lock;
public:
ha_gemini(TABLE *table): handler(table), file(0),
int_option_flag(HA_READ_NEXT | HA_READ_PREV |
HA_REC_NOT_IN_SEQ |
HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
HA_LONGLONG_KEYS | HA_NULL_KEY | HA_HAVE_KEY_READ_ONLY |
HA_NO_BLOBS | HA_NO_TEMP_TABLES |
/* HA_BLOB_KEY | */ /*HA_NOT_EXACT_COUNT | */
/*HA_KEY_READ_WRONG_STR |*/ HA_DROP_BEFORE_CREATE),
pbracketBase(0),pbracketLimit(0),pfoundKey(0),
cursorId(0)
{
}
~ha_gemini() {}
const char *table_type() const { return "Gemini"; }
const char **bas_ext() const;
ulong option_flag() const { return int_option_flag; }
uint max_record_length() const { return MAXRECSZ; }
uint max_keys() const { return MAX_KEY-1; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MAXKEYSZ; }
bool fast_key_read() { return 1;}
bool has_transactions() { return 1;}
int open(const char *name, int mode, uint test_if_locked);
int close(void);
double scan_time();
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
int index_init(uint index);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_next(byte * buf);
int index_next_same(byte * buf, const byte *key, uint keylen);
int index_prev(byte * buf);
int index_first(byte * buf);
int index_last(byte * buf);
int rnd_init(bool scan=1);
int rnd_end();
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
void info(uint);
int extra(enum ha_extra_function operation);
int reset(void);
int check(THD* thd, HA_CHECK_OPT* check_opt);
int repair(THD* thd, HA_CHECK_OPT* check_opt);
int external_lock(THD *thd, int lock_type);
virtual longlong get_auto_increment();
void position(byte *record);
ha_rows records_in_range(int inx,
const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
const byte *end_key,uint end_key_len,
enum ha_rkey_function end_search_flag);
int create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
};
#define GEMOPT_FLUSH_LOG 0x00000001
#define GEMOPT_UNBUFFERED_IO 0x00000002
#define GEMINI_RECOVERY_FULL 0x00000001
#define GEMINI_RECOVERY_NONE 0x00000002
#define GEMINI_RECOVERY_FORCE 0x00000004
#define GEM_OPTID_SPIN_RETRIES 1
extern bool gemini_skip;
extern long gemini_options;
extern long gemini_buffer_cache;
extern long gemini_io_threads;
extern long gemini_log_cluster_size;
extern long gemini_locktablesize;
extern long gemini_lock_wait_timeout;
extern long gemini_spin_retries;
extern long gemini_connection_limit;
extern TYPELIB gemini_recovery_typelib;
extern ulong gemini_recovery_options;
bool gemini_init(void);
bool gemini_end(void);
bool gemini_flush_logs(void);
int gemini_commit(THD *thd);
int gemini_rollback(THD *thd);
void gemini_disconnect(THD *thd);
int gemini_rollback_to_savepoint(THD *thd);
int gemini_parse_table_name(const char *fullname, char *dbname, char *tabname);
int gemini_is_vst(const char *pname);
int gemini_set_option_long(int optid, long optval);
const int gemini_blocksize = 8192;
const int gemini_recbits = 7;
......@@ -35,6 +35,9 @@
#ifdef HAVE_INNOBASE_DB
#include "ha_innobase.h"
#endif
#ifdef HAVE_GEMINI_DB
#include "ha_gemini.h"
#endif
#include <myisampack.h>
#include <errno.h>
......@@ -49,7 +52,7 @@ ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count,
const char *ha_table_type[] = {
"", "DIAB_ISAM","HASH","MISAM","PISAM","RMS_ISAM","HEAP", "ISAM",
"MRG_ISAM","MYISAM", "MRG_MYISAM", "BDB", "INNOBASE", "?", "?",NullS
"MRG_ISAM","MYISAM", "MRG_MYISAM", "BDB", "INNOBASE", "GEMINI", "?", "?",NullS
};
const char *ha_row_type[] = {
......@@ -77,6 +80,10 @@ enum db_type ha_checktype(enum db_type database_type)
case DB_TYPE_INNOBASE:
return(innobase_skip ? DB_TYPE_MYISAM : database_type);
#endif
#ifdef HAVE_GEMINI_DB
case DB_TYPE_GEMINI:
return(gemini_skip ? DB_TYPE_MYISAM : database_type);
#endif
#ifndef NO_HASH
case DB_TYPE_HASH:
#endif
......@@ -118,6 +125,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
#ifdef HAVE_INNOBASE_DB
case DB_TYPE_INNOBASE:
return new ha_innobase(table);
#endif
#ifdef HAVE_GEMINI_DB
case DB_TYPE_GEMINI:
return new ha_gemini(table);
#endif
case DB_TYPE_HEAP:
return new ha_heap(table);
......@@ -149,6 +160,15 @@ int ha_init()
if (!innobase_skip) // If we couldn't use handler
opt_using_transactions=1;
}
#endif
#ifdef HAVE_GEMINI_DB
if (!gemini_skip)
{
if (gemini_init())
return -1;
if (!gemini_skip) // If we couldn't use handler
opt_using_transactions=1;
}
#endif
return 0;
}
......@@ -176,6 +196,10 @@ int ha_panic(enum ha_panic_function flag)
#ifdef HAVE_INNOBASE_DB
if (!innobase_skip)
error|=innobase_end();
#endif
#ifdef HAVE_GEMINI_DB
if (!gemini_skip)
error|=gemini_end();
#endif
return error;
} /* ha_panic */
......@@ -187,6 +211,12 @@ void ha_close_connection(THD* thd)
if (!innobase_skip)
innobase_close_connection(thd);
#endif
#ifdef HAVE_GEMINI_DB
if (!gemini_skip && thd->gemini.context)
{
gemini_disconnect(thd);
}
#endif /* HAVE_GEMINI_DB */
}
/*
......@@ -250,6 +280,20 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans)
error=1;
}
}
#endif
#ifdef HAVE_GEMINI_DB
/* Commit the transaction on behalf of the commit statement
or if we're in auto-commit mode */
if((trans == &thd->transaction.all) ||
(!(thd->options & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))))
{
error=gemini_commit(thd);
if (error)
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
error=1;
}
}
#endif
if (error && trans == &thd->transaction.all && mysql_bin_log.is_open())
sql_print_error("Error: Got error during commit; Binlog is not up to date!");
......@@ -287,6 +331,18 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans)
error=1;
}
}
#endif
#ifdef HAVE_GEMINI_DB
if((trans == &thd->transaction.stmt) &&
(thd->options & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN)))
error = gemini_rollback_to_savepoint(thd);
else
error=gemini_rollback(thd);
if (error)
{
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error);
error=1;
}
#endif
if (trans == &thd->transaction.all)
reinit_io_cache(&thd->transaction.trans_log,
......@@ -678,6 +734,21 @@ int handler::rename_table(const char * from, const char * to)
DBUG_RETURN(0);
}
int ha_commit_rename(THD *thd)
{
int error=0;
#ifdef HAVE_GEMINI_DB
/* Gemini needs to commit the rename; otherwise a rollback will change
** the table names back internally but the physical files will still
** have the new names.
*/
if (ha_commit_stmt(thd))
error= -1;
if (ha_commit(thd))
error= -1;
#endif
return error;
}
int handler::index_next_same(byte *buf, const byte *key, uint keylen)
{
......
......@@ -72,6 +72,7 @@
#define HA_DROP_BEFORE_CREATE (HA_PRIMARY_KEY_IN_READ_INDEX*2)
#define HA_NOT_READ_AFTER_KEY (HA_DROP_BEFORE_CREATE*2)
#define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2)
#define HA_NO_TEMP_TABLES (HA_NOT_DELETE_WITH_CACHE*2)
/* Parameters for open() (in register form->filestat) */
/* HA_GET_INFO does a implicit HA_ABORT_IF_LOCKED */
......@@ -310,6 +311,17 @@ public:
enum thr_lock_type lock_type)=0;
};
#ifdef HAVE_GEMINI_DB
struct st_gemini
{
void *context;
unsigned long savepoint;
bool needSavepoint;
uint tx_isolation;
uint lock_count;
};
#endif
/* Some extern variables used with handlers */
extern const char *ha_row_type[];
......@@ -337,4 +349,4 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans);
int ha_autocommit_or_rollback(THD *thd, int error);
void ha_set_spin_retries(uint retries);
bool ha_flush_logs(void);
int ha_commit_rename(THD *thd);
......@@ -146,6 +146,7 @@ static SYMBOL symbols[] = {
{ "FULLTEXT", SYM(FULLTEXT_SYM),0,0},
{ "FUNCTION", SYM(UDF_SYM),0,0},
{ "GEMINI", SYM(GEMINI_SYM),0,0},
{ "GEMINI_SPIN_RETRIES", SYM(GEMINI_SPIN_RETRIES),0,0},
{ "GLOBAL", SYM(GLOBAL_SYM),0,0},
{ "GRANT", SYM(GRANT),0,0},
{ "GRANTS", SYM(GRANTS),0,0},
......
......@@ -25,6 +25,9 @@
#ifdef HAVE_INNOBASE_DB
#include "ha_innobase.h"
#endif
#ifdef HAVE_GEMINI_DB
#include "ha_gemini.h"
#endif
#include "ha_myisam.h"
#include <nisam.h>
#include <thr_alarm.h>
......@@ -279,6 +282,9 @@ const char *first_keyword="first";
const char **errmesg; /* Error messages */
const char *myisam_recover_options_str="OFF";
enum_tx_isolation default_tx_isolation=ISO_READ_COMMITTED;
#ifdef HAVE_GEMINI_DB
const char *gemini_recovery_options_str="FULL";
#endif
my_string mysql_unix_port=NULL,mysql_tmpdir=NULL;
ulong my_bind_addr; /* the address we bind to */
DATE_FORMAT dayord;
......@@ -2427,7 +2433,9 @@ enum options {
OPT_INNOBASE_FLUSH_LOG_AT_TRX_COMMIT,
OPT_SAFE_SHOW_DB,
OPT_GEMINI_SKIP, OPT_INNOBASE_SKIP,
OPT_TEMP_POOL, OPT_TX_ISOLATION
OPT_TEMP_POOL, OPT_TX_ISOLATION,
OPT_GEMINI_FLUSH_LOG, OPT_GEMINI_RECOVER,
OPT_GEMINI_UNBUFFERED_IO
};
static struct option long_options[] = {
......@@ -2464,6 +2472,11 @@ static struct option long_options[] = {
{"enable-locking", no_argument, 0, (int) OPT_ENABLE_LOCK},
{"exit-info", optional_argument, 0, 'T'},
{"flush", no_argument, 0, (int) OPT_FLUSH},
#ifdef HAVE_GEMINI_DB
{"gemini-flush-log-at-commit",no_argument, 0, (int) OPT_GEMINI_FLUSH_LOG},
{"gemini-recovery", required_argument, 0, (int) OPT_GEMINI_RECOVER},
{"gemini-unbuffered-io", no_argument, 0, (int) OPT_GEMINI_UNBUFFERED_IO},
#endif
/* We must always support this option to make scripts like mysqltest easier
to do */
{"innobase_data_file_path", required_argument, 0,
......@@ -2592,6 +2605,22 @@ CHANGEABLE_VAR changeable_vars[] = {
DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1 },
{ "flush_time", (long*) &flush_time,
FLUSH_TIME, 0, ~0L, 0, 1 },
#ifdef HAVE_GEMINI_DB
{ "gemini_buffer_cache", (long*) &gemini_buffer_cache,
128 * 8192, 16, LONG_MAX, 0, 1 },
{ "gemini_connection_limit", (long*) &gemini_connection_limit,
100, 10, LONG_MAX, 0, 1 },
{ "gemini_io_threads", (long*) &gemini_io_threads,
2, 0, 256, 0, 1 },
{ "gemini_log_cluster_size", (long*) &gemini_log_cluster_size,
256 * 1024, 16 * 1024, LONG_MAX, 0, 1 },
{ "gemini_lock_table_size", (long*) &gemini_locktablesize,
4096, 1024, LONG_MAX, 0, 1 },
{ "gemini_lock_wait_timeout",(long*) &gemini_lock_wait_timeout,
10, 1, LONG_MAX, 0, 1 },
{ "gemini_spin_retries", (long*) &gemini_spin_retries,
1, 0, LONG_MAX, 0, 1 },
#endif
#ifdef HAVE_INNOBASE_DB
{"innobase_mirrored_log_groups",
(long*) &innobase_mirrored_log_groups, 1, 1, 10, 0, 1},
......@@ -2711,6 +2740,16 @@ struct show_var_st init_vars[]= {
{"delayed_queue_size", (char*) &delayed_queue_size, SHOW_LONG},
{"flush", (char*) &myisam_flush, SHOW_MY_BOOL},
{"flush_time", (char*) &flush_time, SHOW_LONG},
#ifdef HAVE_GEMINI_DB
{"gemini_buffer_cache", (char*) &gemini_buffer_cache, SHOW_LONG},
{"gemini_connection_limit", (char*) &gemini_connection_limit, SHOW_LONG},
{"gemini_io_threads", (char*) &gemini_io_threads, SHOW_LONG},
{"gemini_log_cluster_size", (char*) &gemini_log_cluster_size, SHOW_LONG},
{"gemini_lock_table_size", (char*) &gemini_locktablesize, SHOW_LONG},
{"gemini_lock_wait_timeout",(char*) &gemini_lock_wait_timeout, SHOW_LONG},
{"gemini_recovery_options", (char*) &gemini_recovery_options_str, SHOW_CHAR_PTR},
{"gemini_spin_retries", (char*) &gemini_spin_retries, SHOW_LONG},
#endif
{"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
{"have_gemini", (char*) &have_gemini, SHOW_HAVE},
{"have_innobase", (char*) &have_innobase, SHOW_HAVE},
......@@ -2974,6 +3013,16 @@ static void usage(void)
--skip-bdb Don't use berkeley db (will save memory)\n\
");
#endif /* HAVE_BERKELEY_DB */
#ifdef HAVE_GEMINI_DB
puts("\
--gemini-recovery=mode Set Crash Recovery operating mode\n\
(FULL, NONE, FORCE - default FULL)\n\
--gemini-flush-log-at-commit\n\
Every commit forces a write to the recovery log\n\
--gemini-unbuffered-io Use unbuffered i/o\n\
--skip-gemini Don't use gemini (will save memory)\n\
");
#endif
#ifdef HAVE_INNOBASE_DB
puts("\
--innobase_data_home_dir=dir The common part for innobase table spaces\n
......@@ -3519,6 +3568,21 @@ static void get_options(int argc,char **argv)
#ifdef HAVE_GEMINI_DB
gemini_skip=1;
have_gemini=SHOW_OPTION_DISABLED;
break;
case OPT_GEMINI_RECOVER:
gemini_recovery_options_str=optarg;
if ((gemini_recovery_options=
find_bit_type(optarg, &gemini_recovery_typelib)) == ~(ulong) 0)
{
fprintf(stderr, "Unknown option to gemini-recovery: %s\n",optarg);
exit(1);
}
break;
case OPT_GEMINI_FLUSH_LOG:
gemini_options |= GEMOPT_FLUSH_LOG;
break;
case OPT_GEMINI_UNBUFFERED_IO:
gemini_options |= GEMOPT_UNBUFFERED_IO;
#endif
break;
case OPT_INNOBASE_SKIP:
......@@ -4002,6 +4066,68 @@ static int get_service_parameters()
{
SET_CHANGEABLE_VARVAL( "thread_concurrency" );
}
#ifdef HAVE_GEMINI_DB
else if ( lstrcmp(szKeyValueName, TEXT("GeminiLazyCommit")) == 0 )
{
CHECK_KEY_TYPE( REG_DWORD, szKeyValueName );
if ( *lpdwValue )
gemini_options |= GEMOPT_FLUSH_LOG;
else
gemini_options &= ~GEMOPT_FLUSH_LOG;
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiFullRecovery")) == 0 )
{
CHECK_KEY_TYPE( REG_DWORD, szKeyValueName );
if ( *lpdwValue )
gemini_options &= ~GEMOPT_NO_CRASH_PROTECTION;
else
gemini_options |= GEMOPT_NO_CRASH_PROTECTION;
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiNoRecovery")) == 0 )
{
CHECK_KEY_TYPE( REG_DWORD, szKeyValueName );
if ( *lpdwValue )
gemini_options |= GEMOPT_NO_CRASH_PROTECTION;
else
gemini_options &= ~GEMOPT_NO_CRASH_PROTECTION;
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiUnbufferedIO")) == 0 )
{
CHECK_KEY_TYPE( REG_DWORD, szKeyValueName );
if ( *lpdwValue )
gemini_options |= GEMOPT_UNBUFFERED_IO;
else
gemini_options &= ~GEMOPT_UNBUFFERED_IO;
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiLockTableSize")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_lock_table_size" );
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiBufferCache")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_buffer_cache" );
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiSpinRetries")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_spin_retries" );
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiIoThreads")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_io_threads" );
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiConnectionLimit")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_connection_limit" );
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiLogClusterSize")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_log_cluster_size" );
}
else if ( lstrcmp(szKeyValueName, TEXT("GeminiLockWaitTimeout")) == 0 )
{
SET_CHANGEABLE_VARVAL( "gemini_lock_wait_timeout" );
}
#endif
else
{
TCHAR szErrorMsg [ 512 ];
......
......@@ -2400,15 +2400,22 @@ int QUICK_SELECT::get_next()
for (;;)
{
int result;
if (range)
{ // Already read through key
int result=((range->flag & EQ_RANGE) ?
result=((range->flag & EQ_RANGE) ?
file->index_next_same(record, (byte*) range->min_key,
range->min_length) :
file->index_next(record));
if (!result && !cmp_next(*it.ref()))
if (!result)
{
if (!cmp_next(*it.ref()))
DBUG_RETURN(0);
}
else if (result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
}
if (!(range=it++))
DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used
if (range->flag & NO_MIN_RANGE) // Read first record
......@@ -2421,15 +2428,17 @@ int QUICK_SELECT::get_next()
range=0; // To next range
continue;
}
if (file->index_read(record,(byte*) range->min_key,
if ((result = file->index_read(record,(byte*) range->min_key,
range->min_length,
((range->flag & NEAR_MIN) ?
HA_READ_AFTER_KEY:
(range->flag & EQ_RANGE) ?
HA_READ_KEY_EXACT :
HA_READ_KEY_OR_NEXT)))
HA_READ_KEY_OR_NEXT))))
{
if (result != HA_ERR_KEY_NOT_FOUND)
DBUG_RETURN(result);
range=0; // Not found, to next range
continue;
}
......
......@@ -164,25 +164,30 @@ static int rr_sequential(READ_RECORD *info)
static int rr_from_tempfile(READ_RECORD *info)
{
int tmp;
tryNext:
if (my_b_read(info->io_cache,info->ref_pos,info->ref_length))
return -1; /* End of file */
int tmp=info->file->rnd_pos(info->record,info->ref_pos);
tmp=info->file->rnd_pos(info->record,info->ref_pos);
if (tmp)
{
if (tmp == HA_ERR_END_OF_FILE)
tmp= -1;
else if (tmp == HA_ERR_RECORD_DELETED)
goto tryNext;
else if (info->print_error)
info->file->print_error(tmp,MYF(0));
}
return tmp;
} /* rr_from_tempfile */
static int rr_from_pointers(READ_RECORD *info)
{
byte *cache_pos;
tryNext:
if (info->cache_pos == info->cache_end)
return -1; /* End of file */
byte *cache_pos=info->cache_pos;
cache_pos=info->cache_pos;
info->cache_pos+=info->ref_length;
int tmp=info->file->rnd_pos(info->record,cache_pos);
......@@ -190,6 +195,8 @@ static int rr_from_pointers(READ_RECORD *info)
{
if (tmp == HA_ERR_END_OF_FILE)
tmp= -1;
else if (tmp == HA_ERR_RECORD_DELETED)
goto tryNext;
else if (info->print_error)
info->file->print_error(tmp,MYF(0));
}
......
......@@ -105,6 +105,9 @@ THD::THD():user_time(0),fatal_error(0),last_insert_id_used(0),
#ifdef __WIN__
real_id = 0;
#endif
#ifdef HAVE_GEMINI_DB
bzero((char *)&gemini, sizeof(gemini));
#endif
#ifdef SIGNAL_WITH_VIO_CLOSE
active_vio = 0;
pthread_mutex_init(&active_vio_lock, NULL);
......
......@@ -250,6 +250,9 @@ public:
THD_TRANS stmt; /* Trans for current statement */
uint bdb_lock_count;
} transaction;
#ifdef HAVE_GEMINI_DB
struct st_gemini gemini;
#endif
Item *free_list;
CONVERT *convert_set;
Field *dupp_field;
......@@ -265,10 +268,11 @@ public:
max_join_size,sent_row_count;
table_map used_tables;
ulong query_id,version, inactive_timeout,options,thread_id;
ulong gemini_spin_retries;
long dbug_thread_id;
pthread_t real_id;
uint current_tablenr,tmp_table,cond_count,col_access,query_length;
uint server_status,open_options, gemini_spin_retries;
uint server_status,open_options;
enum_tx_isolation tx_isolation, session_tx_isolation;
char scramble[9];
bool slave_thread;
......
......@@ -136,12 +136,12 @@ typedef struct st_lex {
LEX_MASTER_INFO mi; // used by CHANGE MASTER
ulong thread_id,type;
ulong options;
ulong gemini_spin_retries;
enum_sql_command sql_command;
enum lex_states next_state;
enum enum_duplicates duplicates;
enum enum_tx_isolation tx_isolation;
uint in_sum_expr,grant,grant_tot_col,which_columns, sort_default;
uint gemini_spin_retries;
thr_lock_type lock_option;
bool create_refs,drop_primary,drop_if_exists,local_file;
bool in_comment,ignore_space,verbose;
......
......@@ -31,7 +31,7 @@ static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list,
bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
{
bool error=1,got_all_locks=1;
bool error=1,cerror,got_all_locks=1;
TABLE_LIST *lock_table,*ren_table=0;
DBUG_ENTER("mysql_rename_tables");
......@@ -85,7 +85,14 @@ end:
rename_tables(thd, table, 1);
/* Note that lock_table == 0 here, so the unlock loop will work */
}
if (!error)
/* Let's hope this doesn't fail as the result will be messy */
if ((cerror=ha_commit_rename(thd)))
{
my_error(ER_GET_ERRNO,MYF(0),cerror);
error= 1;
}
else if (!error)
{
mysql_update_log.write(thd,thd->query,thd->query_length);
if (mysql_bin_log.is_open())
......@@ -95,6 +102,7 @@ end:
}
send_ok(&thd->net);
}
for (TABLE_LIST *table=table_list ; table != lock_table ; table=table->next)
unlock_table_name(thd,table);
pthread_cond_broadcast(&COND_refresh);
......
......@@ -1170,6 +1170,12 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (mysql_rename_table(old_db_type,db,table_name,new_db,new_name))
error= -1;
}
if (!error && (error=ha_commit_rename(thd)))
{
my_error(ER_GET_ERRNO,MYF(0),error);
error=1;
}
VOID(pthread_cond_broadcast(&COND_refresh));
VOID(pthread_mutex_unlock(&LOCK_open));
if (!error)
......@@ -1603,6 +1609,7 @@ end_temporary:
DBUG_RETURN(0);
err:
(void) ha_commit_rename(thd); // Just for safety
DBUG_RETURN(-1);
}
......