Commit 080298cf authored by unknown

Fixed test cases for innobase

Fixed a bug introduced by the last ORDER BY optimization
Changed log position to longlong to avoid warnings.


Docs/manual.texi:
  Update for innobase
mysql-test/r/innobase.result:
  Fixed test cases
mysql-test/t/bdb.test:
  Removed unused tables
mysql-test/t/innobase.test:
  Fixed test cases
sql/field.h:
  Fixed a bug introduced by the last ORDER BY optimization
sql/ha_berkeley.cc:
  Fixed bug when index_init() was called twice.
sql/mysql_priv.h:
  Added an option to disable the stack trace on crash (useful when running under gdb)
sql/mysqld.cc:
  Added an option to disable the stack trace on crash (useful when running under gdb)
sql/slave.cc:
  Changed log position to longlong to avoid warnings.
sql/slave.h:
  Changed log position to longlong to avoid warnings.
sql/sql_parse.cc:
  Removed warnings
sql/sql_select.cc:
  Cleanups
parent 9a1a850a
......@@ -23714,6 +23714,10 @@ You can't have a key on a @code{BLOB} or @code{TEXT} column.
rows, one by one, which isn't that fast.
@item
The maximum blob size is 8000 bytes.
@item
Before dropping a database that contains @code{INNOBASE} tables, you must
drop the individual tables first. If you do not, the space they used in the
Innobase tablespace will not be reclaimed.
@end itemize
@cindex tutorial
......@@ -251,9 +251,10 @@ id ggid email passwd
id ggid email passwd
2 test2 yyy
id ggid email passwd
2 test2 yyy
100 test2 xxx
1 this will work
3 test2 this will work
id ggid email passwd
1 this will work
id ggid email passwd
user_name password subscribed user_id quota weight access_date access_time approved dummy_primary_key
user_0 somepassword N 0 0 0 2000-09-07 23:06:59 2000-09-07 23:06:59 1
......
......@@ -4,7 +4,7 @@
# Small basic test with ignore
#
drop table if exists t1,t2;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8;
create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) type=bdb;
insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt');
......@@ -503,8 +503,8 @@ create table t6
unique file_platform(product_file_id,platform_id,branch_id)
) type=bdb;
drop table if exists ba_archive ;
create table ba_archive
drop table if exists t8 ;
create table t8
(
archive_id int auto_increment primary key,
branch_id int not null,
......@@ -531,53 +531,6 @@ create table t7
unique build(branch_id,target_id,build_number)
) type=bdb;
drop table if exists t4_build ;
create table t4_build
(
product_build_id int auto_increment primary key,
build_id int not null,
product_id int not null,
platform_id int not null,
/* flag whether this is a released build */
product_release int not null default 0,
/* user-defined tag, e.g. 'RealPlayer 8.0' */
release_tag varchar(255) not null,
unique product_build(build_id,product_id,platform_id),
index product_release(product_release),
index release_tag(release_tag)
) type=bdb;
drop table if exists t7_file ;
create table t7_file
(
build_file_id int auto_increment primary key,
product_build_id int not null,
product_file_id int not null,
/* actual filename in the build archive */
filename text not null,
/* actual path in the build archive */
file_path text not null,
/* file version string, e.g. '8.0.1.368' */
file_version varchar(255) not null,
unique build_file(product_build_id,product_file_id),
index file_version(file_version)
) type=bdb;
drop table if exists ba_status ;
create table ba_status
(
status_id int auto_increment primary key,
status_name varchar(255) not null,
status_desc text not null
) type=bdb;
insert into ba_status
(status_name, status_desc)
values
('new', 'This item has been newly added.'),
('archived', 'This item is currently archived.'),
('not archived', 'This item is currently not archived.'),
('obsolete', 'This item is obsolete.'),
('unknown', 'The status of this item is unknown.') ;
insert into t1 (branch_name)
values ('RealMedia');
......@@ -612,10 +565,10 @@ create temporary table tmp1
from t1, t2, t3, t4 ;
create temporary table tmp2
select tmp1.branch_id, tmp1.target_id, tmp1.platform_id, tmp1.product_id
from tmp1 left join ba_archive
from tmp1 left join t8
using (branch_id,target_id,platform_id,product_id)
where ba_archive.archive_id is null ;
insert into ba_archive
where t8.archive_id is null ;
insert into t8
(branch_id, target_id, platform_id, product_id, status_id)
select branch_id, target_id, platform_id, product_id, 1
from tmp2 ;
......@@ -670,39 +623,7 @@ insert into t7
(branch_id,target_id,build_number,build_tag,build_date,build_path)
values (4, 4, 100, 'foo target-010101-100', '2001-01-01', 'current');
insert into t4_build
(build_id, product_id, platform_id)
values (1, 3, 2);
insert into t4_build
(build_id, product_id, platform_id)
values (2, 3, 2);
insert into t4_build
(build_id, product_id, platform_id)
values (3, 1, 2);
insert into t4_build
(build_id, product_id, platform_id)
values (4, 1, 2);
insert into t4_build
(build_id, product_id, platform_id)
values (5, 5, 3);
insert into t7_file
(product_build_id, product_file_id, filename, file_path, file_version)
values (1, 3, 'win32-playinst.exe', 'RP8REV/current/playerall-022101-1071/win32-i386', '8.0.3.263');
insert into t7_file
(product_build_id, product_file_id, filename, file_path, file_version)
values (5, 5, 'file1.exe', 'foo branch/current/foo target-022101-1071/foo platform', 'version 1');
insert into t7_file
(product_build_id, product_file_id, filename, file_path, file_version)
values (5, 6, 'file2.exe', 'foo branch/current/foo target-022101-1071/foo platform', 'version 2');
update ba_archive
update t8
set status_id=2
where branch_id=2 and target_id=2 and platform_id=2 and product_id=1;
......@@ -727,7 +648,7 @@ where
t5.file_name = 'playinst' and
t3.platform_id = 2;
drop table t1, t2, t3, t4, t5, t6,t7;
drop table t1, t2, t3, t4, t5, t6, t7, t8;
#
# Test with blob + tinyint key
......
......@@ -61,7 +61,6 @@ CREATE TABLE t1 (
replace into t1 (gesuchnr,benutzer_id) values (2,1);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
--error 1022
replace into t1 (gesuchnr,benutzer_id) values (1,1);
select * from t1;
drop table t1;
......@@ -269,9 +268,7 @@ select * from t1 where ggid='test1';
select * from t1 where passwd='xxx';
select * from t1 where id=2;
--error 1022
replace into t1 (ggid,id) values ('this will work',1);
--error 1022
replace into t1 (ggid,passwd) values ('test2','this will work');
-- error 1062
update t1 set id=100,ggid='test2' where id=1;
......
......@@ -114,7 +114,7 @@ class Field {
if (tmp)
{
tmp->table=new_table;
tmp->key_start=tmp->part_of_key=0;
tmp->key_start=tmp->part_of_key=tmp->part_of_sortkey=0;
tmp->unireg_check=Field::NONE;
tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG);
tmp->reset_fields();
......
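As a side note, here is a minimal C++ sketch of the idea behind this field.h change, using hypothetical stand-in types (field_sketch and new_field_for_tmp_table are illustrations, not MySQL names): when a field is cloned for a temporary table, every cached "part of key" bitmap has to be cleared, including the one consulted by the ORDER BY optimization, because a stale part_of_sortkey makes the optimizer assume an index the temporary table does not have.

// Minimal sketch, not the actual Field class: clone a column descriptor for a
// temporary table and clear all cached key-membership bitmaps.
#include <cstdio>

struct field_sketch
{
  unsigned long key_start = 0;        // keys this column starts
  unsigned long part_of_key = 0;      // keys this column belongs to
  unsigned long part_of_sortkey = 0;  // keys usable for ORDER BY on this column

  field_sketch *new_field_for_tmp_table() const
  {
    field_sketch *tmp = new field_sketch(*this);
    // The temporary table has none of the original table's indexes, so
    // forgetting to clear part_of_sortkey here is exactly the kind of bug
    // the diff above fixes.
    tmp->key_start = tmp->part_of_key = tmp->part_of_sortkey = 0;
    return tmp;
  }
};

int main()
{
  field_sketch f;
  f.part_of_sortkey = 0x2;            // pretend the column is part of sort key 1
  field_sketch *tmp = f.new_field_for_tmp_table();
  std::printf("tmp->part_of_sortkey = %lu\n", tmp->part_of_sortkey);  // prints 0
  delete tmp;
  return 0;
}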
......@@ -1307,13 +1307,19 @@ int ha_berkeley::index_init(uint keynr)
int error;
DBUG_ENTER("index_init");
DBUG_PRINT("enter",("table: '%s' key: %d", table->real_name, keynr));
/*
Under some very rare conditions (like full joins) we may already have
an active cursor at this point
*/
if (cursor)
cursor->c_close(cursor);
active_index=keynr;
dbug_assert(cursor == 0);
if ((error=key_file[keynr]->cursor(key_file[keynr], transaction, &cursor,
table->reginfo.lock_type >
TL_WRITE_ALLOW_READ ?
0 : 0)))
cursor=0; // Safety /* purecov: inspected */
cursor=0; // Safety /* purecov: inspected */
bzero((char*) &last_key,sizeof(last_key));
DBUG_RETURN(error);
}
......
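For illustration, a minimal sketch of the pattern this index_init() fix follows, with hypothetical stand-in types (handler_sketch and Cursor are not the Berkeley DB API): if a previous index_init() left a cursor open, close it before opening a new one instead of asserting or leaking it.

// Minimal sketch: tolerate index_init() being called twice on the same handler.
#include <cstdio>

struct Cursor { int key; };

struct handler_sketch
{
  Cursor *cursor = nullptr;
  int active_index = -1;

  int index_init(int keynr)
  {
    // Rare case (e.g. some full joins): a cursor from an earlier
    // index_init() is still open, so close it instead of asserting.
    if (cursor)
    {
      delete cursor;
      cursor = nullptr;
    }
    active_index = keynr;
    cursor = new Cursor{keynr};       // open a fresh cursor on the requested key
    return 0;
  }

  ~handler_sketch() { delete cursor; }
};

int main()
{
  handler_sketch h;
  h.index_init(1);
  h.index_init(2);                    // second call no longer leaks the first cursor
  std::printf("active_index = %d\n", h.active_index);
  return 0;
}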
......@@ -137,6 +137,7 @@ void kill_one_thread(THD *thd, ulong id);
#define TEST_READCHECK 64 /* Force use of readcheck */
#define TEST_NO_EXTRA 128
#define TEST_CORE_ON_SIGNAL 256 /* Give core if signal */
#define TEST_NO_STACKTRACE 512
/* options for select set by the yacc parser (stored in lex->options) */
#define SELECT_DISTINCT 1
......
......@@ -1225,16 +1225,19 @@ static void init_signals(void)
struct sigaction sa; sa.sa_flags = 0;
sigemptyset(&sa.sa_mask);
sigprocmask(SIG_SETMASK,&sa.sa_mask,NULL);
if (!(test_flags & TEST_NO_STACKTRACE))
{
#ifdef HAVE_DARWIN_THREADS
sa.sa_handler=( void (*)() ) handle_segfault;
sa.sa_handler=( void (*)() ) handle_segfault;
#else
sa.sa_handler=handle_segfault;
sa.sa_handler=handle_segfault;
#endif
sigaction(SIGSEGV, &sa, NULL);
sigaction(SIGSEGV, &sa, NULL);
#ifdef SIGBUS
sigaction(SIGBUS, &sa, NULL);
sigaction(SIGBUS, &sa, NULL);
#endif
sigaction(SIGILL, &sa, NULL);
sigaction(SIGILL, &sa, NULL);
}
(void) sigemptyset(&set);
#ifdef THREAD_SPECIFIC_SIGPIPE
sigset(SIGPIPE,abort_thread);
......
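A minimal, self-contained sketch of the behaviour added here, assuming a global test_flags word as in mysqld (handle_fault and the main() below are illustrative): the fault handlers are installed only while the no-stacktrace flag is clear, so a debugger such as gdb can catch SIGSEGV itself when the flag is set.

// Minimal sketch: skip installing crash handlers when a test flag says so.
#include <signal.h>
#include <cstdio>
#include <cstdlib>

static unsigned long test_flags = 0;                 // assumed global, as in mysqld
static const unsigned long TEST_NO_STACKTRACE = 512; // same bit value as the diff

extern "C" void handle_fault(int sig)
{
  // The real handler prints a stack trace; this sketch just reports the signal.
  std::fprintf(stderr, "Got signal %d\n", sig);
  std::_Exit(1);
}

static void init_signals()
{
  if (!(test_flags & TEST_NO_STACKTRACE))            // skip when debugging under gdb
  {
    struct sigaction sa;
    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    sa.sa_handler = handle_fault;
    sigaction(SIGSEGV, &sa, NULL);
#ifdef SIGBUS
    sigaction(SIGBUS, &sa, NULL);
#endif
    sigaction(SIGILL, &sa, NULL);
  }
}

int main()
{
  test_flags |= TEST_NO_STACKTRACE;   // e.g. set from a --debug style option
  init_signals();                     // handlers are not installed here
  return 0;
}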
......@@ -588,7 +588,7 @@ int show_master_info(THD* thd)
net_store_data(packet, (uint32) glob_mi.port);
net_store_data(packet, (uint32) glob_mi.connect_retry);
net_store_data(packet, glob_mi.log_file_name);
net_store_data(packet, (uint32) glob_mi.pos);
net_store_data(packet, (uint32) glob_mi.pos); // QQ: Should be fixed
pthread_mutex_unlock(&glob_mi.lock);
pthread_mutex_lock(&LOCK_slave);
net_store_data(packet, slave_running ? "Yes":"No");
......@@ -619,7 +619,7 @@ int flush_master_info(MASTER_INFO* mi)
return 0;
}
int st_master_info::wait_for_pos(THD* thd, String* log_name, ulong log_pos)
int st_master_info::wait_for_pos(THD* thd, String* log_name, ulonglong log_pos)
{
if (!inited) return -1;
bool pos_reached;
......@@ -838,6 +838,7 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
{
Log_event * ev = Log_event::read_log_event((const char*)net->read_pos + 1,
event_len);
char llbuff[22];
if (ev)
{
......@@ -1006,8 +1007,9 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
TL_WRITE))
thd->query_error = 1;
if(thd->cuted_fields)
sql_print_error("Slave: load data infile at position %d in log \
'%s' produced %d warning(s)", glob_mi.pos, RPL_LOG_NAME, thd->cuted_fields );
sql_print_error("Slave: load data infile at position %s in log \
'%s' produced %d warning(s)", llstr(glob_mi.pos,llbuff), RPL_LOG_NAME,
thd->cuted_fields );
net->pkt_nr = thd->net.pkt_nr;
}
}
......@@ -1124,6 +1126,7 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
#endif
THD *thd; // needs to be first for thread_stack
MYSQL *mysql = NULL ;
char llbuff[22];
pthread_mutex_lock(&LOCK_slave);
if (!server_id)
......@@ -1150,7 +1153,7 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
int error = 1;
bool retried_once = 0;
uint32 last_failed_pos = 0;
ulonglong last_failed_pos = 0;
// needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff
my_thread_init();
......@@ -1168,8 +1171,8 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
thd->temporary_tables = save_temporary_tables; // restore temp tables
threads.append(thd);
DBUG_PRINT("info",("master info: log_file_name=%s, position=%d",
glob_mi.log_file_name, glob_mi.pos));
DBUG_PRINT("info",("master info: log_file_name=%s, position=%s",
glob_mi.log_file_name, llstr(glob_mi.pos,llbuff)));
if (!(mysql = mc_mysql_init(NULL)))
......@@ -1183,17 +1186,17 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
sql_print_error("Slave thread initialized");
#endif
// we can get killed during safe_connect
if(!safe_connect(thd, mysql, &glob_mi))
if (!safe_connect(thd, mysql, &glob_mi))
sql_print_error("Slave: connected to master '%s@%s:%d',\
replication started in log '%s' at position %ld", glob_mi.user,
glob_mi.host, glob_mi.port,
RPL_LOG_NAME,
glob_mi.pos);
replication started in log '%s' at position %s", glob_mi.user,
glob_mi.host, glob_mi.port,
RPL_LOG_NAME,
llstr(glob_mi.pos,llbuff));
else
{
sql_print_error("Slave thread killed while connecting to master");
goto err;
}
{
sql_print_error("Slave thread killed while connecting to master");
goto err;
}
while (!slave_killed(thd))
{
......@@ -1227,9 +1230,10 @@ dump");
}
thd->proc_info = "Reconnecting after a failed dump request";
last_failed_pos=glob_mi.pos;
sql_print_error("Slave: failed dump request, reconnecting to \
try again, log '%s' at position %ld", RPL_LOG_NAME,
last_failed_pos = glob_mi.pos );
try again, log '%s' at position %s", RPL_LOG_NAME,
llstr(last_failed_pos,llbuff));
if(safe_reconnect(thd, mysql, &glob_mi) || slave_killed(thd))
{
sql_print_error("Slave thread killed during or after reconnect");
......@@ -1267,9 +1271,10 @@ reconnect after a failed read");
goto err;
}
thd->proc_info = "Reconnecting after a failed read";
last_failed_pos= glob_mi.pos;
sql_print_error("Slave: Failed reading log event, \
reconnecting to retry, log '%s' position %ld", RPL_LOG_NAME,
last_failed_pos = glob_mi.pos);
reconnecting to retry, log '%s' position %s", RPL_LOG_NAME,
llstr(last_failed_pos, llbuff));
if(safe_reconnect(thd, mysql, &glob_mi) || slave_killed(thd))
{
sql_print_error("Slave thread killed during or after a \
......@@ -1285,8 +1290,8 @@ reconnect done to recover from failed read");
sql_print_error("\
Error running query, slave aborted. Fix the problem, and re-start \
the slave thread with \"mysqladmin start-slave\". We stopped at log \
'%s' position %ld",
RPL_LOG_NAME, glob_mi.pos);
'%s' position %s",
RPL_LOG_NAME, llstr(glob_mi.pos, llbuff));
goto err;
// there was an error running the query
// abort the slave thread, when the problem is fixed, the user
......@@ -1328,8 +1333,8 @@ the slave thread with \"mysqladmin start-slave\". We stopped at log \
err:
// print the current replication position
sql_print_error("Slave thread exiting, replication stopped in log '%s' at \
position %ld",
RPL_LOG_NAME, glob_mi.pos);
position %s",
RPL_LOG_NAME, llstr(glob_mi.pos,llbuff));
thd->query = thd->db = 0; // extra safety
if(mysql)
mc_mysql_close(mysql);
......@@ -1382,6 +1387,8 @@ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
{
int slave_was_killed;
char llbuff[22];
// if we lost connection after reading a state set event
// we will be re-reading it, so pending needs to be cleared
mi->pending = 0;
......@@ -1398,10 +1405,10 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
if(!slave_was_killed)
sql_print_error("Slave: reconnected to master '%s@%s:%d',\
replication resumed in log '%s' at position %ld", glob_mi.user,
glob_mi.host, glob_mi.port,
RPL_LOG_NAME,
glob_mi.pos);
replication resumed in log '%s' at position %s", glob_mi.user,
glob_mi.host, glob_mi.port,
RPL_LOG_NAME,
llstr(glob_mi.pos,llbuff));
return slave_was_killed;
}
......
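For context, a minimal sketch of why the messages above switch from %ld to %s plus llstr(): the replication position is now 64 bits wide, and formatting it into a small string buffer first avoids truncation and compiler warnings on platforms where long is only 32 bits. my_llstr below is a hypothetical stand-in for MySQL's llstr() helper, not its actual implementation.

// Minimal sketch: print a 64-bit log position through a string buffer.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static char *my_llstr(unsigned long long value, char *buff)
{
  // 22 bytes is enough for a sign, 20 decimal digits and the terminating NUL.
  std::snprintf(buff, 22, "%" PRIu64, (uint64_t) value);
  return buff;
}

int main()
{
  unsigned long long pos = 4294967296ULL;   // larger than any 32-bit position
  char llbuff[22];
  std::printf("replication started in log 'x-bin.001' at position %s\n",
              my_llstr(pos, llbuff));
  return 0;
}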
......@@ -4,7 +4,7 @@
typedef struct st_master_info
{
char log_file_name[FN_REFLEN];
uint32 pos,pending;
ulonglong pos,pending;
File fd; // we keep the file open, so we need to remember the file pointer
IO_CACHE file;
// the variables below are needed because we can change masters on the fly
......@@ -29,11 +29,11 @@ typedef struct st_master_info
pthread_mutex_destroy(&lock);
pthread_cond_destroy(&cond);
}
inline void inc_pending(uint32 val)
inline void inc_pending(ulonglong val)
{
pending += val;
}
inline void inc_pos(uint32 val)
inline void inc_pos(ulonglong val)
{
pthread_mutex_lock(&lock);
pos += val + pending;
......@@ -43,14 +43,14 @@ typedef struct st_master_info
}
// thread safe read of position - not needed if we are in the slave thread,
// but required otherwise
inline void read_pos(uint32& var)
inline void read_pos(ulonglong& var)
{
pthread_mutex_lock(&lock);
var = pos;
pthread_mutex_unlock(&lock);
}
int wait_for_pos(THD* thd, String* log_name, ulong log_pos);
int wait_for_pos(THD* thd, String* log_name, ulonglong log_pos);
} MASTER_INFO;
typedef struct st_table_rule_ent
......
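A minimal sketch of the locking pattern in MASTER_INFO after this change, using std::mutex instead of pthreads and a hypothetical master_info_sketch type: a 64-bit position cannot be assumed to be readable atomically on every platform, so threads other than the slave thread copy it under the mutex, mirroring read_pos() and inc_pos() above.

// Minimal sketch: mutex-protected updates and reads of a 64-bit position.
#include <mutex>
#include <cstdio>

struct master_info_sketch
{
  unsigned long long pos = 0;
  unsigned long long pending = 0;
  std::mutex lock;

  void inc_pos(unsigned long long val)
  {
    std::lock_guard<std::mutex> guard(lock);
    pos += val + pending;               // fold in any pending bytes, as above
    pending = 0;
  }

  // Thread-safe read, needed by threads other than the slave thread.
  void read_pos(unsigned long long &var)
  {
    std::lock_guard<std::mutex> guard(lock);
    var = pos;
  }
};

int main()
{
  master_info_sketch mi;
  mi.inc_pos(4294967296ULL);            // position past the 32-bit limit
  unsigned long long p;
  mi.read_pos(p);
  std::printf("pos = %llu\n", p);
  return 0;
}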
......@@ -263,7 +263,7 @@ static void decrease_user_connections(const char *user, const char *host)
{
char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2];
int temp_len;
struct user_conn uucc, *uc;
struct user_conn *uc;
if (!max_user_connections)
return;
if (!user)
......@@ -285,7 +285,7 @@ static void decrease_user_connections(const char *user, const char *host)
if (! --uc->connections)
{
/* Last connection for user; Delete it */
(void) hash_delete(&hash_user_connections,(char *) uc);
(void) hash_delete(&hash_user_connections,(byte*) uc);
}
end:
(void) pthread_mutex_unlock(&LOCK_user_conn);
......
......@@ -4451,7 +4451,8 @@ join_init_read_first_with_key(JOIN_TAB *tab)
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
sql_print_error("read_first_with_key: Got error %d when reading table",error);
sql_print_error("read_first_with_key: Got error %d when reading table",
error);
table->file->print_error(error,MYF(0));
return 1;
}
......@@ -4499,7 +4500,7 @@ join_init_read_last_with_key(JOIN_TAB *tab)
{
if (error != HA_ERR_END_OF_FILE)
{
sql_print_error("read_first_with_key: Got error %d when reading table",
sql_print_error("read_last_with_key: Got error %d when reading table",
error, table->path);
table->file->print_error(error,MYF(0));
return 1;
......@@ -5176,7 +5177,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit)
tab->index=nr;
tab->read_first_record= (flag > 0 ? join_init_read_first_with_key:
join_init_read_last_with_key);
tab->table->file->index_init(tab->index);
table->file->index_init(nr);
tab->type=JT_NEXT; // Read with index_first(), index_next()
DBUG_RETURN(1);
}
......