Commit 2fb441fd authored by monty@mashka.mysql.fi

Remove wrong bug fix when calling create_sort_index.

Fix possible replication bug with LOAD DATA ... IGNORE LINES #
parent 6e7a509d
@@ -12,7 +12,10 @@ create table t1(n int not null auto_increment primary key);
 insert into t1 values (NULL);
 drop table t1;
 create table t1 (word char(20) not null);
-load data infile '../../std_data/words.dat' into table t1;
+load data infile '../../std_data/words.dat' into table t1 ignore 1 lines;
+select count(*) from t1;
+count(*)
+69
 drop table t1;
 show binlog events;
 Log_name	Pos	Event_type	Server_id	Orig_log_pos	Info
...
@@ -13,7 +13,8 @@ create table t1(n int not null auto_increment primary key);
 insert into t1 values (NULL);
 drop table t1;
 create table t1 (word char(20) not null);
-load data infile '../../std_data/words.dat' into table t1;
+load data infile '../../std_data/words.dat' into table t1 ignore 1 lines;
+select count(*) from t1;
 drop table t1;
 --replace_result $VERSION VERSION
 show binlog events;
@@ -35,8 +36,8 @@ flush logs;
 # So, depending on a few milliseconds, we end up with 2 rotate events in the
 # relay log or one, which influences the output of SHOW SLAVE STATUS, making
 # it not predictable and causing random test failures.
-# To make it predictable, we do a useless update now, but which has the interest
-# of making the slave catch both rotate events.
+# To make it predictable, we do a useless update now, but which has the
+# interest of making the slave catch both rotate events.
 create table t5 (a int);
 drop table t5;
...
@@ -329,8 +329,14 @@ void Load_log_event::pack_info(String* packet)
     pretty_print_str(&tmp, sql_ex.line_start, sql_ex.line_start_len);
   }
-  if ((int)skip_lines > 0)
-    tmp.append( " IGNORE %ld LINES ", (long) skip_lines);
+  if ((long) skip_lines > 0)
+  {
+    char nr_buff[32], *end;
+    tmp.append( " IGNORE ");
+    end= longlong10_to_str((longlong) skip_lines, nr_buff, 10);
+    tmp.append(nr_buff, (uint) (end-nr_buff));
+    tmp.append( " LINES");
+  }
   if (num_fields)
   {
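The pack_info() hunk above is the LOAD DATA ... IGNORE LINES part of this commit: the old call handed a printf-style format string to String::append(), whose second argument is (judging from the fix) taken as a byte count rather than as format arguments, so the skipped-line count never made it into the binlog Info text. A minimal standalone sketch of the corrected pattern, with std::string and snprintf standing in for the server's String class and longlong10_to_str():

#include <cstdio>
#include <string>

// Format the line count into a scratch buffer first, then append exactly
// those bytes; a plain append() has no printf semantics, so "%ld" would
// otherwise end up in the output verbatim (or be mis-sized).
static void append_ignore_lines(std::string &out, long skip_lines)
{
  if (skip_lines > 0)
  {
    char nr_buff[32];
    int len= snprintf(nr_buff, sizeof(nr_buff), "%ld", skip_lines);
    out.append(" IGNORE ");
    out.append(nr_buff, (size_t) len);
    out.append(" LINES");
  }
}

Appending a count of 1 this way yields " IGNORE 1 LINES", matching the fprintf() output in Load_log_event::print() below.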
@@ -1338,8 +1344,8 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db)
     pretty_print_str(file, sql_ex.line_start, sql_ex.line_start_len);
   }
-  if ((int)skip_lines > 0)
-    fprintf(file, " IGNORE %ld LINES ", (long) skip_lines);
+  if ((long) skip_lines > 0)
+    fprintf(file, " IGNORE %ld LINES", (long) skip_lines);
   if (num_fields)
   {
@@ -1934,20 +1940,22 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
     else if (sql_ex.opt_flags & IGNORE_FLAG)
       handle_dup= DUP_IGNORE;
     else
+    {
       /*
-        Note that when replication is running fine, if it was DUP_ERROR on the
+        When replication is running fine, if it was DUP_ERROR on the
         master then we could choose DUP_IGNORE here, because if DUP_ERROR
         suceeded on master, and data is identical on the master and slave,
         then there should be no uniqueness errors on slave, so DUP_IGNORE is
         the same as DUP_ERROR. But in the unlikely case of uniqueness errors
-        (because the data on the master and slave happen to be different (user
-        error or bug), we want LOAD DATA to print an error message on the
-        slave to discover the problem.
+        (because the data on the master and slave happen to be different
+        (user error or bug), we want LOAD DATA to print an error message on
+        the slave to discover the problem.
         If reading from net (a 3.23 master), mysql_load() will change this
         to DUP_IGNORE.
       */
       handle_dup= DUP_ERROR;
+    }
     sql_exchange ex((char*)fname, sql_ex.opt_flags & DUMPFILE_FLAG);
     String field_term(sql_ex.field_term,sql_ex.field_term_len);
...
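As a reading aid for the comment in the hunk above, here is a hedged sketch of the duplicate-handling decision the slave ends up with; the enum and parameter names are illustrative stand-ins, not the server's definitions, and the 3.23-master case is the one the comment says mysql_load() applies later:

#include <cstdint>

// Stand-ins for DUP_ERROR / DUP_IGNORE.
enum dup_policy { POLICY_ERROR, POLICY_IGNORE };

// Mirrors the sql_ex.opt_flags & IGNORE_FLAG test and the comment's reasoning.
static dup_policy choose_dup_policy(uint32_t opt_flags, uint32_t ignore_flag,
                                    bool reading_from_3_23_master)
{
  if (opt_flags & ignore_flag)      // master explicitly ran LOAD DATA ... IGNORE
    return POLICY_IGNORE;
  if (reading_from_3_23_master)     // old master: mysql_load() relaxes this to IGNORE
    return POLICY_IGNORE;
  // Default stays strict: identical data on master and slave should produce
  // no duplicate-key errors, and if it does, the slave should report it.
  return POLICY_ERROR;
}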
@@ -3579,8 +3579,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables)
   if (options & REFRESH_LOG)
   {
     /*
-      Flush the normal query log, the update log, the binary log, the slow query
-      log, and the relay log (if it exists).
+      Flush the normal query log, the update log, the binary log,
+      the slow query log, and the relay log (if it exists).
     */
     mysql_log.new_file(1);
     mysql_update_log.new_file(1);
...
@@ -972,10 +972,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
                          group ? group : order,
                          select_limit,
                          thd->select_limit))
-    {
-      if (!join.join_tab[join.const_tables].select->quick)
-        goto err;
-    }
+      goto err;
   }
   join.having=having;                           // Actually a parameter
   thd->proc_info="Sending data";
...
@@ -2024,7 +2024,7 @@ simple_expr:
           {
             LEX *lex=Lex;
             $$= new Item_func_week($3,new Item_int((char*) "0",
-              lex->thd->variables.default_week_format,1));
+                                    lex->thd->variables.default_week_format,1));
           }
         | WEEK_SYM '(' expr ',' expr ')'
           { $$= new Item_func_week($3,$5); }
...