Commit 5fd44f2c authored by unknown

Fix Bug#17600: Invalid data logged into mysql.slow_log

Recommit with post-review fixes.


mysql-test/r/log_tables.result:
  update result
mysql-test/t/log_tables.test:
  add a test
sql/log.cc:
  Fix NULL handling in log tables
storage/csv/ha_tina.cc:
  Explicitly check for NULLs when writing a row.
  We should not rely on the NULL field having been
  cleaned up: though we usually call Field::reset() or
  restore_row() before calling Field::set_null(),
  this dependency is neither documented nor enforced
  by other means.
mysql-test/t/log_tables-master.opt:
  New BitKeeper file ``mysql-test/t/log_tables-master.opt''
parent f2de9e82
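
For readers skimming the change: the sql/log.cc hunks below replace the old set_null()-based defaults with an explicit store() followed by set_notnull() for every logged column. Here is a small standalone C++ sketch of the underlying hazard; the Field struct is purely illustrative and only mimics the relevant bit of the server API, it is not MySQL code.

/*
  Standalone illustration (not MySQL code): a per-column NULL flag that
  survives from one logged row to the next unless the writer clears it.
*/
#include <cstdio>
#include <string>

struct Field
{
  std::string value;
  bool null_flag;

  Field() : null_flag(false) {}
  void store(const std::string &v) { value= v; }  /* value only; flag untouched */
  void set_null()    { null_flag= true; }
  void set_notnull() { null_flag= false; }
  const char *show() const { return null_flag ? "NULL" : value.c_str(); }
};

int main()
{
  Field sql_text;

  sql_text.set_null();                 /* row 1 logged a NULL in this column */

  sql_text.store("select sleep(2)");   /* row 2: value stored, flag still set */
  std::printf("store() only:          %s\n", sql_text.show());

  sql_text.set_notnull();              /* the fix: clear the flag explicitly */
  std::printf("store() + set_notnull: %s\n", sql_text.show());
  return 0;
}

Compiled as-is, the first printf still reports NULL even though a value was stored, which is the kind of stale flag the patch is guarding against when it writes rows into the log tables.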
mysql-test/r/log_tables.result
@@ -64,3 +64,11 @@ TIMESTAMP root[root] @ localhost [] 2 1 Query create table bug16905 (s char(15)
 TIMESTAMP root[root] @ localhost [] 2 1 Query insert into bug16905 values ('новое')
 TIMESTAMP root[root] @ localhost [] 2 1 Query select * from mysql.general_log
 drop table bug16905;
+truncate table mysql.slow_log;
+set session long_query_time=1;
+select sleep(2);
+sleep(2)
+0
+select * from mysql.slow_log;
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
+TIMESTAMP, root[root] @ localhost [] USER_HOST, QUERY_TIME 1 0 test 0 0 1 select sleep(2)
mysql-test/t/log_tables.test
@@ -160,6 +160,16 @@
 insert into bug16905 values ('новое');
 select * from mysql.general_log;
 drop table bug16905;
+
+#
+# Bug #17600: Invalid data logged into mysql.slow_log
+#
+truncate table mysql.slow_log;
+set session long_query_time=1;
+select sleep(2);
+--replace_column 1 TIMESTAMP, 3 USER_HOST, 4 QUERY_TIME
+select * from mysql.slow_log;
+
 # kill all connections
 disconnect con1;
 disconnect con2;
...
sql/log.cc
@@ -288,7 +288,7 @@ void Log_to_csv_event_handler::cleanup()
   Log command to the general log table
 
   SYNOPSIS
-    log_general_to_csv()
+    log_general()
 
     event_time        command start timestamp
     user_host         the pointer to the string with user@host info
@@ -322,18 +322,32 @@ bool Log_to_csv_event_handler::
   if (unlikely(!logger.is_log_tables_initialized))
     return FALSE;
 
+  /*
+    NOTE: we do not call restore_record() here, as all fields are
+    filled by the Logger (=> no need to load default ones).
+  */
+
   /* log table entries are not replicated at the moment */
   tmp_disable_binlog(current_thd);
 
+  /* Set current time. Required for CURRENT_TIMESTAMP to work */
   general_log_thd->start_time= event_time;
-  /* set default value (which is CURRENT_TIMESTAMP) */
-  table->field[0]->set_null();
+
+  /*
+    We do not set a value for table->field[0], as it will use
+    default value (which is CURRENT_TIMESTAMP).
+  */
+
   table->field[1]->store(user_host, user_host_len, client_cs);
+  table->field[1]->set_notnull();
   table->field[2]->store((longlong) thread_id);
+  table->field[2]->set_notnull();
   table->field[3]->store((longlong) server_id);
+  table->field[3]->set_notnull();
   table->field[4]->store(command_type, command_type_len, client_cs);
+  table->field[4]->set_notnull();
   table->field[5]->store(sql_text, sql_text_len, client_cs);
+  table->field[5]->set_notnull();
 
   table->file->ha_write_row(table->record[0]);
 
   reenable_binlog(current_thd);
@@ -346,7 +360,7 @@ bool Log_to_csv_event_handler::
   Log a query to the slow log table
 
   SYNOPSIS
-    log_slow_to_csv()
+    log_slow()
     thd               THD of the query
     current_time      current timestamp
     query_start_arg   command start timestamp
@@ -381,7 +395,7 @@ bool Log_to_csv_event_handler::
   TABLE *table= slow_log.table;
   CHARSET_INFO *client_cs= thd->variables.character_set_client;
 
-  DBUG_ENTER("log_slow_to_csv");
+  DBUG_ENTER("log_slow");
 
   /* below should never happen */
   if (unlikely(!logger.is_log_tables_initialized))
@@ -392,12 +406,15 @@ bool Log_to_csv_event_handler::
 
   /*
     Set start time for CURRENT_TIMESTAMP to the start of the query.
-    This will be default value for the field
+    This will be default value for the field[0]
   */
   slow_log_thd->start_time= query_start_arg;
+  restore_record(table, s->default_values);    // Get empty record
 
-  /* set default value (which is CURRENT_TIMESTAMP) */
-  table->field[0]->set_null();
+  /*
+    We do not set a value for table->field[0], as it will use
+    default value.
+  */
 
   /* store the value */
   table->field[1]->store(user_host, user_host_len, client_cs);
@@ -421,24 +438,28 @@ bool Log_to_csv_event_handler::
     table->field[5]->set_null();
   }
 
+  /* fill database field */
   if (thd->db)
-    /* fill database field */
+  {
     table->field[6]->store(thd->db, thd->db_length, client_cs);
-  else
-    table->field[6]->set_null();
+    table->field[6]->set_notnull();
+  }
 
   if (thd->last_insert_id_used)
+  {
     table->field[7]->store((longlong) thd->current_insert_id);
-  else
-    table->field[7]->set_null();
+    table->field[7]->set_notnull();
+  }
 
   /* set value if we do an insert on autoincrement column */
   if (thd->insert_id_used)
+  {
     table->field[8]->store((longlong) thd->last_insert_id);
-  else
-    table->field[8]->set_null();
+    table->field[8]->set_notnull();
+  }
 
   table->field[9]->store((longlong) server_id);
+  table->field[9]->set_notnull();
 
   /* sql_text */
   table->field[10]->store(sql_text,sql_text_len, client_cs);
...
storage/csv/ha_tina.cc
@@ -351,9 +351,20 @@ int ha_tina::encode_quote(byte *buf)
 
     const char *ptr;
     const char *end_ptr;
 
-    (*field)->val_str(&attribute,&attribute);
-    ptr= attribute.ptr();
-    end_ptr= attribute.length() + ptr;
+    /*
+      Write an empty string to the buffer in case of a NULL value.
+      Basically this is a safety check, as no one ensures that the
+      field content is cleaned up every time we use Field::set_null()
+      in the code.
+    */
+    if ((*field)->is_null())
+      ptr= end_ptr= 0;
+    else
+    {
+      (*field)->val_str(&attribute,&attribute);
+      ptr= attribute.ptr();
+      end_ptr= attribute.length() + ptr;
+    }
 
     buffer.append('"');
...
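
To round off the ha_tina.cc hunk above, here is a standalone sketch of the same NULL-safe quoting idea; encode_quoted() is a hypothetical helper, not the storage engine's API, and the real encode_quote() also escapes quotes and backslashes.

/* Standalone sketch (not ha_tina itself): quote a possibly-NULL value. */
#include <cstring>
#include <string>

std::string encode_quoted(const char *value, bool is_null)
{
  const char *ptr, *end_ptr;

  if (is_null)
    ptr= end_ptr= 0;                     /* NULL: emit "" rather than stale bytes */
  else
  {
    ptr= value;
    end_ptr= value + std::strlen(value);
  }

  std::string out;
  out+= '"';
  for (; ptr != end_ptr; ptr++)          /* loop body never runs for NULL input */
    out+= *ptr;
  out+= '"';
  return out;
}

With is_null set, the function returns "" no matter what value points at, which mirrors the intent of the ptr= end_ptr= 0 branch added in the patch.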