Commit 9b076952 authored by Jon Olav Hauglid's avatar Jon Olav Hauglid

Bug#11853126 RE-ENABLE CONCURRENT READS WHILE CREATING

             SECONDARY INDEX IN INNODB

The patches for Bug#11751388 and Bug#11784056 enabled concurrent
reads while creating secondary indexes in InnoDB. However, they
introduced a regression. This regression occurred if ALTER TABLE
failed after the index had been added, for example during the
lock upgrade needed to update .FRM. If this happened, InnoDB
and the server got out of sync with regards to which indexes
actually existed. Therefore the patch for Bug#11815600 again
disabled concurrent reads.

This patch re-enables concurrent reads. The original regression
is fixed by splitting the ADD INDEX operation into two parts.
First the new index is created but not made active. This is
done while concurrent reads are allowed. The second part of
the operation makes the index active (or reverts the change).
This is done after lock upgrade, which prevents the original
regression.

In order to implement this change, the patch changes the storage
API for in-place index creation. handler::add_index() is split
into two functions, handler_add_index() and
handler::final_add_index(). The former for creating indexes without
making them visible and the latter for committing (i.e. making
visible) new indexes or reverting the changes.

Large parts of this patch were written by Marko Mäkelä.

Test case added to innodb_mysql_lock.test.
parent 9e2b7fa7
...@@ -153,6 +153,7 @@ DROP VIEW v1; ...@@ -153,6 +153,7 @@ DROP VIEW v1;
# KEY NO 0 FOR TABLE IN ERROR LOG # KEY NO 0 FOR TABLE IN ERROR LOG
# #
DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t1;
# Test 1: Secondary index
# Connection default # Connection default
CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE = InnoDB; CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE = InnoDB;
INSERT INTO t1 VALUES (1, 12345); INSERT INTO t1 VALUES (1, 12345);
...@@ -164,9 +165,28 @@ id value ...@@ -164,9 +165,28 @@ id value
SET lock_wait_timeout=1; SET lock_wait_timeout=1;
ALTER TABLE t1 ADD INDEX idx(value); ALTER TABLE t1 ADD INDEX idx(value);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ALTER TABLE t1 ADD INDEX idx(value);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
# Connection default # Connection default
SELECT * FROM t1; SELECT * FROM t1;
id value id value
1 12345 1 12345
COMMIT; COMMIT;
DROP TABLE t1; DROP TABLE t1;
# Test 2: Primary index
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
INSERT INTO t1 VALUES (1, 12345), (2, 23456);
# Connection con1
SET SESSION debug= "+d,alter_table_rollback_new_index";
ALTER TABLE t1 ADD PRIMARY KEY(a);
ERROR HY000: Unknown error
SELECT * FROM t1;
a b
1 12345
2 23456
# Connection default
SELECT * FROM t1;
a b
1 12345
2 23456
DROP TABLE t1;
...@@ -94,6 +94,74 @@ SET DEBUG_SYNC= 'RESET'; ...@@ -94,6 +94,74 @@ SET DEBUG_SYNC= 'RESET';
# Bug#42230 during add index, cannot do queries on storage engines # Bug#42230 during add index, cannot do queries on storage engines
# that implement add_index # that implement add_index
# #
# DROP DATABASE IF EXISTS db1;
# DISABLED due to Bug#11815600 DROP TABLE IF EXISTS t1;
# # Test 1: Secondary index, should not block reads (original test case).
# Connection default
CREATE DATABASE db1;
CREATE TABLE db1.t1(id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, value INT) engine=innodb;
INSERT INTO db1.t1(value) VALUES (1), (2);
SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE db1.t1 ADD INDEX(value);
# Connection con1
SET DEBUG_SYNC= "now WAIT_FOR manage";
USE db1;
SELECT * FROM t1;
id value
1 1
2 2
SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE db1.t1 ADD INDEX(value)
DROP DATABASE db1;
# Test 2: Primary index (implicit), should block reads.
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE t1 ADD UNIQUE INDEX(a);
# Connection con1
SET DEBUG_SYNC= "now WAIT_FOR manage";
USE test;
# Sending:
SELECT * FROM t1;
# Connection con2
# Waiting for SELECT to be blocked by the metadata lock on t1
SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE t1 ADD UNIQUE INDEX(a)
# Connection con1
# Reaping: SELECT * FROM t1
a b
# Test 3: Primary index (explicit), should block reads.
# Connection default
ALTER TABLE t1 DROP INDEX a;
SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE t1 ADD PRIMARY KEY (a);
# Connection con1
SET DEBUG_SYNC= "now WAIT_FOR manage";
# Sending:
SELECT * FROM t1;
# Connection con2
# Waiting for SELECT to be blocked by the metadata lock on t1
SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE t1 ADD PRIMARY KEY (a)
# Connection con1
# Reaping: SELECT * FROM t1
a b
# Test 4: Secondary unique index, should not block reads.
# Connection default
SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE t1 ADD UNIQUE (b);
# Connection con1
SET DEBUG_SYNC= "now WAIT_FOR manage";
SELECT * FROM t1;
a b
SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE t1 ADD UNIQUE (b)
SET DEBUG_SYNC= "RESET";
DROP TABLE t1;
...@@ -290,6 +290,8 @@ DROP TABLE IF EXISTS t1; ...@@ -290,6 +290,8 @@ DROP TABLE IF EXISTS t1;
--connect (con1,localhost,root) --connect (con1,localhost,root)
--echo # Test 1: Secondary index
--echo # Connection default --echo # Connection default
connection default; connection default;
CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE = InnoDB; CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE = InnoDB;
...@@ -300,6 +302,10 @@ SELECT * FROM t1; ...@@ -300,6 +302,10 @@ SELECT * FROM t1;
--echo # Connection con1 --echo # Connection con1
--connection con1 --connection con1
SET lock_wait_timeout=1; SET lock_wait_timeout=1;
# Test with two timeouts, as the first version of this patch
# only worked with one timeout.
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 ADD INDEX idx(value);
--error ER_LOCK_WAIT_TIMEOUT --error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 ADD INDEX idx(value); ALTER TABLE t1 ADD INDEX idx(value);
...@@ -308,6 +314,22 @@ ALTER TABLE t1 ADD INDEX idx(value); ...@@ -308,6 +314,22 @@ ALTER TABLE t1 ADD INDEX idx(value);
SELECT * FROM t1; SELECT * FROM t1;
COMMIT; COMMIT;
DROP TABLE t1; DROP TABLE t1;
--echo # Test 2: Primary index
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
INSERT INTO t1 VALUES (1, 12345), (2, 23456);
--echo # Connection con1
--connection con1
SET SESSION debug= "+d,alter_table_rollback_new_index";
--error ER_UNKNOWN_ERROR
ALTER TABLE t1 ADD PRIMARY KEY(a);
SELECT * FROM t1;
--echo # Connection default
--connection default
SELECT * FROM t1;
DROP TABLE t1;
disconnect con1; disconnect con1;
......
...@@ -152,133 +152,129 @@ disconnect con1; ...@@ -152,133 +152,129 @@ disconnect con1;
--echo # that implement add_index --echo # that implement add_index
--echo # --echo #
--echo # --disable_warnings
--echo # DISABLED due to Bug#11815600 DROP DATABASE IF EXISTS db1;
--echo # DROP TABLE IF EXISTS t1;
--enable_warnings
#--disable_warnings connect(con1,localhost,root);
#DROP DATABASE IF EXISTS db1; connect(con2,localhost,root);
#DROP TABLE IF EXISTS t1;
#--enable_warnings --echo # Test 1: Secondary index, should not block reads (original test case).
#
#connect(con1,localhost,root); --echo # Connection default
#connect(con2,localhost,root); connection default;
# CREATE DATABASE db1;
#--echo # Test 1: Secondary index, should not block reads (original test case). CREATE TABLE db1.t1(id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, value INT) engine=innodb;
# INSERT INTO db1.t1(value) VALUES (1), (2);
#--echo # Connection default SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
#connection default; --echo # Sending:
#CREATE DATABASE db1; --send ALTER TABLE db1.t1 ADD INDEX(value)
#CREATE TABLE db1.t1(id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, value INT) engine=innodb;
#INSERT INTO db1.t1(value) VALUES (1), (2); --echo # Connection con1
#SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query"; connection con1;
#--echo # Sending: SET DEBUG_SYNC= "now WAIT_FOR manage";
#--send ALTER TABLE db1.t1 ADD INDEX(value) # Neither of these two statements should be blocked
# USE db1;
#--echo # Connection con1 SELECT * FROM t1;
#connection con1; SET DEBUG_SYNC= "now SIGNAL query";
#SET DEBUG_SYNC= "now WAIT_FOR manage";
# # Neither of these two statements should be blocked --echo # Connection default
#USE db1; connection default;
#SELECT * FROM t1; --echo # Reaping: ALTER TABLE db1.t1 ADD INDEX(value)
#SET DEBUG_SYNC= "now SIGNAL query"; --reap
# DROP DATABASE db1;
#--echo # Connection default
#connection default; --echo # Test 2: Primary index (implicit), should block reads.
#--echo # Reaping: ALTER TABLE db1.t1 ADD INDEX(value)
#--reap CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
#DROP DATABASE db1; SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
# --echo # Sending:
#--echo # Test 2: Primary index (implicit), should block reads. --send ALTER TABLE t1 ADD UNIQUE INDEX(a)
#
#CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb; --echo # Connection con1
#SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query"; connection con1;
#--echo # Sending: SET DEBUG_SYNC= "now WAIT_FOR manage";
#--send ALTER TABLE t1 ADD UNIQUE INDEX(a) USE test;
# --echo # Sending:
#--echo # Connection con1 --send SELECT * FROM t1
#connection con1;
#SET DEBUG_SYNC= "now WAIT_FOR manage"; --echo # Connection con2
#USE test; connection con2;
#--echo # Sending: --echo # Waiting for SELECT to be blocked by the metadata lock on t1
#--send SELECT * FROM t1 let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
# WHERE state= 'Waiting for table metadata lock'
#--echo # Connection con2 AND info='SELECT * FROM t1';
#connection con2; --source include/wait_condition.inc
#--echo # Waiting for SELECT to be blocked by the metadata lock on t1 SET DEBUG_SYNC= "now SIGNAL query";
#let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
# WHERE state= 'Waiting for table metadata lock' --echo # Connection default
# AND info='SELECT * FROM t1'; connection default;
#--source include/wait_condition.inc --echo # Reaping: ALTER TABLE t1 ADD UNIQUE INDEX(a)
#SET DEBUG_SYNC= "now SIGNAL query"; --reap
#
#--echo # Connection default --echo # Connection con1
#connection default; connection con1;
#--echo # Reaping: ALTER TABLE t1 ADD UNIQUE INDEX(a) --echo # Reaping: SELECT * FROM t1
#--reap --reap
#
#--echo # Connection con1 --echo # Test 3: Primary index (explicit), should block reads.
#connection con1;
#--echo # Reaping: SELECT * FROM t1 --echo # Connection default
#--reap connection default;
# ALTER TABLE t1 DROP INDEX a;
#--echo # Test 3: Primary index (explicit), should block reads. SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
# --echo # Sending:
#--echo # Connection default --send ALTER TABLE t1 ADD PRIMARY KEY (a)
#connection default;
#ALTER TABLE t1 DROP INDEX a; --echo # Connection con1
#SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query"; connection con1;
#--echo # Sending: SET DEBUG_SYNC= "now WAIT_FOR manage";
#--send ALTER TABLE t1 ADD PRIMARY KEY (a) --echo # Sending:
# --send SELECT * FROM t1
#--echo # Connection con1
#connection con1; --echo # Connection con2
#SET DEBUG_SYNC= "now WAIT_FOR manage"; connection con2;
#--echo # Sending: --echo # Waiting for SELECT to be blocked by the metadata lock on t1
#--send SELECT * FROM t1 let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
# WHERE state= 'Waiting for table metadata lock'
#--echo # Connection con2 AND info='SELECT * FROM t1';
#connection con2; --source include/wait_condition.inc
#--echo # Waiting for SELECT to be blocked by the metadata lock on t1 SET DEBUG_SYNC= "now SIGNAL query";
#let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
# WHERE state= 'Waiting for table metadata lock' --echo # Connection default
# AND info='SELECT * FROM t1'; connection default;
#--source include/wait_condition.inc --echo # Reaping: ALTER TABLE t1 ADD PRIMARY KEY (a)
#SET DEBUG_SYNC= "now SIGNAL query"; --reap
#
#--echo # Connection default --echo # Connection con1
#connection default; connection con1;
#--echo # Reaping: ALTER TABLE t1 ADD PRIMARY KEY (a) --echo # Reaping: SELECT * FROM t1
#--reap --reap
#
#--echo # Connection con1 --echo # Test 4: Secondary unique index, should not block reads.
#connection con1;
#--echo # Reaping: SELECT * FROM t1 --echo # Connection default
#--reap connection default;
# SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
#--echo # Test 4: Secondary unique index, should not block reads. --echo # Sending:
# --send ALTER TABLE t1 ADD UNIQUE (b)
#--echo # Connection default
#connection default; --echo # Connection con1
#SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query"; connection con1;
#--echo # Sending: SET DEBUG_SYNC= "now WAIT_FOR manage";
#--send ALTER TABLE t1 ADD UNIQUE (b) SELECT * FROM t1;
# SET DEBUG_SYNC= "now SIGNAL query";
#--echo # Connection con1
#connection con1; --echo # Connection default
#SET DEBUG_SYNC= "now WAIT_FOR manage"; connection default;
#SELECT * FROM t1; --echo # Reaping: ALTER TABLE t1 ADD UNIQUE (b)
#SET DEBUG_SYNC= "now SIGNAL query"; --reap
#
#--echo # Connection default disconnect con1;
#connection default; disconnect con2;
#--echo # Reaping: ALTER TABLE t1 ADD UNIQUE (b) SET DEBUG_SYNC= "RESET";
#--reap DROP TABLE t1;
#
#disconnect con1;
#disconnect con2;
#SET DEBUG_SYNC= "RESET";
#DROP TABLE t1;
# Check that all connections opened by test cases in this file are really # Check that all connections opened by test cases in this file are really
......
...@@ -6656,22 +6656,29 @@ bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info, ...@@ -6656,22 +6656,29 @@ bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info,
/** /**
Support of fast or online add/drop index Support of in-place add/drop index
*/ */
int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
handler_add_index **add)
{ {
handler **file; handler **file;
int ret= 0; int ret= 0;
DBUG_ENTER("ha_partition::add_index"); DBUG_ENTER("ha_partition::add_index");
*add= new handler_add_index(table, key_info, num_of_keys);
/* /*
There has already been a check in fix_partition_func in mysql_alter_table There has already been a check in fix_partition_func in mysql_alter_table
before this call, which checks for unique/primary key violations of the before this call, which checks for unique/primary key violations of the
partitioning function. So no need for extra check here. partitioning function. So no need for extra check here.
*/ */
for (file= m_file; *file; file++) for (file= m_file; *file; file++)
if ((ret= (*file)->add_index(table_arg, key_info, num_of_keys))) {
handler_add_index *add_index;
if ((ret= (*file)->add_index(table_arg, key_info, num_of_keys, &add_index)))
goto err;
if ((ret= (*file)->final_add_index(add_index, true)))
goto err; goto err;
}
DBUG_RETURN(ret); DBUG_RETURN(ret);
err: err:
if (file > m_file) if (file > m_file)
...@@ -6696,6 +6703,42 @@ err: ...@@ -6696,6 +6703,42 @@ err:
} }
/**
Second phase of in-place add index.
@note If commit is false, index changes are rolled back by dropping the
added indexes. If commit is true, nothing is done as the indexes
were already made active in ::add_index()
*/
int ha_partition::final_add_index(handler_add_index *add, bool commit)
{
DBUG_ENTER("ha_partition::final_add_index");
// Rollback by dropping indexes.
if (!commit)
{
TABLE *table_arg= add->table;
uint num_of_keys= add->num_of_keys;
handler **file;
uint *key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys);
uint old_num_of_keys= table_arg->s->keys;
uint i;
/* The newly created keys have the last id's */
for (i= 0; i < num_of_keys; i++)
key_numbers[i]= i + old_num_of_keys;
if (!table_arg->key_info)
table_arg->key_info= add->key_info;
for (file= m_file; *file; file++)
{
(void) (*file)->prepare_drop_index(table_arg, key_numbers, num_of_keys);
(void) (*file)->final_drop_index(table_arg);
}
if (table_arg->key_info == add->key_info)
table_arg->key_info= NULL;
}
DBUG_RETURN(0);
}
int ha_partition::prepare_drop_index(TABLE *table_arg, uint *key_num, int ha_partition::prepare_drop_index(TABLE *table_arg, uint *key_num,
uint num_of_keys) uint num_of_keys)
{ {
......
...@@ -1055,7 +1055,9 @@ public: ...@@ -1055,7 +1055,9 @@ public:
They are used for on-line/fast alter table add/drop index: They are used for on-line/fast alter table add/drop index:
------------------------------------------------------------------------- -------------------------------------------------------------------------
*/ */
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys); virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
handler_add_index **add);
virtual int final_add_index(handler_add_index *add, bool commit);
virtual int prepare_drop_index(TABLE *table_arg, uint *key_num, virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
uint num_of_keys); uint num_of_keys);
virtual int final_drop_index(TABLE *table_arg); virtual int final_drop_index(TABLE *table_arg);
......
...@@ -1159,6 +1159,27 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map); ...@@ -1159,6 +1159,27 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
*/ */
#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1) #define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1)
/**
Index creation context.
Created by handler::add_index() and freed by handler::final_add_index().
*/
class handler_add_index
{
public:
/* Table where the indexes are added */
TABLE* const table;
/* Indexes being created */
KEY* const key_info;
/* Size of key_info[] */
const uint num_of_keys;
handler_add_index(TABLE *table_arg, KEY *key_info_arg, uint num_of_keys_arg)
: table (table_arg), key_info (key_info_arg), num_of_keys (num_of_keys_arg)
{}
virtual ~handler_add_index() {}
};
/** /**
The handler class is the interface for dynamically loadable The handler class is the interface for dynamically loadable
storage engines. Do not add ifdefs and take care when adding or storage engines. Do not add ifdefs and take care when adding or
...@@ -1717,8 +1738,36 @@ public: ...@@ -1717,8 +1738,36 @@ public:
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0; virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) /**
First phase of in-place add index.
Handlers are supposed to create new indexes here but not make them
visible.
@param table_arg Table to add index to
@param key_info Information about new indexes
@param num_of_key Number of new indexes
@param add[out] Context of handler specific information needed
for final_add_index().
@note This function can be called with less than exclusive metadata
lock depending on which flags are listed in alter_table_flags.
*/
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
handler_add_index **add)
{ return (HA_ERR_WRONG_COMMAND); } { return (HA_ERR_WRONG_COMMAND); }
/**
Second and last phase of in-place add index.
Commit or rollback pending new indexes.
@param add Context of handler specific information from add_index().
@param commit If true, commit. If false, rollback index changes.
@note This function is called with exclusive metadata lock.
*/
virtual int final_add_index(handler_add_index *add, bool commit)
{ return (HA_ERR_WRONG_COMMAND); }
virtual int prepare_drop_index(TABLE *table_arg, uint *key_num, virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
uint num_of_keys) uint num_of_keys)
{ return (HA_ERR_WRONG_COMMAND); } { return (HA_ERR_WRONG_COMMAND); }
......
...@@ -5680,6 +5680,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, ...@@ -5680,6 +5680,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
uint index_drop_count= 0; uint index_drop_count= 0;
uint *index_drop_buffer= NULL; uint *index_drop_buffer= NULL;
uint index_add_count= 0; uint index_add_count= 0;
handler_add_index *add= NULL;
bool pending_inplace_add_index= false;
uint *index_add_buffer= NULL; uint *index_add_buffer= NULL;
uint candidate_key_count= 0; uint candidate_key_count= 0;
bool no_pk; bool no_pk;
...@@ -6434,6 +6436,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, ...@@ -6434,6 +6436,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
} }
thd->count_cuted_fields= CHECK_FIELD_IGNORE; thd->count_cuted_fields= CHECK_FIELD_IGNORE;
if (error)
goto err_new_table_cleanup;
/* If we did not need to copy, we might still need to add/drop indexes. */ /* If we did not need to copy, we might still need to add/drop indexes. */
if (! new_table) if (! new_table)
{ {
...@@ -6465,7 +6470,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, ...@@ -6465,7 +6470,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
key_part->field= table->field[key_part->fieldnr]; key_part->field= table->field[key_part->fieldnr];
} }
/* Add the indexes. */ /* Add the indexes. */
if ((error= table->file->add_index(table, key_info, index_add_count))) if ((error= table->file->add_index(table, key_info, index_add_count,
&add)))
{ {
/* /*
Exchange the key_info for the error message. If we exchange Exchange the key_info for the error message. If we exchange
...@@ -6477,11 +6483,27 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, ...@@ -6477,11 +6483,27 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
table->key_info= save_key_info; table->key_info= save_key_info;
goto err_new_table_cleanup; goto err_new_table_cleanup;
} }
pending_inplace_add_index= true;
} }
/*end of if (index_add_count)*/ /*end of if (index_add_count)*/
if (index_drop_count) if (index_drop_count)
{ {
/* Currently we must finalize add index if we also drop indexes */
if (pending_inplace_add_index)
{
/* Committing index changes needs exclusive metadata lock. */
DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE,
table_list->db,
table_list->table_name,
MDL_EXCLUSIVE));
if ((error= table->file->final_add_index(add, true)))
{
table->file->print_error(error, MYF(0));
goto err_new_table_cleanup;
}
pending_inplace_add_index= false;
}
/* The prepare_drop_index() method takes an array of key numbers. */ /* The prepare_drop_index() method takes an array of key numbers. */
key_numbers= (uint*) thd->alloc(sizeof(uint) * index_drop_count); key_numbers= (uint*) thd->alloc(sizeof(uint) * index_drop_count);
keyno_p= key_numbers; keyno_p= key_numbers;
...@@ -6522,11 +6544,14 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, ...@@ -6522,11 +6544,14 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
} }
/*end of if (! new_table) for add/drop index*/ /*end of if (! new_table) for add/drop index*/
if (error)
goto err_new_table_cleanup;
if (table->s->tmp_table != NO_TMP_TABLE) if (table->s->tmp_table != NO_TMP_TABLE)
{ {
/*
In-place operations are not supported for temporary tables, so
we don't have to call final_add_index() in this case. The assert
verifies that in-place add index has not been done.
*/
DBUG_ASSERT(!pending_inplace_add_index);
/* Close lock if this is a transactional table */ /* Close lock if this is a transactional table */
if (thd->lock) if (thd->lock)
{ {
...@@ -6593,8 +6618,30 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, ...@@ -6593,8 +6618,30 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
my_casedn_str(files_charset_info, old_name); my_casedn_str(files_charset_info, old_name);
if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME)) if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME))
{
if (pending_inplace_add_index)
{
pending_inplace_add_index= false;
table->file->final_add_index(add, false);
}
// Mark this TABLE instance as stale to avoid out-of-sync index information.
table->m_needs_reopen= true;
goto err_new_table_cleanup; goto err_new_table_cleanup;
}
if (pending_inplace_add_index)
{
pending_inplace_add_index= false;
DBUG_EXECUTE_IF("alter_table_rollback_new_index", {
table->file->final_add_index(add, false);
my_error(ER_UNKNOWN_ERROR, MYF(0));
goto err_new_table_cleanup;
});
if ((error= table->file->final_add_index(add, true)))
{
table->file->print_error(error, MYF(0));
goto err_new_table_cleanup;
}
}
close_all_tables_for_name(thd, table->s, close_all_tables_for_name(thd, table->s,
new_name != table_name || new_db != db); new_name != table_name || new_db != db);
......
...@@ -2627,8 +2627,10 @@ innobase_alter_table_flags( ...@@ -2627,8 +2627,10 @@ innobase_alter_table_flags(
uint flags) uint flags)
{ {
return(HA_INPLACE_ADD_INDEX_NO_READ_WRITE return(HA_INPLACE_ADD_INDEX_NO_READ_WRITE
| HA_INPLACE_ADD_INDEX_NO_WRITE
| HA_INPLACE_DROP_INDEX_NO_READ_WRITE | HA_INPLACE_DROP_INDEX_NO_READ_WRITE
| HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE | HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE
| HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE
| HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE | HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE
| HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE); | HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE);
} }
......
...@@ -215,7 +215,9 @@ class ha_innobase: public handler ...@@ -215,7 +215,9 @@ class ha_innobase: public handler
bool primary_key_is_clustered(); bool primary_key_is_clustered();
int cmp_ref(const uchar *ref1, const uchar *ref2); int cmp_ref(const uchar *ref1, const uchar *ref2);
/** Fast index creation (smart ALTER TABLE) @see handler0alter.cc @{ */ /** Fast index creation (smart ALTER TABLE) @see handler0alter.cc @{ */
int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys); int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
handler_add_index **add);
int final_add_index(handler_add_index *add, bool commit);
int prepare_drop_index(TABLE *table_arg, uint *key_num, int prepare_drop_index(TABLE *table_arg, uint *key_num,
uint num_of_keys); uint num_of_keys);
int final_drop_index(TABLE *table_arg); int final_drop_index(TABLE *table_arg);
......
...@@ -640,6 +640,18 @@ innobase_create_temporary_tablename( ...@@ -640,6 +640,18 @@ innobase_create_temporary_tablename(
return(name); return(name);
} }
class ha_innobase_add_index : public handler_add_index
{
public:
/** table where the indexes are being created */
dict_table_t* indexed_table;
ha_innobase_add_index(TABLE* table, KEY* key_info, uint num_of_keys,
dict_table_t* indexed_table_arg) :
handler_add_index(table, key_info, num_of_keys),
indexed_table (indexed_table_arg) {}
~ha_innobase_add_index() {}
};
/*******************************************************************//** /*******************************************************************//**
Create indexes. Create indexes.
@return 0 or error number */ @return 0 or error number */
...@@ -647,12 +659,15 @@ UNIV_INTERN ...@@ -647,12 +659,15 @@ UNIV_INTERN
int int
ha_innobase::add_index( ha_innobase::add_index(
/*===================*/ /*===================*/
TABLE* table, /*!< in: Table where indexes are created */ TABLE* table, /*!< in: Table where indexes
KEY* key_info, /*!< in: Indexes to be created */ are created */
uint num_of_keys) /*!< in: Number of indexes to be created */ KEY* key_info, /*!< in: Indexes
to be created */
uint num_of_keys, /*!< in: Number of indexes
to be created */
handler_add_index** add) /*!< out: context */
{ {
dict_index_t** index; /*!< Index to be created */ dict_index_t** index; /*!< Index to be created */
dict_table_t* innodb_table; /*!< InnoDB table in dictionary */
dict_table_t* indexed_table; /*!< Table where indexes are created */ dict_table_t* indexed_table; /*!< Table where indexes are created */
merge_index_def_t* index_defs; /*!< Index definitions */ merge_index_def_t* index_defs; /*!< Index definitions */
mem_heap_t* heap; /*!< Heap for index definitions */ mem_heap_t* heap; /*!< Heap for index definitions */
...@@ -668,6 +683,8 @@ ha_innobase::add_index( ...@@ -668,6 +683,8 @@ ha_innobase::add_index(
ut_a(key_info); ut_a(key_info);
ut_a(num_of_keys); ut_a(num_of_keys);
*add = NULL;
if (srv_created_new_raw || srv_force_recovery) { if (srv_created_new_raw || srv_force_recovery) {
DBUG_RETURN(HA_ERR_WRONG_COMMAND); DBUG_RETURN(HA_ERR_WRONG_COMMAND);
} }
...@@ -683,15 +700,16 @@ ha_innobase::add_index( ...@@ -683,15 +700,16 @@ ha_innobase::add_index(
DBUG_RETURN(-1); DBUG_RETURN(-1);
} }
innodb_table = indexed_table indexed_table = dict_table_get(prebuilt->table->name, FALSE);
= dict_table_get(prebuilt->table->name, FALSE);
if (UNIV_UNLIKELY(!innodb_table)) { if (UNIV_UNLIKELY(!indexed_table)) {
DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
} }
ut_a(indexed_table == prebuilt->table);
/* Check that index keys are sensible */ /* Check that index keys are sensible */
error = innobase_check_index_keys(key_info, num_of_keys, innodb_table); error = innobase_check_index_keys(key_info, num_of_keys, prebuilt->table);
if (UNIV_UNLIKELY(error)) { if (UNIV_UNLIKELY(error)) {
DBUG_RETURN(error); DBUG_RETURN(error);
...@@ -700,7 +718,7 @@ ha_innobase::add_index( ...@@ -700,7 +718,7 @@ ha_innobase::add_index(
/* Check each index's column length to make sure they do not /* Check each index's column length to make sure they do not
exceed limit */ exceed limit */
for (ulint i = 0; i < num_of_keys; i++) { for (ulint i = 0; i < num_of_keys; i++) {
error = innobase_check_column_length(innodb_table, error = innobase_check_column_length(prebuilt->table,
&key_info[i]); &key_info[i]);
if (error) { if (error) {
...@@ -723,7 +741,7 @@ ha_innobase::add_index( ...@@ -723,7 +741,7 @@ ha_innobase::add_index(
num_of_idx = num_of_keys; num_of_idx = num_of_keys;
index_defs = innobase_create_key_def( index_defs = innobase_create_key_def(
trx, innodb_table, heap, key_info, num_of_idx); trx, prebuilt->table, heap, key_info, num_of_idx);
new_primary = DICT_CLUSTERED & index_defs[0].ind_type; new_primary = DICT_CLUSTERED & index_defs[0].ind_type;
...@@ -737,7 +755,7 @@ ha_innobase::add_index( ...@@ -737,7 +755,7 @@ ha_innobase::add_index(
trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
/* Acquire a lock on the table before creating any indexes. */ /* Acquire a lock on the table before creating any indexes. */
error = row_merge_lock_table(prebuilt->trx, innodb_table, error = row_merge_lock_table(prebuilt->trx, prebuilt->table,
new_primary ? LOCK_X : LOCK_S); new_primary ? LOCK_X : LOCK_S);
if (UNIV_UNLIKELY(error != DB_SUCCESS)) { if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
...@@ -751,7 +769,7 @@ ha_innobase::add_index( ...@@ -751,7 +769,7 @@ ha_innobase::add_index(
row_mysql_lock_data_dictionary(trx); row_mysql_lock_data_dictionary(trx);
dict_locked = TRUE; dict_locked = TRUE;
ut_d(dict_table_check_for_dup_indexes(innodb_table, FALSE)); ut_d(dict_table_check_for_dup_indexes(prebuilt->table, FALSE));
/* If a new primary key is defined for the table we need /* If a new primary key is defined for the table we need
to drop the original table and rebuild all indexes. */ to drop the original table and rebuild all indexes. */
...@@ -759,15 +777,15 @@ ha_innobase::add_index( ...@@ -759,15 +777,15 @@ ha_innobase::add_index(
if (UNIV_UNLIKELY(new_primary)) { if (UNIV_UNLIKELY(new_primary)) {
/* This transaction should be the only one /* This transaction should be the only one
operating on the table. */ operating on the table. */
ut_a(innodb_table->n_mysql_handles_opened == 1); ut_a(prebuilt->table->n_mysql_handles_opened == 1);
char* new_table_name = innobase_create_temporary_tablename( char* new_table_name = innobase_create_temporary_tablename(
heap, '1', innodb_table->name); heap, '1', prebuilt->table->name);
/* Clone the table. */ /* Clone the table. */
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
indexed_table = row_merge_create_temporary_table( indexed_table = row_merge_create_temporary_table(
new_table_name, index_defs, innodb_table, trx); new_table_name, index_defs, prebuilt->table, trx);
if (!indexed_table) { if (!indexed_table) {
...@@ -781,11 +799,12 @@ ha_innobase::add_index( ...@@ -781,11 +799,12 @@ ha_innobase::add_index(
break; break;
default: default:
error = convert_error_code_to_mysql( error = convert_error_code_to_mysql(
trx->error_state, innodb_table->flags, trx->error_state,
prebuilt->table->flags,
user_thd); user_thd);
} }
ut_d(dict_table_check_for_dup_indexes(innodb_table, ut_d(dict_table_check_for_dup_indexes(prebuilt->table,
FALSE)); FALSE));
mem_heap_free(heap); mem_heap_free(heap);
trx_general_rollback_for_mysql(trx, NULL); trx_general_rollback_for_mysql(trx, NULL);
...@@ -800,17 +819,15 @@ ha_innobase::add_index( ...@@ -800,17 +819,15 @@ ha_innobase::add_index(
/* Create the indexes in SYS_INDEXES and load into dictionary. */ /* Create the indexes in SYS_INDEXES and load into dictionary. */
for (ulint i = 0; i < num_of_idx; i++) { for (num_created = 0; num_created < num_of_idx; num_created++) {
index[i] = row_merge_create_index(trx, indexed_table, index[num_created] = row_merge_create_index(
&index_defs[i]); trx, indexed_table, &index_defs[num_created]);
if (!index[i]) { if (!index[num_created]) {
error = trx->error_state; error = trx->error_state;
goto error_handling; goto error_handling;
} }
num_created++;
} }
ut_ad(error == DB_SUCCESS); ut_ad(error == DB_SUCCESS);
...@@ -832,7 +849,7 @@ ha_innobase::add_index( ...@@ -832,7 +849,7 @@ ha_innobase::add_index(
if (UNIV_UNLIKELY(new_primary)) { if (UNIV_UNLIKELY(new_primary)) {
/* A primary key is to be built. Acquire an exclusive /* A primary key is to be built. Acquire an exclusive
table lock also on the table that is being created. */ table lock also on the table that is being created. */
ut_ad(indexed_table != innodb_table); ut_ad(indexed_table != prebuilt->table);
error = row_merge_lock_table(prebuilt->trx, indexed_table, error = row_merge_lock_table(prebuilt->trx, indexed_table,
LOCK_X); LOCK_X);
...@@ -846,7 +863,7 @@ ha_innobase::add_index( ...@@ -846,7 +863,7 @@ ha_innobase::add_index(
/* Read the clustered index of the table and build indexes /* Read the clustered index of the table and build indexes
based on this information using temporary files and merge sort. */ based on this information using temporary files and merge sort. */
error = row_merge_build_indexes(prebuilt->trx, error = row_merge_build_indexes(prebuilt->trx,
innodb_table, indexed_table, prebuilt->table, indexed_table,
index, num_of_idx, table); index, num_of_idx, table);
error_handling: error_handling:
...@@ -854,63 +871,15 @@ error_handling: ...@@ -854,63 +871,15 @@ error_handling:
dictionary which were defined. */ dictionary which were defined. */
switch (error) { switch (error) {
const char* old_name;
char* tmp_name;
case DB_SUCCESS: case DB_SUCCESS:
ut_a(!dict_locked); ut_a(!dict_locked);
row_mysql_lock_data_dictionary(trx);
dict_locked = TRUE;
ut_d(mutex_enter(&dict_sys->mutex));
ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE)); ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
ut_d(mutex_exit(&dict_sys->mutex));
if (!new_primary) { *add = new ha_innobase_add_index(table, key_info, num_of_keys,
error = row_merge_rename_indexes(trx, indexed_table); indexed_table);
break;
if (error != DB_SUCCESS) {
row_merge_drop_indexes(trx, indexed_table,
index, num_created);
}
goto convert_error;
}
/* If a new primary key was defined for the table and
there was no error at this point, we can now rename
the old table as a temporary table, rename the new
temporary table as the old table and drop the old table. */
old_name = innodb_table->name;
tmp_name = innobase_create_temporary_tablename(heap, '2',
old_name);
error = row_merge_rename_tables(innodb_table, indexed_table,
tmp_name, trx);
if (error != DB_SUCCESS) {
row_merge_drop_table(trx, indexed_table);
switch (error) {
case DB_TABLESPACE_ALREADY_EXISTS:
case DB_DUPLICATE_KEY:
innobase_convert_tablename(tmp_name);
my_error(HA_ERR_TABLE_EXIST, MYF(0), tmp_name);
error = HA_ERR_TABLE_EXIST;
break;
default:
goto convert_error;
}
break;
}
trx_commit_for_mysql(prebuilt->trx);
row_prebuilt_free(prebuilt, TRUE);
prebuilt = row_create_prebuilt(indexed_table);
indexed_table->n_mysql_handles_opened++;
error = row_merge_drop_table(trx, innodb_table);
innodb_table = indexed_table;
goto convert_error;
case DB_TOO_BIG_RECORD: case DB_TOO_BIG_RECORD:
my_error(HA_ERR_TO_BIG_ROW, MYF(0)); my_error(HA_ERR_TO_BIG_ROW, MYF(0));
...@@ -926,7 +895,7 @@ error: ...@@ -926,7 +895,7 @@ error:
trx->error_state = DB_SUCCESS; trx->error_state = DB_SUCCESS;
if (new_primary) { if (new_primary) {
if (indexed_table != innodb_table) { if (indexed_table != prebuilt->table) {
row_merge_drop_table(trx, indexed_table); row_merge_drop_table(trx, indexed_table);
} }
} else { } else {
...@@ -938,38 +907,161 @@ error: ...@@ -938,38 +907,161 @@ error:
row_merge_drop_indexes(trx, indexed_table, row_merge_drop_indexes(trx, indexed_table,
index, num_created); index, num_created);
} }
convert_error:
if (error == DB_SUCCESS) {
/* Build index is successful. We will need to
rebuild index translation table. Reset the
index entry count in the translation table
to zero, so that translation table will be rebuilt */
share->idx_trans_tbl.index_count = 0;
}
error = convert_error_code_to_mysql(error,
innodb_table->flags,
user_thd);
} }
mem_heap_free(heap);
trx_commit_for_mysql(trx); trx_commit_for_mysql(trx);
if (prebuilt->trx) { if (prebuilt->trx) {
trx_commit_for_mysql(prebuilt->trx); trx_commit_for_mysql(prebuilt->trx);
} }
if (dict_locked) { if (dict_locked) {
ut_d(dict_table_check_for_dup_indexes(innodb_table, FALSE));
row_mysql_unlock_data_dictionary(trx); row_mysql_unlock_data_dictionary(trx);
} }
trx_free_for_mysql(trx); trx_free_for_mysql(trx);
mem_heap_free(heap);
/* There might be work for utility threads.*/ /* There might be work for utility threads.*/
srv_active_wake_master_thread(); srv_active_wake_master_thread();
DBUG_RETURN(error); DBUG_RETURN(convert_error_code_to_mysql(error, prebuilt->table->flags,
user_thd));
}
/*******************************************************************//**
Finalize or undo add_index().
@return 0 or error number */
UNIV_INTERN
int
ha_innobase::final_add_index(
/*=========================*/
handler_add_index* add_arg,/*!< in: context from add_index() */
bool commit) /*!< in: true=commit, false=rollback */
{
ha_innobase_add_index* add;
trx_t* trx;
int err = 0;
DBUG_ENTER("ha_innobase::final_add_index");
ut_ad(add_arg);
add = static_cast<class ha_innobase_add_index*>(add_arg);
/* Create a background transaction for the operations on
the data dictionary tables. */
trx = innobase_trx_allocate(user_thd);
trx_start_if_not_started(trx);
/* Flag this transaction as a dictionary operation, so that
the data dictionary will be locked in crash recovery. */
trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
/* Latch the InnoDB data dictionary exclusively so that no deadlocks
or lock waits can happen in it during an index create operation. */
row_mysql_lock_data_dictionary(trx);
if (add->indexed_table != prebuilt->table) {
ulint error;
/* We copied the table (new_primary). */
if (commit) {
mem_heap_t* heap;
char* tmp_name;
heap = mem_heap_create(1024);
/* A new primary key was defined for the table
and there was no error at this point. We can
now rename the old table as a temporary table,
rename the new temporary table as the old
table and drop the old table. */
tmp_name = innobase_create_temporary_tablename(
heap, '2', prebuilt->table->name);
error = row_merge_rename_tables(
prebuilt->table, add->indexed_table,
tmp_name, trx);
switch (error) {
case DB_TABLESPACE_ALREADY_EXISTS:
case DB_DUPLICATE_KEY:
innobase_convert_tablename(tmp_name);
my_error(HA_ERR_TABLE_EXIST, MYF(0), tmp_name);
err = HA_ERR_TABLE_EXIST;
break;
default:
err = convert_error_code_to_mysql(
error, prebuilt->table->flags,
user_thd);
break;
}
mem_heap_free(heap);
}
if (!commit || err) {
error = row_merge_drop_table(trx, add->indexed_table);
trx_commit_for_mysql(prebuilt->trx);
} else {
dict_table_t* old_table = prebuilt->table;
trx_commit_for_mysql(prebuilt->trx);
row_prebuilt_free(prebuilt, TRUE);
error = row_merge_drop_table(trx, old_table);
add->indexed_table->n_mysql_handles_opened++;
prebuilt = row_create_prebuilt(add->indexed_table);
}
err = convert_error_code_to_mysql(
error, prebuilt->table->flags, user_thd);
} else {
/* We created secondary indexes (!new_primary). */
if (commit) {
err = convert_error_code_to_mysql(
row_merge_rename_indexes(trx, prebuilt->table),
prebuilt->table->flags, user_thd);
}
if (!commit || err) {
dict_index_t* index;
dict_index_t* next_index;
for (index = dict_table_get_first_index(
prebuilt->table);
index; index = next_index) {
next_index = dict_table_get_next_index(index);
if (*index->name == TEMP_INDEX_PREFIX) {
row_merge_drop_index(
index, prebuilt->table, trx);
}
}
}
}
/* If index is successfully built, we will need to rebuild index
translation table. Set valid index entry count in the translation
table to zero. */
if (err == 0 && commit) {
share->idx_trans_tbl.index_count = 0;
}
trx_commit_for_mysql(trx);
if (prebuilt->trx) {
trx_commit_for_mysql(prebuilt->trx);
}
ut_d(dict_table_check_for_dup_indexes(prebuilt->table, FALSE));
row_mysql_unlock_data_dictionary(trx);
trx_free_for_mysql(trx);
/* There might be work for utility threads.*/
srv_active_wake_master_thread();
delete add;
DBUG_RETURN(err);
} }
/*******************************************************************//** /*******************************************************************//**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment