Commit e2052eba authored by marko

branches/zip: When creating a PRIMARY KEY, flag all its columns NOT NULL.

row_merge_create_temporary_table(): Add the parameter index_defs.

DB_PRIMARY_KEY_IS_NULL: New error code, mapped to ER_PRIMARY_CANT_HAVE_NULL.

row_merge_read_clustered_index(): Replace the parameter "table" with the
two parameters "old_table" and "new_table".  Detect NULL values of columns
that are supposed to be NOT NULL.

row_merge_col_prtype(): New auxiliary function, used by
row_merge_create_temporary_table() to flag as NOT NULL any columns
contained in an added PRIMARY KEY.
parent f304ce10
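
The rule this commit implements: every column that becomes part of the added PRIMARY KEY is implicitly NOT NULL, and if the existing data already contains a NULL in such a column, the rebuild is rejected with ER_PRIMARY_CANT_HAVE_NULL rather than silently converting the value. The standalone C sketch below mirrors the two pieces of the patch, the prtype flagging done by row_merge_col_prtype() and the per-row NULL check added to row_merge_read_clustered_index(), using simplified stand-ins; col_def, pk_col_flags, check_row and the FLAG_/ERR_ constants are illustrative only, not InnoDB types or APIs.

#include <stdio.h>
#include <string.h>

#define FLAG_NOT_NULL		0x1	/* stand-in for DATA_NOT_NULL */
#define ERR_SUCCESS		0	/* stand-in for DB_SUCCESS */
#define ERR_PRIMARY_KEY_IS_NULL	1	/* stand-in for DB_PRIMARY_KEY_IS_NULL */

/* Simplified column definition: a name and a flags word. */
struct col_def {
	const char*	name;
	unsigned	flags;
};

/* Mirrors row_merge_col_prtype(): OR the NOT NULL flag into the type
of any column that is named in the added PRIMARY KEY. */
static unsigned
pk_col_flags(const struct col_def* col, const char* const* pk_cols,
	     size_t n_pk)
{
	size_t	i;

	if (col->flags & FLAG_NOT_NULL) {
		return(col->flags);
	}

	for (i = 0; i < n_pk; i++) {
		if (!strcmp(col->name, pk_cols[i])) {
			return(col->flags | FLAG_NOT_NULL);
		}
	}

	return(col->flags);
}

/* Mirrors the scan-time check: a NULL value (modelled here as a NULL
pointer) in a column that was just flagged NOT NULL is an error. */
static int
check_row(const struct col_def* cols, const char* const* values,
	  size_t n_cols)
{
	size_t	i;

	for (i = 0; i < n_cols; i++) {
		if ((cols[i].flags & FLAG_NOT_NULL) && !values[i]) {
			return(ERR_PRIMARY_KEY_IS_NULL);
		}
	}

	return(ERR_SUCCESS);
}

int
main(void)
{
	/* Adding PRIMARY KEY(a) to a table t(a, b): column "a" becomes
	NOT NULL, column "b" keeps its nullability. */
	struct col_def		cols[2] = {{"a", 0}, {"b", 0}};
	const char* const	pk[1] = {"a"};
	const char*		row_ok[2] = {"1", NULL};	/* NULL only in b */
	const char*		row_bad[2] = {NULL, "2"};	/* NULL in new PK column */
	size_t			i;

	for (i = 0; i < 2; i++) {
		cols[i].flags = pk_col_flags(&cols[i], pk, 1);
	}

	printf("row_ok:  %d\n", check_row(cols, row_ok, 2));	/* prints 0 */
	printf("row_bad: %d\n", check_row(cols, row_bad, 2));	/* prints 1 */

	return(0);
}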
@@ -670,6 +670,9 @@ convert_error_code_to_mysql(
 	case DB_CANNOT_DROP_FOREIGN_INDEX:
 		return(HA_ERR_DROP_INDEX_FK);

+	case DB_PRIMARY_KEY_IS_NULL:
+		return(ER_PRIMARY_CANT_HAVE_NULL);
+
 	case DB_TOO_MANY_CONCURRENT_TRXS:
 		/* Once MySQL add the appropriate code to errmsg.txt then
 		we can get rid of this #ifdef. NOTE: The code checked by
@@ -8363,7 +8366,7 @@ err_exit:
 	/* Clone table and write UNDO log record */
 	indexed_table = row_merge_create_temporary_table(
-		new_table_name, innodb_table, trx);
+		new_table_name, index_defs, innodb_table, trx);

 	if (!indexed_table) {
@@ -8475,6 +8478,7 @@ error_handling:
 	case DB_SUCCESS:
 		ut_ad(!dict_locked);
 		break;
+	case DB_PRIMARY_KEY_IS_NULL:
 	case DB_DUPLICATE_KEY:
 		prebuilt->trx->error_info = NULL;
 		prebuilt->trx->error_key_num = trx->error_key_num;
@@ -8559,6 +8563,12 @@ func_exit:
 	/* There might be work for utility threads.*/
 	srv_active_wake_master_thread();

+	switch (error) {
+	case DB_PRIMARY_KEY_IS_NULL:
+		my_error(ER_PRIMARY_CANT_HAVE_NULL, MYF(0));
+		break;
+	}
+
 	DBUG_RETURN(convert_error_code_to_mysql(error, user_thd));
 }
...
@@ -69,6 +69,8 @@ Created 5/24/1996 Heikki Tuuri
 #define DB_CANNOT_DROP_FOREIGN_INDEX 48	/* we cannot drop an index because
						it is needed on foreign key
						constraint */
+#define DB_PRIMARY_KEY_IS_NULL	49	/* a column in the PRIMARY KEY
+					was found to be NULL */

 /* The following are partial failure codes */
 #define DB_FAIL		1000
...
@@ -64,16 +64,20 @@ row_merge_drop_indexes(
 	ulint		num_created);	/* in: number of elements in index[] */

 /*************************************************************************
-Create a temporary table using a definition of the old table. You must
-lock data dictionary before calling this function. */
+Create a temporary table for creating a primary key, using the definition
+of an existing table. */

 dict_table_t*
 row_merge_create_temporary_table(
 /*=============================*/
-					/* out: table, or NULL on error */
-	const char*	table_name,	/* in: new table name */
-	dict_table_t*	table,		/* in: old table definition */
-	trx_t*		trx);		/* in/out: trx (sets error_state) */
+						/* out: table,
+						or NULL on error */
+	const char*		table_name,	/* in: new table name */
+	const merge_index_def_t*index_def,	/* in: the index definition
+						of the primary key */
+	const dict_table_t*	table,		/* in: old table definition */
+	trx_t*			trx);		/* in/out: transaction
+						(sets error_state) */

 /*************************************************************************
 Rename the indexes in the dictionary. */
...
@@ -810,14 +810,18 @@ row_merge_cmp(
 /************************************************************************
 Reads clustered index of the table and create temporary files
-containing index entries for indexes to be built. */
+containing the index entries for the indexes to be built. */
 static
 ulint
 row_merge_read_clustered_index(
 /*===========================*/
				/* out: DB_SUCCESS or error */
 	trx_t*		trx,	/* in: transaction */
-	dict_table_t*	table,	/* in: table where index is created */
+	dict_table_t*	old_table,/* in: table where rows are
+				read from */
+	dict_table_t*	new_table,/* in: table where indexes are
+				created; identical to old_table
+				unless creating a PRIMARY KEY */
 	dict_index_t**	index,	/* in: indexes to be created */
 	merge_file_t*	files,	/* in: temporary files */
 	ulint		n_index,/* in: number of indexes to create */
@@ -832,11 +836,15 @@ row_merge_read_clustered_index(
 	mtr_t			mtr;	/* Mini transaction */
 	ulint			err = DB_SUCCESS;/* Return code */
 	ulint			i;
+	ulint			n_nonnull = 0;	/* number of columns
+						changed to NOT NULL */
+	ulint*			nonnull = NULL;	/* NOT NULL columns */

 	trx->op_info = "reading clustered index";

 	ut_ad(trx);
-	ut_ad(table);
+	ut_ad(old_table);
+	ut_ad(new_table);
 	ut_ad(index);
 	ut_ad(files);
@@ -853,11 +861,44 @@ row_merge_read_clustered_index(
 	/* Find the clustered index and create a persistent cursor
 	based on that. */

-	clust_index = dict_table_get_first_index(table);
+	clust_index = dict_table_get_first_index(old_table);

 	btr_pcur_open_at_index_side(
 		TRUE, clust_index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);

+	if (UNIV_UNLIKELY(old_table != new_table)) {
+		ulint	n_cols = dict_table_get_n_cols(old_table);
+
+		/* A primary key will be created.  Identify the
+		columns that were flagged NOT NULL in the new table,
+		so that we can quickly check that the records in the
+		(old) clustered index do not violate the added NOT
+		NULL constraints. */
+
+		ut_a(n_cols == dict_table_get_n_cols(new_table));
+
+		nonnull = mem_alloc(n_cols * sizeof *nonnull);
+
+		for (i = 0; i < n_cols; i++) {
+			if (dict_table_get_nth_col(old_table, i)->prtype
+			    & DATA_NOT_NULL) {
+
+				continue;
+			}
+
+			if (dict_table_get_nth_col(new_table, i)->prtype
+			    & DATA_NOT_NULL) {
+
+				nonnull[n_nonnull++] = i;
+			}
+		}
+
+		if (!n_nonnull) {
+			mem_free(nonnull);
+			nonnull = NULL;
+		}
+	}
+
 	row_heap = mem_heap_create(UNIV_PAGE_SIZE);

 	/* Scan the clustered index. */
@@ -885,22 +926,40 @@ row_merge_read_clustered_index(
 			rec = btr_pcur_get_rec(&pcur);

 			/* Skip delete marked records. */
-			if (rec_get_deleted_flag(rec,
-						 dict_table_is_comp(table))) {
+			if (rec_get_deleted_flag(
+				    rec, dict_table_is_comp(old_table))) {

 				continue;
 			}

 			srv_n_rows_inserted++;

-			/* Build row based on clustered index */
+			/* Build a row based on the clustered index. */

 			row = row_build(ROW_COPY_POINTERS, clust_index,
					rec, NULL, &ext, row_heap);

-			/* Build all entries for all the indexes to be created
-			in a single scan of the clustered index. */
+			if (UNIV_LIKELY_NULL(nonnull)) {
+				for (i = 0; i < n_nonnull; i++) {
+					dfield_t*	field
+						= &row->fields[nonnull[i]];
+
+					ut_a(!(field->type.prtype
+					       & DATA_NOT_NULL));
+
+					if (dfield_is_null(field)) {
+						trx->error_key_num = 0;
+						err = DB_PRIMARY_KEY_IS_NULL;
+						goto func_exit;
+					}
+
+					field->type.prtype |= DATA_NOT_NULL;
+				}
+			}
 		}

+		/* Build all entries for all the indexes to be created
+		in a single scan of the clustered index. */
+
 		for (i = 0; i < n_index; i++) {
 			row_merge_buf_t*	buf	= merge_buf[i];
 			merge_file_t*		file	= &files[i];
@@ -918,6 +977,7 @@ row_merge_read_clustered_index(
 			if (buf->n_tuples
 			    && row_merge_buf_sort(buf)
 			    && dict_index_is_unique(buf->index)) {
+				trx->error_key_num = i;
 				err = DB_DUPLICATE_KEY;
 				goto func_exit;
 			}
@@ -946,6 +1006,10 @@ func_exit:
 	mtr_commit(&mtr);
 	mem_heap_free(row_heap);

+	if (UNIV_LIKELY_NULL(nonnull)) {
+		mem_free(nonnull);
+	}
+
 	for (i = 0; i < n_index; i++) {
 		row_merge_buf_free(merge_buf[i]);
 	}
@@ -1429,16 +1493,56 @@ row_merge_file_destroy(
 }

 /*************************************************************************
-Create a temporary table using a definition of the old table. You must
-lock data dictionary before calling this function. */
+Determine the precise type of a column that is added to a tem
+if a column must be constrained NOT NULL. */
+UNIV_INLINE
+ulint
+row_merge_col_prtype(
+/*=================*/
+						/* out: col->prtype, possibly
+						ORed with DATA_NOT_NULL */
+	const dict_col_t*	col,		/* in: column */
+	const char*		col_name,	/* in: name of the column */
+	const merge_index_def_t*index_def)	/* in: the index definition
+						of the primary key */
+{
+	ulint	prtype = col->prtype;
+	ulint	i;
+
+	ut_ad(index_def->ind_type & DICT_CLUSTERED);
+
+	if (prtype & DATA_NOT_NULL) {
+
+		return(prtype);
+	}
+
+	/* All columns that are included
+	in the PRIMARY KEY must be NOT NULL. */
+
+	for (i = 0; i < index_def->n_fields; i++) {
+		if (!strcmp(col_name, index_def->fields[i].field_name)) {
+
+			return(prtype | DATA_NOT_NULL);
+		}
+	}
+
+	return(prtype);
+}
+
+/*************************************************************************
+Create a temporary table for creating a primary key, using the definition
+of an existing table. */

 dict_table_t*
 row_merge_create_temporary_table(
 /*=============================*/
-					/* out: table, or NULL on error */
-	const char*	table_name,	/* in: new table name */
-	dict_table_t*	table,		/* in: old table definition */
-	trx_t*		trx)		/* in/out: trx (sets error_state) */
+						/* out: table,
+						or NULL on error */
+	const char*		table_name,	/* in: new table name */
+	const merge_index_def_t*index_def,	/* in: the index definition
+						of the primary key */
+	const dict_table_t*	table,		/* in: old table definition */
+	trx_t*			trx)		/* in/out: transaction
+						(sets error_state) */
 {
 	ulint		i;
 	dict_table_t*	new_table = NULL;
@@ -1446,6 +1550,7 @@ row_merge_create_temporary_table(
 	ulint		error;

 	ut_ad(table_name);
+	ut_ad(index_def);
 	ut_ad(table);
 	ut_ad(mutex_own(&dict_sys->mutex));
@@ -1461,13 +1566,15 @@ row_merge_create_temporary_table(
 	for (i = 0; i < n_cols; i++) {
 		const dict_col_t*	col;
+		const char*		col_name;

 		col = dict_table_get_nth_col(table, i);
+		col_name = dict_table_get_col_name(table, i);

 		dict_mem_table_add_col(
-			new_table, heap,
-			dict_table_get_col_name(table, i),
-			col->mtype, col->prtype, col->len);
+			new_table, heap, col_name, col->mtype,
+			row_merge_col_prtype(col, col_name, index_def),
+			col->len);
 	}

 	error = row_create_table_for_mysql(new_table, trx);
@@ -1737,7 +1844,8 @@ row_merge_build_indexes(
 	secondary index entries for merge sort */

 	error = row_merge_read_clustered_index(
-		trx, old_table, indexes, merge_files, n_indexes, block);
+		trx, old_table, new_table, indexes,
+		merge_files, n_indexes, block);

 	if (error != DB_SUCCESS) {
...