Commit cb80ad09 authored by Vasil Dimov

Partial fix for Bug#11764622 57480: MEMORY LEAK WHEN HAVING 256+ TABLES

Port vasil.dimov@oracle.com-20111205083046-jtgi1emlvtfnjatt from mysql-trunk
parent a4fa485f
...@@ -3737,22 +3737,9 @@ ha_innobase::open( ...@@ -3737,22 +3737,9 @@ ha_innobase::open(
DBUG_RETURN(1); DBUG_RETURN(1);
} }
/* Create buffers for packing the fields of a record. Why /* Will be allocated if it is needed in ::update_row() */
table->reclength did not work here? Obviously, because char upd_buf = NULL;
fields when packed actually became 1 byte longer, when we also upd_buf_size = 0;
stored the string length as the first byte. */
upd_and_key_val_buff_len =
table->s->reclength + table->s->max_key_length
+ MAX_REF_PARTS * 3;
if (!(uchar*) my_multi_malloc(MYF(MY_WME),
&upd_buff, upd_and_key_val_buff_len,
&key_val_buff, upd_and_key_val_buff_len,
NullS)) {
free_share(share);
DBUG_RETURN(1);
}
/* We look for pattern #P# to see if the table is partitioned /* We look for pattern #P# to see if the table is partitioned
MySQL table. The retry logic for partitioned tables is a MySQL table. The retry logic for partitioned tables is a
...@@ -3793,7 +3780,6 @@ retry: ...@@ -3793,7 +3780,6 @@ retry:
"how you can resolve the problem.\n", "how you can resolve the problem.\n",
norm_name); norm_name);
free_share(share); free_share(share);
my_free(upd_buff);
my_errno = ENOENT; my_errno = ENOENT;
DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
...@@ -3809,7 +3795,6 @@ retry: ...@@ -3809,7 +3795,6 @@ retry:
"how you can resolve the problem.\n", "how you can resolve the problem.\n",
norm_name); norm_name);
free_share(share); free_share(share);
my_free(upd_buff);
my_errno = ENOENT; my_errno = ENOENT;
dict_table_decrement_handle_count(ib_table, FALSE); dict_table_decrement_handle_count(ib_table, FALSE);
...@@ -4006,7 +3991,13 @@ ha_innobase::close(void) ...@@ -4006,7 +3991,13 @@ ha_innobase::close(void)
row_prebuilt_free(prebuilt, FALSE); row_prebuilt_free(prebuilt, FALSE);
my_free(upd_buff); if (upd_buf != NULL) {
ut_ad(upd_buf_size != 0);
my_free(upd_buf);
upd_buf = NULL;
upd_buf_size = 0;
}
free_share(share); free_share(share);
/* Tell InnoDB server that there might be work for /* Tell InnoDB server that there might be work for
...@@ -5299,6 +5290,23 @@ ha_innobase::update_row( ...@@ -5299,6 +5290,23 @@ ha_innobase::update_row(
ut_a(prebuilt->trx == trx); ut_a(prebuilt->trx == trx);
if (upd_buf == NULL) {
ut_ad(upd_buf_size == 0);
/* Create a buffer for packing the fields of a record. Why
table->reclength did not work here? Obviously, because char
fields when packed actually became 1 byte longer, when we also
stored the string length as the first byte. */
upd_buf_size = table->s->reclength + table->s->max_key_length
+ MAX_REF_PARTS * 3;
upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME));
if (upd_buf == NULL) {
upd_buf_size = 0;
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
}
ha_statistic_increment(&SSV::ha_update_count); ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
...@@ -5311,11 +5319,10 @@ ha_innobase::update_row( ...@@ -5311,11 +5319,10 @@ ha_innobase::update_row(
} }
/* Build an update vector from the modified fields in the rows /* Build an update vector from the modified fields in the rows
(uses upd_buff of the handle) */ (uses upd_buf of the handle) */
calc_row_difference(uvect, (uchar*) old_row, new_row, table, calc_row_difference(uvect, (uchar*) old_row, new_row, table,
upd_buff, (ulint)upd_and_key_val_buff_len, upd_buf, upd_buf_size, prebuilt, user_thd);
prebuilt, user_thd);
/* This is not a delete */ /* This is not a delete */
prebuilt->upd_node->is_delete = FALSE; prebuilt->upd_node->is_delete = FALSE;
...@@ -5692,8 +5699,7 @@ ha_innobase::index_read( ...@@ -5692,8 +5699,7 @@ ha_innobase::index_read(
row_sel_convert_mysql_key_to_innobase( row_sel_convert_mysql_key_to_innobase(
prebuilt->search_tuple, prebuilt->search_tuple,
(byte*) key_val_buff, srch_key_val1, sizeof(srch_key_val1),
(ulint)upd_and_key_val_buff_len,
index, index,
(byte*) key_ptr, (byte*) key_ptr,
(ulint) key_len, (ulint) key_len,
...@@ -7511,12 +7517,6 @@ ha_innobase::records_in_range( ...@@ -7511,12 +7517,6 @@ ha_innobase::records_in_range(
{ {
KEY* key; KEY* key;
dict_index_t* index; dict_index_t* index;
uchar* key_val_buff2 = (uchar*) my_malloc(
table->s->reclength
+ table->s->max_key_length + 100,
MYF(MY_FAE));
ulint buff2_len = table->s->reclength
+ table->s->max_key_length + 100;
dtuple_t* range_start; dtuple_t* range_start;
dtuple_t* range_end; dtuple_t* range_end;
ib_int64_t n_rows; ib_int64_t n_rows;
...@@ -7568,8 +7568,8 @@ ha_innobase::records_in_range( ...@@ -7568,8 +7568,8 @@ ha_innobase::records_in_range(
dict_index_copy_types(range_end, index, key->key_parts); dict_index_copy_types(range_end, index, key->key_parts);
row_sel_convert_mysql_key_to_innobase( row_sel_convert_mysql_key_to_innobase(
range_start, (byte*) key_val_buff, range_start,
(ulint)upd_and_key_val_buff_len, srch_key_val1, sizeof(srch_key_val1),
index, index,
(byte*) (min_key ? min_key->key : (byte*) (min_key ? min_key->key :
(const uchar*) 0), (const uchar*) 0),
...@@ -7580,8 +7580,9 @@ ha_innobase::records_in_range( ...@@ -7580,8 +7580,9 @@ ha_innobase::records_in_range(
: range_start->n_fields == 0); : range_start->n_fields == 0);
row_sel_convert_mysql_key_to_innobase( row_sel_convert_mysql_key_to_innobase(
range_end, (byte*) key_val_buff2, range_end,
buff2_len, index, srch_key_val2, sizeof(srch_key_val2),
index,
(byte*) (max_key ? max_key->key : (byte*) (max_key ? max_key->key :
(const uchar*) 0), (const uchar*) 0),
(ulint) (max_key ? max_key->length : 0), (ulint) (max_key ? max_key->length : 0),
...@@ -7608,7 +7609,6 @@ ha_innobase::records_in_range( ...@@ -7608,7 +7609,6 @@ ha_innobase::records_in_range(
mem_heap_free(heap); mem_heap_free(heap);
func_exit: func_exit:
my_free(key_val_buff2);
prebuilt->trx->op_info = (char*)""; prebuilt->trx->op_info = (char*)"";
......
...@@ -78,13 +78,14 @@ class ha_innobase: public handler ...@@ -78,13 +78,14 @@ class ha_innobase: public handler
INNOBASE_SHARE* share; /*!< information for MySQL INNOBASE_SHARE* share; /*!< information for MySQL
table locking */ table locking */
uchar* upd_buff; /*!< buffer used in updates */ uchar* upd_buf; /*!< buffer used in updates */
uchar* key_val_buff; /*!< buffer used in converting ulint upd_buf_size; /*!< the size of upd_buf in bytes */
uchar srch_key_val1[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
uchar srch_key_val2[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
/*!< buffers used in converting
search key values from MySQL format search key values from MySQL format
to Innodb format */ to InnoDB format. "+ 2" for the two
ulong upd_and_key_val_buff_len; bytes where the length is stored */
/* the length of each of the previous
two buffers */
Table_flags int_table_flags; Table_flags int_table_flags;
uint primary_key; uint primary_key;
ulong start_of_scan; /*!< this is set to 1 when we are ulong start_of_scan; /*!< this is set to 1 when we are
......
...@@ -128,7 +128,12 @@ row_sel_convert_mysql_key_to_innobase( ...@@ -128,7 +128,12 @@ row_sel_convert_mysql_key_to_innobase(
in the tuple is already according in the tuple is already according
to index! */ to index! */
byte* buf, /*!< in: buffer to use in field byte* buf, /*!< in: buffer to use in field
conversions */ conversions; NOTE that dtuple->data
may end up pointing inside buf so
do not discard that buffer while
the tuple is being used. See
row_mysql_store_col_in_innobase_format()
in the case of DATA_INT */
ulint buf_len, /*!< in: buffer length */ ulint buf_len, /*!< in: buffer length */
dict_index_t* index, /*!< in: index of the key value */ dict_index_t* index, /*!< in: index of the key value */
const byte* key_ptr, /*!< in: MySQL key value */ const byte* key_ptr, /*!< in: MySQL key value */
......
...@@ -2301,7 +2301,12 @@ row_sel_convert_mysql_key_to_innobase( ...@@ -2301,7 +2301,12 @@ row_sel_convert_mysql_key_to_innobase(
in the tuple is already according in the tuple is already according
to index! */ to index! */
byte* buf, /*!< in: buffer to use in field byte* buf, /*!< in: buffer to use in field
conversions */ conversions; NOTE that dtuple->data
may end up pointing inside buf so
do not discard that buffer while
the tuple is being used. See
row_mysql_store_col_in_innobase_format()
in the case of DATA_INT */
ulint buf_len, /*!< in: buffer length */ ulint buf_len, /*!< in: buffer length */
dict_index_t* index, /*!< in: index of the key value */ dict_index_t* index, /*!< in: index of the key value */
const byte* key_ptr, /*!< in: MySQL key value */ const byte* key_ptr, /*!< in: MySQL key value */
...@@ -2433,6 +2438,7 @@ row_sel_convert_mysql_key_to_innobase( ...@@ -2433,6 +2438,7 @@ row_sel_convert_mysql_key_to_innobase(
/* Storing may use at most data_len bytes of buf */ /* Storing may use at most data_len bytes of buf */
if (UNIV_LIKELY(!is_null)) { if (UNIV_LIKELY(!is_null)) {
ut_a(buf + data_len <= original_buf + buf_len);
row_mysql_store_col_in_innobase_format( row_mysql_store_col_in_innobase_format(
dfield, buf, dfield, buf,
FALSE, /* MySQL key value format col */ FALSE, /* MySQL key value format col */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment