Commit 82cfb40d authored by marko's avatar marko

branches/innodb+: Merge revisions 3544:3575 from branches/zip:

  ------------------------------------------------------------------------
  r3572 | marko | 2008-12-17 11:19:56 +0200 (Wed, 17 Dec 2008) | 3 lines
  Changed paths:
     M /branches/zip/ChangeLog
     M /branches/zip/btr/btr0sea.c
     M /branches/zip/buf/buf0buf.c
     M /branches/zip/buf/buf0lru.c
     M /branches/zip/ha/ha0ha.c
     M /branches/zip/ha/hash0hash.c
     M /branches/zip/include/buf0buf.h
     M /branches/zip/include/ha0ha.h
     M /branches/zip/include/ha0ha.ic
     M /branches/zip/include/hash0hash.h
     M /branches/zip/include/univ.i

  branches/zip: Introduce UNIV_AHI_DEBUG for debugging the adaptive hash
  index without enabling UNIV_DEBUG.
  ------------------------------------------------------------------------
  r3574 | marko | 2008-12-17 12:44:31 +0200 (Wed, 17 Dec 2008) | 2 lines
  Changed paths:
     M /branches/zip/ChangeLog

  branches/zip: ChangeLog: Document recent changes that were not included in
  InnoDB Plugin 1.0.2, except changes to source code comments.
  ------------------------------------------------------------------------
  r3575 | marko | 2008-12-17 14:40:58 +0200 (Wed, 17 Dec 2008) | 12 lines
  Changed paths:
     M /branches/zip/ChangeLog
     M /branches/zip/include/row0sel.h
     M /branches/zip/include/row0upd.h
     M /branches/zip/pars/pars0pars.c
     M /branches/zip/row/row0mysql.c
     M /branches/zip/row/row0sel.c
     M /branches/zip/row/row0upd.c

  branches/zip: Remove update-in-place-in-select from the internal SQL
  interpreter.  It was only used for updating the InnoDB internal data
  dictionary when renaming or dropping tables.  It could have caused
  deadlocks after acquiring latches on insert buffer bitmap pages.
  This and r3544 should fix Issue #135.

  Furthermore, the update-in-place-in-select does not account for
  compression failure.  That was not a problem yet, since the InnoDB SQL
  interpreter has so far assumed ROW_FORMAT=REDUNDANT.

  rb://63 approved by Heikki Tuuri
  ------------------------------------------------------------------------
parent 2d403a4d
2008-12-17 The InnoDB Team
* include/row0upd.h, include/row0sel.h, pars/pars0pars.c,
row/row0upd.c, row/row0sel.c, row/row0mysql.c:
Remove update-in-place select from the internal SQL interpreter.
It was only used for updating the InnoDB internal data dictionary
when renaming or dropping tables. It could have caused deadlocks
after acquiring latches on insert buffer bitmap pages.
2008-12-17 The InnoDB Team
* include/univ.i, include/buf0buf.h, include/hash0hash.h,
include/ha0ha.h, include/ha0ha.ic, ha/ha0ha.c, ha/hash0hash.c,
btr/btr0sea.c, buf/buf0lru.c, buf/buf0buf.c:
Introduce the preprocessor symbol UNIV_AHI_DEBUG for enabling
adaptive hash index debugging independently of UNIV_DEBUG.
2008-12-16 The InnoDB Team
* btr/btr0cur.c:
Do not update the free bits in the insert buffer bitmap when
inserting or deleting from the insert buffer B-tree. Assert that
records in the insert buffer B-tree are never updated.
2008-12-12 The InnoDB Team
* include/fil0fil.h, include/ibuf0ibuf.h, include/ibuf0types.h,
include/ibuf0ibuf.ic, ibuf/ibuf0ibuf.c,
buf/buf0buf.c, fil/fil0fil.c, fsp/fsp0fsp.c:
Clean up the insert buffer subsystem so that only one insert
buffer B-tree exists.
Originally, there were provisions in InnoDB for multiple insert
buffer B-trees, apparently one for each tablespace.
When Heikki Tuuri implemented multiple InnoDB tablespaces in
MySQL/InnoDB 4.1, he made the insert buffer live only in the
system tablespace (space 0) but left the provisions in the code.
2008-12-11 The InnoDB Team
* include/srv0srv.h, srv/srv0srv.c, os/os0proc.c:
Fix the issue that the InnoDB plugin fails if
innodb_buffer_pool_size is defined bigger than 4096M on 64-bit
Windows. This bug should not have affected other 64-bit systems.
2008-12-09 The InnoDB Team
* handler/ha_innodb.cc:
Fix Bug#40386 Not flushing query cache after truncate.
ha_statistics.records cannot be 0 unless the table is empty, set
to 1 instead. The original problem of Bug#29507 has been fixed in
the server.
2008-12-09 The InnoDB Team
* handler/ha_innodb.cc, srv/srv0srv.c, trx/trx0trx.c:
Fix Bug#40760 Getting database deadlocks on simultaneous inserts.
The config param innodb_thread_concurrency is dynamically set and
is read when a thread enters/exits innodb. If the value is
changed between the enter and exit time the behaviour becomes
erratic. The fix is not to use srv_thread_concurrency when
exiting, instead use the flag trx->declared_to_be_inside_innodb.
2008-12-09 The InnoDB Team
* trx/trx0undo.c:
Print 2 spaces between a timestamp and "InnoDB:" as usual.
2008-12-09 The InnoDB Team
* row/row0purge.c:
Allocate mtr_vers from the stack, not with mem_alloc().
2008-12-04 The InnoDB Team
* include/mysql_addons.h, handler/mysql_addons.cc,
handler/ha_innodb.cc, trx/trx0i_s.c, win-plugin/win-plugin.diff:
Remove dependencies to MySQL internals (defining MYSQL_SERVER).
2008-12-02 The InnoDB Team
* page/page0cur.c:
When allocating space for a record from the free list of
previously purged records, zero out the DB_TRX_ID and DB_ROLL_PTR
of the purged record if the new record would not overwrite these
fields. This fixes a harmless content mismatch reported by
page_zip_validate().
2008-12-02 The InnoDB Team
* row/row0merge.c:
......
......@@ -1102,7 +1102,7 @@ next_rec:
block->index = NULL;
cleanup:
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (UNIV_UNLIKELY(block->n_pointers)) {
/* Corruption */
ut_print_timestamp(stderr);
......@@ -1118,9 +1118,9 @@ cleanup:
} else {
rw_lock_x_unlock(&btr_search_latch);
}
#else /* UNIV_DEBUG */
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
mem_free(folds);
}
......
......@@ -658,8 +658,10 @@ buf_block_init(
block->page.in_free_list = FALSE;
block->page.in_LRU_list = FALSE;
block->in_unzip_LRU_list = FALSE;
block->n_pointers = 0;
#endif /* UNIV_DEBUG */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers = 0;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
page_zip_des_init(&block->page.zip);
mutex_create(&block->mutex, SYNC_BUF_BLOCK);
......
......@@ -1551,7 +1551,9 @@ buf_LRU_block_free_non_file_page(
ut_error;
}
ut_ad(block->n_pointers == 0);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->n_pointers == 0);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
ut_ad(!block->page.in_free_list);
ut_ad(!block->page.in_flush_list);
ut_ad(!block->page.in_LRU_list);
......
......@@ -40,9 +40,9 @@ ha_create_func(
table = hash_create(n);
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
table->adaptive = TRUE;
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/* Creating MEM_HEAP_BTR_SEARCH type heaps can potentially fail,
but in practise it never should in this case, hence the asserts. */
......@@ -111,9 +111,9 @@ ha_insert_for_fold_func(
the same fold value already exists, it is
updated to point to the same data, and no new
node is created! */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /* in: buffer block containing the data */
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* data) /* in: data, must not be NULL */
{
hash_cell_t* cell;
......@@ -122,7 +122,9 @@ ha_insert_for_fold_func(
ulint hash;
ut_ad(table && data);
ut_ad(block->frame == page_align(data));
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
hash = hash_calc_hash(fold, table);
......@@ -133,7 +135,7 @@ ha_insert_for_fold_func(
while (prev_node != NULL) {
if (prev_node->fold == fold) {
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
buf_block_t* prev_block = prev_node->block;
ut_a(prev_block->frame
......@@ -144,7 +146,7 @@ ha_insert_for_fold_func(
}
prev_node->block = block;
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
prev_node->data = data;
return(TRUE);
......@@ -168,11 +170,11 @@ ha_insert_for_fold_func(
ha_node_set_data(node, block, data);
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
block->n_pointers++;
}
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->fold = fold;
node->next = NULL;
......@@ -205,13 +207,13 @@ ha_delete_hash_node(
hash_table_t* table, /* in: hash table */
ha_node_t* del_node) /* in: node to be deleted */
{
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
ut_a(del_node->block->frame = page_align(del_node->data));
ut_a(del_node->block->n_pointers > 0);
del_node->block->n_pointers--;
}
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
}
......@@ -247,20 +249,22 @@ ha_search_and_update_if_found_func(
hash_table_t* table, /* in: hash table */
ulint fold, /* in: folded value of the searched data */
void* data, /* in: pointer to the data */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* new_block,/* in: block containing new_data */
#endif
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* new_data)/* in: new pointer to the data */
{
ha_node_t* node;
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
ut_ad(new_block->frame == page_align(new_data));
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node = ha_search_with_data(table, fold, data);
if (node) {
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
ut_a(node->block->n_pointers > 0);
node->block->n_pointers--;
......@@ -268,7 +272,7 @@ ha_search_and_update_if_found_func(
}
node->block = new_block;
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data = new_data;
}
}
......
......@@ -89,9 +89,9 @@ hash_create(
array = ut_malloc(sizeof(hash_cell_t) * prime);
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
table->adaptive = FALSE;
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
table->array = array;
table->n_cells = prime;
table->n_mutexes = 0;
......
......@@ -1191,11 +1191,11 @@ struct buf_block_struct{
An exception to this is when we init or create a page
in the buffer pool in buf0buf.c. */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ulint n_pointers; /* used in debugging: the number of
pointers in the adaptive hash index
pointing to this frame */
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
unsigned is_hashed:1; /* TRUE if hash index has already been
built on this page; note that it does
not guarantee that the index is
......
......@@ -36,18 +36,18 @@ ha_search_and_update_if_found_func(
hash_table_t* table, /* in: hash table */
ulint fold, /* in: folded value of the searched data */
void* data, /* in: pointer to the data */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* new_block,/* in: block containing new_data */
#endif
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* new_data);/* in: new pointer to the data */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
ha_search_and_update_if_found_func(table,fold,data,new_block,new_data)
#else
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
ha_search_and_update_if_found_func(table,fold,data,new_data)
#endif
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/*****************************************************************
Creates a hash table with >= n array cells. The actual number of cells is
chosen to be a prime number slightly bigger than n. */
......@@ -92,16 +92,16 @@ ha_insert_for_fold_func(
the same fold value already exists, it is
updated to point to the same data, and no new
node is created! */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /* in: buffer block containing the data */
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* data); /* in: data, must not be NULL */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# define ha_insert_for_fold(t,f,b,d) ha_insert_for_fold_func(t,f,b,d)
#else
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
# define ha_insert_for_fold(t,f,b,d) ha_insert_for_fold_func(t,f,d)
#endif
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/*****************************************************************
Deletes an entry from a hash table. */
......@@ -158,9 +158,9 @@ ha_print_info(
typedef struct ha_node_struct ha_node_t;
struct ha_node_struct {
ha_node_t* next; /* next chain node or NULL if none */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block; /* buffer block containing the data, or NULL */
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* data; /* pointer to the data */
ulint fold; /* fold value for the data */
};
......
......@@ -37,22 +37,22 @@ void
ha_node_set_data_func(
/*==================*/
ha_node_t* node, /* in: hash chain node */
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /* in: buffer block containing the data */
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* data) /* in: pointer to the data */
{
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
node->block = block;
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data = data;
}
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,b,d)
#else /* UNIV_DEBUG */
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,d)
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/**********************************************************************
Gets the next node in a hash chain. */
......
......@@ -363,10 +363,10 @@ struct hash_cell_struct{
/* The hash table structure */
struct hash_table_struct {
#ifdef UNIV_DEBUG
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ibool adaptive;/* TRUE if this is the hash table of the
adaptive hash index */
#endif /* UNIV_DEBUG */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
ulint n_cells;/* number of cells in the hash table */
hash_cell_t* array; /* pointer to cell array */
ulint n_mutexes;/* if mutexes != NULL, then the number of
......
......@@ -280,13 +280,6 @@ struct sel_node_struct{
ibool set_x_locks; /* TRUE if the cursor is for update or
delete, which means that a row x-lock
should be placed on the cursor row */
ibool select_will_do_update;
/* TRUE if the select is for a searched
update which can be performed in-place:
in this case the select will take care
of the update */
ulint latch_mode; /* BTR_SEARCH_LEAF, or BTR_MODIFY_LEAF
if select_will_do_update is TRUE */
ulint row_lock_mode; /* LOCK_X or LOCK_S */
ulint n_tables; /* number of tables */
ulint fetch_table; /* number of the next table to access
......
......@@ -292,16 +292,6 @@ row_upd_step(
/* out: query thread to run next or NULL */
que_thr_t* thr); /* in: query thread */
/*************************************************************************
Performs an in-place update for the current clustered index record in
select. */
UNIV_INTERN
void
row_upd_in_place_in_select(
/*=======================*/
sel_node_t* sel_node, /* in: select node */
que_thr_t* thr, /* in: query thread */
mtr_t* mtr); /* in: mtr */
/*************************************************************************
Parses the log data of system field values. */
UNIV_INTERN
byte*
......@@ -374,11 +364,6 @@ struct upd_node_struct{
ibool searched_update;
/* TRUE if searched update, FALSE if
positioned */
ibool select_will_do_update;
/* TRUE if a searched update where ordering
fields will not be updated, and the size of
the fields will not change: in this case the
select node will take care of the update */
ibool in_mysql_interface;
/* TRUE if the update node was created
for the MySQL interface */
......
......@@ -130,6 +130,8 @@ command. Not tested on Windows. */
Valgrind instrumentation */
#define UNIV_DEBUG_PRINT /* Enable the compilation of
some debug print functions */
#define UNIV_AHI_DEBUG /* Enable adaptive hash index
debugging without UNIV_DEBUG */
#define UNIV_BUF_DEBUG /* Enable buffer pool
debugging without UNIV_DEBUG */
#define UNIV_DEBUG /* Enable ut_ad() assertions
......
......@@ -1032,19 +1032,6 @@ pars_update_statement(
node->pcur = &(plan->pcur);
}
if (!node->is_delete && node->searched_update
&& (node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE)
&& (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
/* The select node can perform the update in-place */
ut_a(plan->asc);
node->select_will_do_update = TRUE;
sel_node->select_will_do_update = TRUE;
sel_node->latch_mode = BTR_MODIFY_LEAF;
}
return(node);
}
......
......@@ -1200,7 +1200,6 @@ row_create_update_node_for_mysql(
node->in_mysql_interface = TRUE;
node->is_delete = FALSE;
node->searched_update = FALSE;
node->select_will_do_update = FALSE;
node->select = NULL;
node->pcur = btr_pcur_create_for_mysql();
node->table = table;
......
......@@ -230,9 +230,6 @@ sel_node_create(
node->common.type = QUE_NODE_SELECT;
node->state = SEL_NODE_OPEN;
node->select_will_do_update = FALSE;
node->latch_mode = BTR_SEARCH_LEAF;
node->plans = NULL;
return(node);
......@@ -793,7 +790,7 @@ row_sel_get_clust_rec(
index = dict_table_get_first_index(plan->table);
btr_pcur_open_with_no_init(index, plan->clust_ref, PAGE_CUR_LE,
node->latch_mode, &(plan->clust_pcur),
BTR_SEARCH_LEAF, &plan->clust_pcur,
0, mtr);
clust_rec = btr_pcur_get_rec(&(plan->clust_pcur));
......@@ -962,7 +959,6 @@ static
void
row_sel_open_pcur(
/*==============*/
sel_node_t* node, /* in: select node */
plan_t* plan, /* in: table plan */
ibool search_latch_locked,
/* in: TRUE if the thread currently
......@@ -1015,13 +1011,13 @@ row_sel_open_pcur(
/* Open pcur to the index */
btr_pcur_open_with_no_init(index, plan->tuple, plan->mode,
node->latch_mode, &(plan->pcur),
BTR_SEARCH_LEAF, &plan->pcur,
has_search_latch, mtr);
} else {
/* Open the cursor to the start or the end of the index
(FALSE: no init) */
btr_pcur_open_at_index_side(plan->asc, index, node->latch_mode,
btr_pcur_open_at_index_side(plan->asc, index, BTR_SEARCH_LEAF,
&(plan->pcur), FALSE, mtr);
}
......@@ -1043,7 +1039,6 @@ row_sel_restore_pcur_pos(
function (moved to the previous, in the case
of a descending cursor) without processing
again the current cursor record */
sel_node_t* node, /* in: select node */
plan_t* plan, /* in: table plan */
mtr_t* mtr) /* in: mtr */
{
......@@ -1054,7 +1049,7 @@ row_sel_restore_pcur_pos(
relative_position = btr_pcur_get_rel_pos(&(plan->pcur));
equal_position = btr_pcur_restore_position(node->latch_mode,
equal_position = btr_pcur_restore_position(BTR_SEARCH_LEAF,
&(plan->pcur), mtr);
/* If the cursor is traveling upwards, and relative_position is
......@@ -1173,7 +1168,7 @@ row_sel_try_search_shortcut(
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
row_sel_open_pcur(node, plan, TRUE, mtr);
row_sel_open_pcur(plan, TRUE, mtr);
rec = btr_pcur_get_rec(&(plan->pcur));
......@@ -1274,13 +1269,6 @@ row_sel(
ulint cost_counter = 0;
ibool cursor_just_opened;
ibool must_go_to_next;
ibool leaf_contains_updates = FALSE;
/* TRUE if select_will_do_update is
TRUE and the current clustered index
leaf page has been updated during
the current mtr: mtr must be committed
at the same time as the leaf x-latch
is released */
ibool mtr_has_extra_clust_latch = FALSE;
/* TRUE if the search was made using
a non-clustered index, and we had to
......@@ -1319,7 +1307,6 @@ table_loop:
node->fetch_table changes, and after adding a row to aggregate totals
and, of course, when this function is called. */
ut_ad(leaf_contains_updates == FALSE);
ut_ad(mtr_has_extra_clust_latch == FALSE);
plan = sel_node_get_nth_plan(node, node->fetch_table);
......@@ -1394,7 +1381,7 @@ table_loop:
/* Evaluate the expressions to build the search tuple and
open the cursor */
row_sel_open_pcur(node, plan, search_latch_locked, &mtr);
row_sel_open_pcur(plan, search_latch_locked, &mtr);
cursor_just_opened = TRUE;
......@@ -1403,7 +1390,7 @@ table_loop:
} else {
/* Restore pcur position to the index */
must_go_to_next = row_sel_restore_pcur_pos(node, plan, &mtr);
must_go_to_next = row_sel_restore_pcur_pos(plan, &mtr);
cursor_just_opened = FALSE;
......@@ -1744,28 +1731,6 @@ skip_lock:
ut_ad(plan->pcur.latch_mode == node->latch_mode);
if (node->select_will_do_update) {
/* This is a searched update and we can do the update in-place,
saving CPU time */
row_upd_in_place_in_select(node, thr, &mtr);
leaf_contains_updates = TRUE;
/* When the database is in the online backup mode, the number
of log records for a single mtr should be small: increment the
cost counter to ensure it */
cost_counter += 1 + (SEL_COST_LIMIT / 8);
if (plan->unique_search) {
goto table_exhausted;
}
goto next_rec;
}
if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
|| plan->unique_search || plan->no_prefetch
|| plan->table->big_rows) {
......@@ -1799,19 +1764,6 @@ next_rec:
goto commit_mtr_for_a_while;
}
if (leaf_contains_updates
&& btr_pcur_is_after_last_on_page(&plan->pcur)) {
/* We must commit &mtr if we are moving to a different page,
because we have done updates to the x-latched leaf page, and
the latch would be released in btr_pcur_move_to_next, without
&mtr getting committed there */
ut_ad(node->asc);
goto commit_mtr_for_a_while;
}
if (node->asc) {
moved = btr_pcur_move_to_next(&(plan->pcur), &mtr);
} else {
......@@ -1848,7 +1800,6 @@ next_table:
mtr_commit(&mtr);
leaf_contains_updates = FALSE;
mtr_has_extra_clust_latch = FALSE;
next_table_no_mtr:
......@@ -1889,7 +1840,6 @@ table_exhausted:
mtr_commit(&mtr);
leaf_contains_updates = FALSE;
mtr_has_extra_clust_latch = FALSE;
if (plan->n_rows_prefetched > 0) {
......@@ -1958,7 +1908,6 @@ commit_mtr_for_a_while:
mtr_commit(&mtr);
leaf_contains_updates = FALSE;
mtr_has_extra_clust_latch = FALSE;
#ifdef UNIV_SYNC_DEBUG
......
......@@ -275,7 +275,6 @@ upd_node_create(
node->common.type = QUE_NODE_UPDATE;
node->state = UPD_NODE_UPDATE_CLUSTERED;
node->select_will_do_update = FALSE;
node->in_mysql_interface = FALSE;
node->row = NULL;
......@@ -2186,66 +2185,3 @@ error_handling:
return(thr);
}
/*************************************************************************
Performs an in-place update for the current clustered index record in
select. */
UNIV_INTERN
void
row_upd_in_place_in_select(
/*=======================*/
sel_node_t* sel_node, /* in: select node */
que_thr_t* thr, /* in: query thread */
mtr_t* mtr) /* in: mtr */
{
upd_node_t* node;
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
ulint err;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
ut_ad(sel_node->select_will_do_update);
ut_ad(sel_node->latch_mode == BTR_MODIFY_LEAF);
ut_ad(sel_node->asc);
node = que_node_get_parent(sel_node);
ut_ad(que_node_get_type(node) == QUE_NODE_UPDATE);
pcur = node->pcur;
btr_cur = btr_pcur_get_btr_cur(pcur);
/* Copy the necessary columns from clust_rec and calculate the new
values to set */
row_upd_copy_columns(btr_pcur_get_rec(pcur),
rec_get_offsets(btr_pcur_get_rec(pcur),
btr_cur->index, offsets_,
ULINT_UNDEFINED, &heap),
UT_LIST_GET_FIRST(node->columns));
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
row_upd_eval_new_vals(node->update);
ut_ad(!rec_get_deleted_flag(
btr_pcur_get_rec(pcur),
dict_table_is_comp(btr_cur->index->table)));
ut_ad(node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE);
ut_ad(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
ut_ad(node->select_will_do_update);
err = btr_cur_update_in_place(BTR_NO_LOCKING_FLAG, btr_cur,
node->update, node->cmpl_info,
thr, mtr);
/* TODO: the above can fail with DB_ZIP_OVERFLOW if page_zip != NULL.
However, this function row_upd_in_place_in_select() is only invoked
when executing UPDATE statements of the built-in InnoDB SQL parser.
The built-in SQL is only used for InnoDB system tables, which
always are in the old, uncompressed format (ROW_FORMAT=REDUNDANT,
comp == FALSE, page_zip == NULL). */
ut_ad(err == DB_SUCCESS);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment