Commit 2ae364a5 authored by marko's avatar marko

branches/innodb+: Merge revisions 3931:4006 from branches/zip:

  ------------------------------------------------------------------------
  r3938 | marko | 2009-01-15 10:28:23 +0200 (Thu, 15 Jan 2009) | 3 lines

  branches/zip: buf_LRU_invalidate_tablespace(), buf_LRU_free_block():
  Add comments and assertions that buf_LRU_block_remove_hashed_page()
  will release block_mutex when it returns BUF_BLOCK_ZIP_FREE.
  ------------------------------------------------------------------------
  r3939 | marko | 2009-01-15 10:37:51 +0200 (Thu, 15 Jan 2009) | 7 lines

  branches/zip: buf0lru.c: Improve debug assertions.

  buf_LRU_block_free_non_file_page(): ut_ad(block) before dereferencing block.

  buf_LRU_block_remove_hashed_page(): Forbid buf_pool_mutex_exit() while
  calling buf_buddy_free().  Callers of buf_LRU_block_remove_hashed_page()
  assume that the buffer pool mutex will not be released and reacquired.
  ------------------------------------------------------------------------
  r3944 | vasil | 2009-01-15 21:15:00 +0200 (Thu, 15 Jan 2009) | 4 lines

  branches/zip:

  Add ChangeLog entries for the bug fixes in r3911 and r3930.
  ------------------------------------------------------------------------
  r3958 | marko | 2009-01-16 14:53:40 +0200 (Fri, 16 Jan 2009) | 8 lines

  branches/zip: Add assertions that the kernel_mutex is being held
  while accessing table->locks or un_member.tab_lock.locks.
  This is related to Issue #158.  According to static analysis,
  the added debug assertions should always hold.

  lock_table_has_to_wait_in_queue(), lock_queue_iterator_reset(),
  lock_queue_iterator_get_prev(), add_trx_relevant_locks_to_cache(),
  fetch_data_into_cache(): Add ut_ad(mutex_own(&kernel_mutex)).
  ------------------------------------------------------------------------
  r4006 | marko | 2009-01-20 16:29:22 +0200 (Tue, 20 Jan 2009) | 33 lines

  branches/zip: Merge revisions 3930:4005 from branches/5.1:

    ------------------------------------------------------------------------
    r4004 | marko | 2009-01-20 16:19:00 +0200 (Tue, 20 Jan 2009) | 12 lines

    branches/5.1: Merge r4003 from branches/5.0:

    rec_set_nth_field(): When the field already is SQL null,
    do nothing when it is being changed to SQL null. (Bug #41571)

    Normally, MySQL does not pass "do-nothing" updates to the storage engine.
    When it does and a column of an InnoDB table that is in ROW_FORMAT=COMPACT
    is being updated from NULL to NULL, the InnoDB buffer pool will be corrupted
    without this fix.

    rb://81 approved by Heikki Tuuri
    ------------------------------------------------------------------------
    r4005 | marko | 2009-01-20 16:22:36 +0200 (Tue, 20 Jan 2009) | 8 lines

    branches/5.1: lock_is_table_exclusive(): Acquire kernel_mutex before
    accessing table->locks and release kernel_mutex before returning from
    the function.  This fixes a potential race condition in the
    "commit every 10,000 rows" in ALTER TABLE, CREATE INDEX, DROP INDEX,
    and OPTIMIZE TABLE. (Bug #42152)

    rb://80 approved by Heikki Tuuri
    ------------------------------------------------------------------------
parent 86b85252
2009-01-14 The InnoDB Team
* include/trx0roll.h, trx/trx0roll.c, trx/trx0trx.c:
Fix Bug#38187 Error 153 when creating savepoints
2009-01-14 The InnoDB Team
* dict/dict0load.c:
Fix Bug#42075 dict_load_indexes failure in dict_load_table will
corrupt the dictionary cache
2009-01-13 The InnoDB Team
* include/hash0hash.h, include/dict0dict.ic, dict/dict0dict.c,
......
...@@ -377,6 +377,12 @@ scan_again: ...@@ -377,6 +377,12 @@ scan_again:
buf_LRU_block_free_hashed_page((buf_block_t*) buf_LRU_block_free_hashed_page((buf_block_t*)
bpage); bpage);
} else { } else {
/* The block_mutex should have been
released by buf_LRU_block_remove_hashed_page()
when it returns BUF_BLOCK_ZIP_FREE. */
ut_ad(block_mutex == &buf_pool_zip_mutex);
ut_ad(!mutex_own(block_mutex));
/* The compressed block descriptor /* The compressed block descriptor
(bpage) has been deallocated and (bpage) has been deallocated and
block_mutex released. Also, block_mutex released. Also,
...@@ -1523,6 +1529,10 @@ alloc: ...@@ -1523,6 +1529,10 @@ alloc:
buf_LRU_block_free_hashed_page((buf_block_t*) bpage); buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
} else { } else {
/* The block_mutex should have been released by
buf_LRU_block_remove_hashed_page() when it returns
BUF_BLOCK_ZIP_FREE. */
ut_ad(block_mutex == &buf_pool_zip_mutex);
mutex_enter(block_mutex); mutex_enter(block_mutex);
} }
...@@ -1539,9 +1549,9 @@ buf_LRU_block_free_non_file_page( ...@@ -1539,9 +1549,9 @@ buf_LRU_block_free_non_file_page(
{ {
void* data; void* data;
ut_ad(block);
ut_ad(buf_pool_mutex_own()); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(&block->mutex)); ut_ad(mutex_own(&block->mutex));
ut_ad(block);
switch (buf_block_get_state(block)) { switch (buf_block_get_state(block)) {
case BUF_BLOCK_MEMORY: case BUF_BLOCK_MEMORY:
...@@ -1761,7 +1771,9 @@ buf_LRU_block_remove_hashed_page( ...@@ -1761,7 +1771,9 @@ buf_LRU_block_remove_hashed_page(
bpage->zip.data = NULL; bpage->zip.data = NULL;
mutex_exit(&((buf_block_t*) bpage)->mutex); mutex_exit(&((buf_block_t*) bpage)->mutex);
buf_pool_mutex_exit_forbid();
buf_buddy_free(data, page_zip_get_size(&bpage->zip)); buf_buddy_free(data, page_zip_get_size(&bpage->zip));
buf_pool_mutex_exit_allow();
mutex_enter(&((buf_block_t*) bpage)->mutex); mutex_enter(&((buf_block_t*) bpage)->mutex);
page_zip_set_size(&bpage->zip, 0); page_zip_set_size(&bpage->zip, 0);
} }
......
...@@ -506,8 +506,9 @@ rec_offs_n_extern( ...@@ -506,8 +506,9 @@ rec_offs_n_extern(
/*************************************************************** /***************************************************************
This is used to modify the value of an already existing field in a record. This is used to modify the value of an already existing field in a record.
The previous value must have exactly the same size as the new value. If len The previous value must have exactly the same size as the new value. If len
is UNIV_SQL_NULL then the field is treated as an SQL null for old-style is UNIV_SQL_NULL then the field is treated as an SQL null.
records. For new-style records, len must not be UNIV_SQL_NULL. */ For records in ROW_FORMAT=COMPACT (new-style records), len must not be
UNIV_SQL_NULL unless the field already is SQL null. */
UNIV_INLINE UNIV_INLINE
void void
rec_set_nth_field( rec_set_nth_field(
...@@ -516,11 +517,7 @@ rec_set_nth_field( ...@@ -516,11 +517,7 @@ rec_set_nth_field(
const ulint* offsets,/* in: array returned by rec_get_offsets() */ const ulint* offsets,/* in: array returned by rec_get_offsets() */
ulint n, /* in: index number of the field */ ulint n, /* in: index number of the field */
const void* data, /* in: pointer to the data if not SQL null */ const void* data, /* in: pointer to the data if not SQL null */
ulint len); /* in: length of the data or UNIV_SQL_NULL. ulint len); /* in: length of the data or UNIV_SQL_NULL */
If not SQL null, must have the same
length as the previous value.
If SQL null, previous value must be
SQL null. */
/************************************************************** /**************************************************************
The following function returns the data size of an old-style physical The following function returns the data size of an old-style physical
record, that is the sum of field lengths. SQL null fields record, that is the sum of field lengths. SQL null fields
......
...@@ -1326,8 +1326,9 @@ rec_get_nth_field_size( ...@@ -1326,8 +1326,9 @@ rec_get_nth_field_size(
/*************************************************************** /***************************************************************
This is used to modify the value of an already existing field in a record. This is used to modify the value of an already existing field in a record.
The previous value must have exactly the same size as the new value. If len The previous value must have exactly the same size as the new value. If len
is UNIV_SQL_NULL then the field is treated as an SQL null for old-style is UNIV_SQL_NULL then the field is treated as an SQL null.
records. For new-style records, len must not be UNIV_SQL_NULL. */ For records in ROW_FORMAT=COMPACT (new-style records), len must not be
UNIV_SQL_NULL unless the field already is SQL null. */
UNIV_INLINE UNIV_INLINE
void void
rec_set_nth_field( rec_set_nth_field(
...@@ -1337,11 +1338,7 @@ rec_set_nth_field( ...@@ -1337,11 +1338,7 @@ rec_set_nth_field(
ulint n, /* in: index number of the field */ ulint n, /* in: index number of the field */
const void* data, /* in: pointer to the data const void* data, /* in: pointer to the data
if not SQL null */ if not SQL null */
ulint len) /* in: length of the data or UNIV_SQL_NULL. ulint len) /* in: length of the data or UNIV_SQL_NULL */
If not SQL null, must have the same
length as the previous value.
If SQL null, previous value must be
SQL null. */
{ {
byte* data2; byte* data2;
ulint len2; ulint len2;
...@@ -1349,9 +1346,11 @@ rec_set_nth_field( ...@@ -1349,9 +1346,11 @@ rec_set_nth_field(
ut_ad(rec); ut_ad(rec);
ut_ad(rec_offs_validate(rec, NULL, offsets)); ut_ad(rec_offs_validate(rec, NULL, offsets));
if (len == UNIV_SQL_NULL) { if (UNIV_UNLIKELY(len == UNIV_SQL_NULL)) {
ut_ad(!rec_offs_comp(offsets)); if (!rec_offs_nth_sql_null(offsets, n)) {
rec_set_nth_field_sql_null(rec, n); ut_a(!rec_offs_comp(offsets));
rec_set_nth_field_sql_null(rec, n);
}
return; return;
} }
......
...@@ -15,6 +15,9 @@ Created July 16, 2007 Vasil Dimov ...@@ -15,6 +15,9 @@ Created July 16, 2007 Vasil Dimov
#include "lock0priv.h" #include "lock0priv.h"
#include "ut0dbg.h" #include "ut0dbg.h"
#include "ut0lst.h" #include "ut0lst.h"
#ifdef UNIV_DEBUG
# include "srv0srv.h" /* kernel_mutex */
#endif /* UNIV_DEBUG */
/*********************************************************************** /***********************************************************************
Initialize lock queue iterator so that it starts to iterate from Initialize lock queue iterator so that it starts to iterate from
...@@ -34,6 +37,8 @@ lock_queue_iterator_reset( ...@@ -34,6 +37,8 @@ lock_queue_iterator_reset(
ulint bit_no) /* in: record number in the ulint bit_no) /* in: record number in the
heap */ heap */
{ {
ut_ad(mutex_own(&kernel_mutex));
iter->current_lock = lock; iter->current_lock = lock;
if (bit_no != ULINT_UNDEFINED) { if (bit_no != ULINT_UNDEFINED) {
...@@ -68,6 +73,8 @@ lock_queue_iterator_get_prev( ...@@ -68,6 +73,8 @@ lock_queue_iterator_get_prev(
{ {
const lock_t* prev_lock; const lock_t* prev_lock;
ut_ad(mutex_own(&kernel_mutex));
switch (lock_get_type_low(iter->current_lock)) { switch (lock_get_type_low(iter->current_lock)) {
case LOCK_REC: case LOCK_REC:
prev_lock = lock_rec_get_prev( prev_lock = lock_rec_get_prev(
......
...@@ -699,7 +699,10 @@ lock_is_table_exclusive( ...@@ -699,7 +699,10 @@ lock_is_table_exclusive(
const lock_t* lock; const lock_t* lock;
ibool ok = FALSE; ibool ok = FALSE;
ut_ad(table && trx); ut_ad(table);
ut_ad(trx);
lock_mutex_enter_kernel();
for (lock = UT_LIST_GET_FIRST(table->locks); for (lock = UT_LIST_GET_FIRST(table->locks);
lock; lock;
...@@ -707,7 +710,7 @@ lock_is_table_exclusive( ...@@ -707,7 +710,7 @@ lock_is_table_exclusive(
if (lock->trx != trx) { if (lock->trx != trx) {
/* A lock on the table is held /* A lock on the table is held
by some other transaction. */ by some other transaction. */
return(FALSE); goto not_ok;
} }
if (!(lock_get_type_low(lock) & LOCK_TABLE)) { if (!(lock_get_type_low(lock) & LOCK_TABLE)) {
...@@ -724,11 +727,16 @@ lock_is_table_exclusive( ...@@ -724,11 +727,16 @@ lock_is_table_exclusive(
auto_increment lock. */ auto_increment lock. */
break; break;
default: default:
not_ok:
/* Other table locks than LOCK_IX are not allowed. */ /* Other table locks than LOCK_IX are not allowed. */
return(FALSE); ok = FALSE;
goto func_exit;
} }
} }
func_exit:
lock_mutex_exit_kernel();
return(ok); return(ok);
} }
...@@ -3834,6 +3842,7 @@ lock_table_has_to_wait_in_queue( ...@@ -3834,6 +3842,7 @@ lock_table_has_to_wait_in_queue(
dict_table_t* table; dict_table_t* table;
lock_t* lock; lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_wait(wait_lock)); ut_ad(lock_get_wait(wait_lock));
table = wait_lock->un_member.tab_lock.table; table = wait_lock->un_member.tab_lock.table;
......
...@@ -954,6 +954,8 @@ add_trx_relevant_locks_to_cache( ...@@ -954,6 +954,8 @@ add_trx_relevant_locks_to_cache(
requested lock row, or NULL or requested lock row, or NULL or
undefined */ undefined */
{ {
ut_ad(mutex_own(&kernel_mutex));
/* If transaction is waiting we add the wait lock and all locks /* If transaction is waiting we add the wait lock and all locks
from another transactions that are blocking the wait lock. */ from another transactions that are blocking the wait lock. */
if (trx->que_state == TRX_QUE_LOCK_WAIT) { if (trx->que_state == TRX_QUE_LOCK_WAIT) {
...@@ -1095,6 +1097,8 @@ fetch_data_into_cache( ...@@ -1095,6 +1097,8 @@ fetch_data_into_cache(
i_s_trx_row_t* trx_row; i_s_trx_row_t* trx_row;
i_s_locks_row_t* requested_lock_row; i_s_locks_row_t* requested_lock_row;
ut_ad(mutex_own(&kernel_mutex));
trx_i_s_cache_clear(cache); trx_i_s_cache_clear(cache);
/* We iterate over the list of all transactions and add each one /* We iterate over the list of all transactions and add each one
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment