Commit 898dcf93 authored by Marko Mäkelä's avatar Marko Mäkelä

Clean up the lock creation

LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK, LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK,
LOCK_RELEASE_INTERVAL: Replace with the bare use of the constants.

lock_rec_create_low(): Remove LOCK_PAGE_BITMAP_MARGIN altogether.
We already have REDZONE_SIZE as a 'safety margin' in AddressSanitizer
builds, to catch any out-of-bounds access.

lock_prdt_add_to_queue(): Avoid a useless search when enqueueing
a waiting lock request.

lock_prdt_lock(): Reduce the size of the trx->mutex critical section.
parent 469da6c3
......@@ -120,24 +120,6 @@ operator<<(std::ostream& out, const ib_lock_t& lock)
extern ibool lock_print_waits;
#endif /* UNIV_DEBUG */
/** Restricts the length of search we will do in the waits-for
graph of transactions */
static const ulint LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK = 1000000;
/** Restricts the search depth we will do in the waits-for graph of
transactions */
static const ulint LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK = 200;
/** When releasing transaction locks, this specifies how often we release
the lock mutex for a moment to give also others access to it */
static const ulint LOCK_RELEASE_INTERVAL = 1000;
/* Safety margin when creating a new record lock: this many extra records
can be inserted to the page without need to create a lock with a bigger
bitmap */
static const ulint LOCK_PAGE_BITMAP_MARGIN = 64;
/* An explicit record lock affects both the record and the gap before it.
An implicit x-lock does not affect the gap, it only locks the index
record from read or update.
......
......@@ -120,10 +120,8 @@ class DeadlockChecker {
}
/** Check if the search is too deep. */
bool is_too_deep() const
{
return(m_n_elems > LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK
|| m_cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK);
bool is_too_deep() const {
return m_n_elems > 200 || m_cost > 1000000;
}
/** Save current state.
......@@ -1177,7 +1175,6 @@ lock_rec_create_low(
bool holds_trx_mutex)
{
lock_t* lock;
ulint n_bits;
ulint n_bytes;
lock_sys.mutex_assert_locked();
......@@ -1202,9 +1199,7 @@ lock_rec_create_low(
}
if (UNIV_LIKELY(!(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))) {
/* Make lock bitmap bigger by a safety margin */
n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN;
n_bytes = 1 + n_bits / 8;
n_bytes = (page_dir_get_n_heap(page) + 7) / 8;
} else {
ut_ad(heap_no == PRDT_HEAPNO);
......@@ -1554,11 +1549,11 @@ lock_rec_add_to_queue(
if (lock_get_wait(lock)
&& lock_rec_get_nth_bit(lock, heap_no)) {
break;
goto create;
}
}
if (lock == NULL && !(type_mode & LOCK_WAIT)) {
if (first_lock && !(type_mode & LOCK_WAIT)) {
/* Look for a similar record lock on the same page:
if one is found and there are no waiting lock requests,
......@@ -1575,6 +1570,7 @@ lock_rec_add_to_queue(
}
}
create:
lock_rec_create(
#ifdef WITH_WSREP
NULL, NULL,
......@@ -3850,7 +3846,7 @@ void lock_release(trx_t* trx)
lock_table_dequeue(lock);
}
if (count == LOCK_RELEASE_INTERVAL) {
if (count == 1000) {
/* Release the mutex for a while, so that we
do not monopolize it */
......
......@@ -437,8 +437,10 @@ lock_prdt_add_to_queue(
/*!< in: TRUE if caller owns the
transaction mutex */
{
const page_id_t id{block->page.id()};
lock_sys.mutex_assert_locked();
ut_ad(!dict_index_is_clust(index) && !dict_index_is_online_ddl(index));
ut_ad(index->is_spatial());
ut_ad(!dict_index_is_online_ddl(index));
ut_ad(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
#ifdef UNIV_DEBUG
......@@ -453,42 +455,31 @@ lock_prdt_add_to_queue(
type_mode |= LOCK_REC;
/* Look for a waiting lock request on the same record or on a gap */
lock_t* lock;
for (lock = lock_sys.get_first(*lock_hash_get(type_mode),
block->page.id());
lock != NULL;
lock = lock_rec_get_next_on_page(lock)) {
/* Try to extend a similar non-waiting lock on the same page */
if (type_mode & LOCK_WAIT) {
goto create;
}
for (lock_t* lock = lock_sys.get_first(*lock_hash_get(type_mode), id);
lock; lock = lock_rec_get_next_on_page(lock)) {
if (lock_get_wait(lock)
&& lock_rec_get_nth_bit(lock, PRDT_HEAPNO)
&& lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)) {
break;
goto create;
}
}
if (lock == NULL && !(type_mode & LOCK_WAIT)) {
/* Look for a similar record lock on the same page:
if one is found and there are no waiting lock requests,
we can just set the bit */
lock = lock_prdt_find_on_page(type_mode, block, prdt, trx);
if (lock != NULL) {
if (lock->type_mode & LOCK_PREDICATE) {
lock_prdt_enlarge_prdt(lock, prdt);
}
return(lock);
if (lock_t* lock = lock_prdt_find_on_page(type_mode, block,
prdt, trx)) {
if (lock->type_mode & LOCK_PREDICATE) {
lock_prdt_enlarge_prdt(lock, prdt);
}
return lock;
}
lock = lock_rec_create(
create:
lock_t* lock = lock_rec_create(
#ifdef WITH_WSREP
NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */
#endif
......@@ -640,7 +631,7 @@ lock_prdt_update_parent(
lock_prdt_add_to_queue(lock->type_mode,
left_block, lock->index,
lock->trx, lock_prdt,
FALSE);
false);
}
if (!lock_prdt_consistent(lock_prdt, right_prdt, op)
......@@ -648,7 +639,7 @@ lock_prdt_update_parent(
lock_prdt, lock->trx)) {
lock_prdt_add_to_queue(lock->type_mode, right_block,
lock->index, lock->trx,
lock_prdt, FALSE);
lock_prdt, false);
}
}
......@@ -673,16 +664,13 @@ lock_prdt_update_split_low(
for (lock = lock_sys.get_first(*lock_hash_get(type_mode), page_id);
lock;
lock = lock_rec_get_next_on_page(lock)) {
trx_t* trx = lock->trx;
/* First dealing with Page Lock */
if (lock->type_mode & LOCK_PRDT_PAGE) {
/* Duplicate the lock to new page */
trx->mutex.wr_lock();
lock_prdt_add_to_queue(lock->type_mode,
new_block,
lock->index,
trx, NULL, TRUE);
trx->mutex.wr_unlock();
lock->trx, nullptr, false);
continue;
}
......@@ -701,11 +689,9 @@ lock_prdt_update_split_low(
if (!lock_prdt_consistent(lock_prdt, new_prdt, op)) {
/* Move the lock to new page */
trx->mutex.wr_lock();
lock_prdt_add_to_queue(lock->type_mode, new_block,
lock->index, trx, lock_prdt,
TRUE);
trx->mutex.wr_unlock();
lock->index, lock->trx,
lock_prdt, false);
}
}
}
......@@ -808,8 +794,6 @@ lock_prdt_lock(
status = LOCK_REC_SUCCESS_CREATED;
} else {
trx->mutex.wr_lock();
if (lock_rec_get_next_on_page(lock)
|| lock->trx != trx
|| lock->type_mode != (LOCK_REC | prdt_mode)
......@@ -817,6 +801,7 @@ lock_prdt_lock(
|| ((type_mode & LOCK_PREDICATE)
&& (!lock_prdt_consistent(
lock_get_prdt_from_lock(lock), prdt, 0)))) {
trx->mutex.wr_lock();
lock = lock_prdt_has_lock(
mode, type_mode, block, prdt, trx);
......@@ -850,8 +835,6 @@ lock_prdt_lock(
trx->mutex.wr_unlock();
} else {
trx->mutex.wr_unlock();
if (!lock_rec_get_nth_bit(lock, PRDT_HEAPNO)) {
lock_rec_set_nth_bit(lock, PRDT_HEAPNO);
status = LOCK_REC_SUCCESS_CREATED;
......@@ -879,9 +862,9 @@ lock_place_prdt_page_lock(
que_thr_t* thr) /*!< in: query thread */
{
ut_ad(thr != NULL);
ut_ad(!srv_read_only_mode);
ut_ad(!high_level_read_only);
ut_ad(!dict_index_is_clust(index));
ut_ad(index->is_spatial());
ut_ad(!dict_index_is_online_ddl(index));
/* Another transaction cannot have an implicit lock on the record,
......@@ -967,7 +950,7 @@ lock_prdt_rec_move(
lock_prdt_add_to_queue(
type_mode, receiver, lock->index, lock->trx,
lock_prdt, FALSE);
lock_prdt, false);
}
lock_sys.mutex_unlock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment