Commit 7f741204 authored by irana

branches/innodb+

Merge r6915:6992 from branches/innodb+multibp (i.e., all the changes
made since its creation).

This also reverts r6930 in branches/innodb+, because a different
solution for that issue is already present in innodb+multibp, which
is being merged.

After this commit branches/innodb+multibp should be discarded
and this branch should become our main development tree.

  ------------------------------------------------------------------------
  r6915 | sbains | 2010-03-31 07:33:43 +0300 (Wed, 31 Mar 2010) | 1 line
  Changed paths:
     A /branches/innodb+multibp (from /branches/innodb+:6914)
  
  Creating a branch for the multiple buffer pool
  ------------------------------------------------------------------------
  r6916 | sbains | 2010-03-31 08:21:00 +0300 (Wed, 31 Mar 2010) | 3 lines
  Changed paths:
     M /branches/innodb+multibp/CMakeLists.txt
     M /branches/innodb+multibp/btr/btr0btr.c
     M /branches/innodb+multibp/btr/btr0cur.c
     M /branches/innodb+multibp/btr/btr0sea.c
     M /branches/innodb+multibp/buf/buf0buddy.c
     M /branches/innodb+multibp/buf/buf0buf.c
     M /branches/innodb+multibp/buf/buf0flu.c
     M /branches/innodb+multibp/buf/buf0lru.c
     M /branches/innodb+multibp/buf/buf0rea.c
     M /branches/innodb+multibp/handler/ha_innodb.cc
     M /branches/innodb+multibp/handler/i_s.cc
     M /branches/innodb+multibp/ibuf/ibuf0ibuf.c
     M /branches/innodb+multibp/include/buf0buddy.h
     M /branches/innodb+multibp/include/buf0buddy.ic
     M /branches/innodb+multibp/include/buf0buf.h
     M /branches/innodb+multibp/include/buf0buf.ic
     M /branches/innodb+multibp/include/buf0flu.h
     M /branches/innodb+multibp/include/buf0flu.ic
     M /branches/innodb+multibp/include/buf0lru.h
     M /branches/innodb+multibp/include/buf0rea.h
     M /branches/innodb+multibp/include/buf0types.h
     M /branches/innodb+multibp/include/ibuf0ibuf.ic
     M /branches/innodb+multibp/include/srv0srv.h
     M /branches/innodb+multibp/include/univ.i
     M /branches/innodb+multibp/log/log0log.c
     M /branches/innodb+multibp/log/log0recv.c
     M /branches/innodb+multibp/mem/mem0mem.c
     M /branches/innodb+multibp/page/page0zip.c
     M /branches/innodb+multibp/srv/srv0srv.c
     M /branches/innodb+multibp/srv/srv0start.c
     M /branches/innodb+multibp/trx/trx0trx.c
     M /branches/innodb+multibp/trx/trx0undo.c
  
  branches/innodb+multibp: Unable to crash it with UNIV_DEBUG and UNIV_SYNC_DEBUG
  enabled, using both ibtests and Sysbench. The patch now needs a workout from
  Michael.
  
  ------------------------------------------------------------------------
  r6917 | sbains | 2010-03-31 08:56:18 +0300 (Wed, 31 Mar 2010) | 2 lines
  Changed paths:
     M /branches/innodb+multibp/handler/ha_innodb.cc
  
  branches/innodb+multibp: Fix error introduced in r6916.
  
  ------------------------------------------------------------------------
  r6923 | sbains | 2010-03-31 15:16:04 +0300 (Wed, 31 Mar 2010) | 3 lines
  Changed paths:
     M /branches/innodb+multibp/btr/btr0cur.c
     M /branches/innodb+multibp/buf/buf0buddy.c
     M /branches/innodb+multibp/buf/buf0buf.c
     M /branches/innodb+multibp/buf/buf0flu.c
     M /branches/innodb+multibp/buf/buf0lru.c
     M /branches/innodb+multibp/include/buf0buddy.ic
     M /branches/innodb+multibp/include/buf0buf.h
     M /branches/innodb+multibp/include/buf0buf.ic
     M /branches/innodb+multibp/include/buf0flu.ic
     M /branches/innodb+multibp/page/page0zip.c
  
  branches/innodb+multibp: Fix whitespace issues. Add function
  buf_pool_from_block(). Add some comments to parameters.
  
  ------------------------------------------------------------------------
  r6932 | sbains | 2010-04-01 01:12:07 +0300 (Thu, 01 Apr 2010) | 4 lines
  Changed paths:
     M /branches/innodb+multibp/include/buf0buf.ic
     M /branches/innodb+multibp/include/univ.i
  
  branches/innodb+multibp: Remove bogus assertion. It's possible for the space
  and offset of a page to be undefined during its lifecycle. Remove the debug
  #defines from univ.i.
  
  ------------------------------------------------------------------------
  r6933 | sbains | 2010-04-01 01:22:40 +0300 (Thu, 01 Apr 2010) | 2 lines
  Changed paths:
     M /branches/innodb+multibp/srv/srv0start.c
  
  branches/innodb+multibp: Fix whitespace issues.
  
  ------------------------------------------------------------------------
  r6934 | sbains | 2010-04-01 01:53:18 +0300 (Thu, 01 Apr 2010) | 2 lines
  Changed paths:
     M /branches/innodb+multibp/CMakeLists.txt
     M /branches/innodb+multibp/ChangeLog
     M /branches/innodb+multibp/buf/buf0buf.c
     M /branches/innodb+multibp/buf/buf0flu.c
     M /branches/innodb+multibp/handler/ha_innodb.cc
     M /branches/innodb+multibp/include/buf0buf.h
     M /branches/innodb+multibp/include/buf0buf.ic
     M /branches/innodb+multibp/include/buf0flu.ic
     M /branches/innodb+multibp/include/srv0srv.h
     M /branches/innodb+multibp/include/sync0sync.h
     M /branches/innodb+multibp/include/trx0purge.h
     M /branches/innodb+multibp/include/ut0ut.h
     M /branches/innodb+multibp/include/ut0ut.ic
     M /branches/innodb+multibp/lock/lock0lock.c
     M /branches/innodb+multibp/log/log0recv.c
     M /branches/innodb+multibp/mtr/mtr0mtr.c
     M /branches/innodb+multibp/mysql-test/innodb_bug38231.test
     A /branches/innodb+multibp/mysql-test/innodb_bug51920.result (from /branches/innodb+/mysql-test/innodb_bug51920.result:6931)
     A /branches/innodb+multibp/mysql-test/innodb_bug51920.test (from /branches/innodb+/mysql-test/innodb_bug51920.test:6931)
     M /branches/innodb+multibp/row/row0sel.c
     M /branches/innodb+multibp/srv/srv0srv.c
     M /branches/innodb+multibp/srv/srv0start.c
     M /branches/innodb+multibp/sync/sync0sync.c
     M /branches/innodb+multibp/trx/trx0purge.c
  
  branches/innodb+multibp: Merge revisions r6914:6931 from branches/innodb+
  
  ------------------------------------------------------------------------
  r6935 | sbains | 2010-04-01 02:08:32 +0300 (Thu, 01 Apr 2010) | 3 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0flu.c
     M /branches/innodb+multibp/include/buf0flu.ic
     M /branches/innodb+multibp/mtr/mtr0mtr.c
  
  branches/innodb+multibp: Fix the debug assertions for flush order mutex. These
  were missed in r6934.
  
  ------------------------------------------------------------------------
  r6936 | sbains | 2010-04-01 02:46:52 +0300 (Thu, 01 Apr 2010) | 4 lines
  Changed paths:
     M /branches/innodb+multibp/sync/sync0sync.c
  
  branches/innodb+multibp: Because we now have multiple instances of a mutex
  at the same level, and these mutexes can be acquired simultaneously, we
  can't simply check for <= level. We need to check for <= level - 1.
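  
  A minimal sketch of the strictness change (the function and parameter
  names are hypothetical, not the actual sync0sync.c code):
  
      static int
      latch_order_ok(unsigned long held_level, unsigned long level)
      {
              /* Old form of the generic test was:
              return(held_level <= level);
              with multiple same-level buffer pool mutexes held at
              once this is no longer sufficient, so the strict form
              is used: */
              return(held_level <= level - 1);
      }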
  
  ------------------------------------------------------------------------
  r6937 | sbains | 2010-04-01 04:40:17 +0300 (Thu, 01 Apr 2010) | 5 lines
  Changed paths:
     M /branches/innodb+multibp/trx/trx0purge.c
  
  branches/innodb+multibp: We need to check whether the history list length is
  greater than some threshold, not whether it is evenly divisible by some
  batch size. While running tests on dscczz01 I observed that the purge
  thread can't keep up with the generation of UNDO log records because of the
  faster code.
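  
  A sketch of the trigger change (the constant and helper names are
  hypothetical stand-ins for the trx0purge.c code):
  
      #define PURGE_BATCH_SIZE	20
      #define PURGE_THRESHOLD	128
      
      static int
      should_run_purge(unsigned long history_len)
      {
              /* Old, fragile trigger: fired only when the length
              landed exactly on a batch boundary, which a fast-growing
              history list can step right over:
              return(history_len % PURGE_BATCH_SIZE == 0); */
      
              /* New trigger: run whenever enough UNDO history
              has accumulated. */
              return(history_len > PURGE_THRESHOLD);
      }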
  
  ------------------------------------------------------------------------
  r6938 | irana | 2010-04-01 10:15:00 +0300 (Thu, 01 Apr 2010) | 7 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0buf.c
     M /branches/innodb+multibp/buf/buf0flu.c
     M /branches/innodb+multibp/include/buf0buf.h
     M /branches/innodb+multibp/include/buf0buf.ic
     M /branches/innodb+multibp/include/buf0flu.h
     M /branches/innodb+multibp/include/buf0flu.ic
     M /branches/innodb+multibp/include/log0log.h
     M /branches/innodb+multibp/include/sync0sync.h
     M /branches/innodb+multibp/log/log0log.c
     M /branches/innodb+multibp/log/log0recv.c
     M /branches/innodb+multibp/mtr/mtr0mtr.c
     M /branches/innodb+multibp/sync/sync0sync.c
  
  branches/innodb+multibp
  
  The buf_flush_order patch that was ported in from 1.1 won't work with
  multiple buffer pools. This patch moves the mutex protecting the order of
  insertion into the flush list(s) to the log_sys struct, so that we can have
  one global mutex protecting insertions into all flush list(s).
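  
  A compilable sketch of the idea, using a pthread mutex in place of the
  real log_sys member (the names and placement here are assumptions, not
  the actual mtr0mtr.c code):
  
      #include <pthread.h>
      
      /* One global mutex, conceptually owned by log_sys, serializes
      insertions into every buffer pool instance's flush list, so each
      list stays ordered by oldest modification LSN. */
      static pthread_mutex_t log_flush_order_mutex =
              PTHREAD_MUTEX_INITIALIZER;
      
      static void
      note_modification(unsigned long long start_lsn, int buf_pool_id)
      {
              pthread_mutex_lock(&log_flush_order_mutex);
      
              /* Append the dirty block to the flush list of the
              instance that owns it; because every inserter holds the
              same global mutex, no list can fall out of LSN order. */
              (void) start_lsn;
              (void) buf_pool_id;
      
              pthread_mutex_unlock(&log_flush_order_mutex);
      }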
  
  ------------------------------------------------------------------------
  r6941 | sbains | 2010-04-02 00:51:28 +0300 (Fri, 02 Apr 2010) | 4 lines
  Changed paths:
     M /branches/innodb+multibp/lock/lock0lock.c
  
  branches/innodb+multibp: We should get the record heap no to check
  recursively only if we are checking a record lock. Prior to this fix we were
  doing it for table locks as well, which was a bug.
  
  ------------------------------------------------------------------------
  r6942 | csun | 2010-04-02 02:39:10 +0300 (Fri, 02 Apr 2010) | 4 lines
  Changed paths:
     M /branches/innodb+multibp/ha/ha0ha.c
  
  branches/innodb+multibp: fix compiler errors on Windows.
  Move ut_ad() to after declarations for C file.
  
  
  ------------------------------------------------------------------------
  r6943 | sbains | 2010-04-03 05:14:25 +0300 (Sat, 03 Apr 2010) | 2 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0buf.c
  
  branches/innodb+multibp: Remove the code that created the fake buffer pool.
  
  ------------------------------------------------------------------------
  r6945 | irana | 2010-04-05 23:35:29 +0300 (Mon, 05 Apr 2010) | 5 lines
  Changed paths:
     M /branches/innodb+multibp/lock/lock0lock.c
  
  branches/innodb+multibp
  
  Revert r6941 as it does not resolve the issue; we have to take
  back the whole fix for bug#49047.
  
  ------------------------------------------------------------------------
  r6946 | irana | 2010-04-05 23:50:42 +0300 (Mon, 05 Apr 2010) | 6 lines
  Changed paths:
     M /branches/innodb+multibp/include/ut0ut.h
     M /branches/innodb+multibp/include/ut0ut.ic
     M /branches/innodb+multibp/lock/lock0lock.c
  
  branches/innodb+multibp
  
  Merged revisions 6932:6944 from branches/innodb+
  
  This solely includes the reversal of the fix for bug#49047.
  
  ------------------------------------------------------------------------
  r6947 | sbains | 2010-04-06 01:33:46 +0300 (Tue, 06 Apr 2010) | 3 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0lru.c
  
  branches/innodb+multibp: Remove the log sys mutex acquisition when doing
  buffer pool stat aggregation. A dirty read here should suffice.
  
  ------------------------------------------------------------------------
  r6951 | irana | 2010-04-06 17:25:29 +0300 (Tue, 06 Apr 2010) | 5 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0buf.c
  
  branches/innodb+multibp
  
  Initialize the buf_page_t::buf_pool pointer when the descriptor is
  allocated using buf_buddy_alloc().
  
  ------------------------------------------------------------------------
  r6954 | jyang | 2010-04-06 21:24:46 +0300 (Tue, 06 Apr 2010) | 4 lines
  Changed paths:
     M /branches/innodb+multibp/handler/ha_innodb.cc
  
  branches/innodb+multibp: Fix a possible NULL index_mapping pointer
  caused by a race condition.
  
  
  ------------------------------------------------------------------------
  r6958 | sbains | 2010-04-07 00:27:44 +0300 (Wed, 07 Apr 2010) | 3 lines
  Changed paths:
     M /branches/innodb+multibp/include/ut0mem.h
     M /branches/innodb+multibp/ut/ut0mem.c
  
  branches/innodb+multibp: Fix part of Bug#52546. We allow ut_free() to accept
  a NULL pointer and treat it as a nop.
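  
  A sketch of the new contract, mirroring free(3) (the real ut_free() in
  ut/ut0mem.c also updates allocation bookkeeping, omitted here):
  
      #include <stdlib.h>
      
      void
      ut_free_sketch(void* ptr)
      {
              if (ptr == NULL) {
                      return;	/* nop, exactly like free(NULL) */
              }
      
              free(ptr);
      }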
  
  ------------------------------------------------------------------------
  r6961 | jyang | 2010-04-07 10:50:03 +0300 (Wed, 07 Apr 2010) | 9 lines
  Changed paths:
     M /branches/innodb+multibp/handler/ha_innodb.cc
  
  branches/innodb+multibp: Fix for bug #52580: Crash in
  ha_innobase::open on executing INSERT with concurrent ALTER TABLE.
  The change for MySQL bug #51557 releases the mutex LOCK_open before
  ha_innobase::open(), causing a race condition in index translation
  table creation. Fix it by acquiring the dict_sys mutex around the
  operation.
  
  rb://283, approved by Marko.
  
  
  ------------------------------------------------------------------------
  r6963 | irana | 2010-04-07 19:14:10 +0300 (Wed, 07 Apr 2010) | 15 lines
  Changed paths:
     M /branches/innodb+multibp/handler/ha_innodb.cc
  
  branches/innodb+multibp
  
  Force setting of buf_pool->LRU_old_ratio by calling
  buf_LRU_old_ratio_update() with adjust set to TRUE. This will make sure
  that we grab the buf_pool mutex and actually adjust the
  buf_pool->LRU_old pointer instead of just updating the
  buf_pool->LRU_old_ratio.
  
  Note that after this change there is no call to
  buf_LRU_old_ratio_update() with adjust set to FALSE and therefore
  this parameter should be removed. I am keeping it for now to first
  make sure that the fix does work.
  
  Approved by: No one. Sunny agreed with my hypothesis of the problem.
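  
  The resulting call in ha_innodb.cc (it also appears verbatim in the
  diff further below); TRUE is the adjust flag discussed above:
  
      innobase_old_blocks_pct = buf_LRU_old_ratio_update(
              innobase_old_blocks_pct, TRUE);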
  
  ------------------------------------------------------------------------
  r6964 | irana | 2010-04-07 19:59:59 +0300 (Wed, 07 Apr 2010) | 5 lines
  Changed paths:
     M /branches/innodb+multibp/handler/ha_innodb.cc
  
  branches/innodb+multibp
  
  Remove an overly strong assertion on behalf of Jimmy.
  
  
  ------------------------------------------------------------------------
  r6971 | sbains | 2010-04-09 13:23:33 +0300 (Fri, 09 Apr 2010) | 6 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0buf.c
  
  branches/innodb+multibp: When getting the oldest (minimum) LSN value from all
  the flush lists we need to acquire the flush list mutex. We were incorrectly
  acquiring the buffer pool mutex.
  
  This patch should fix a slew of bugs reported by Michael.
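  
  A sketch of the corrected locking for one buffer pool instance (InnoDB
  types assumed; the enter/exit macro names are inferred from the
  buf_flush_list_mutex_own() assertion visible in the buf0flu.ic diff
  below, not confirmed by this log):
  
      static ib_uint64_t
      flush_list_min_lsn(buf_pool_t* buf_pool)
      {
              buf_page_t*	bpage;
              ib_uint64_t	lsn = 0;
      
              /* Was (incorrectly): buf_pool_mutex_enter(buf_pool) */
              buf_flush_list_mutex_enter(buf_pool);
      
              /* The flush list is kept ordered by oldest_modification,
              so its last page carries this instance's minimum. */
              bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
      
              if (bpage != NULL) {
                      lsn = bpage->oldest_modification;
              }
      
              buf_flush_list_mutex_exit(buf_pool);
      
              return(lsn);
      }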
  
  ------------------------------------------------------------------------
  r6972 | sbains | 2010-04-10 00:25:09 +0300 (Sat, 10 Apr 2010) | 5 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0buf.c
  
  branches/innodb+multibp: We should not reset the lsn to 0 when we encounter
  an empty flush list. The oldest LSN should be 0 only when all flush lists
  are empty. For example, without this fix, if even one flush list was empty
  we would end up breaking WAL.
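  
  A sketch of the aggregation rule (InnoDB types assumed;
  flush_list_min_lsn() is the per-instance helper sketched under r6971
  above):
  
      static ib_uint64_t
      buf_pool_oldest_lsn(void)
      {
              ib_uint64_t	oldest = 0;
              ulint		i;
      
              for (i = 0; i < srv_buf_pool_instances; i++) {
                      ib_uint64_t	lsn;
      
                      lsn = flush_list_min_lsn(buf_pool_from_array(i));
      
                      if (lsn == 0) {
                              /* Empty flush list: skip it instead of
                              letting it reset the minimum to 0. */
                              continue;
                      }
      
                      if (oldest == 0 || lsn < oldest) {
                              oldest = lsn;
                      }
              }
      
              /* 0 only when every flush list was empty. */
              return(oldest);
      }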
  
  ------------------------------------------------------------------------
  r6987 | sbains | 2010-04-14 00:14:13 +0300 (Wed, 14 Apr 2010) | 12 lines
  Changed paths:
     M /branches/innodb+multibp/buf/buf0buf.c
  
  branches/innodb+multibp: When calculating the oldest_lsn we can have a
  situation where we've iterated to, say, buffer pool 3 while another thread
  adds two new dirty pages, the first to buffer pool 1 and the second to
  buffer pool 4. Up to buffer pool 3 the oldest_lsn was 0; now we would end
  up returning the lsn at buffer pool 4 as the oldest LSN. We prevent this
  by acquiring the flush order mutex.
  
  Another future option is to calculate the min_lsn when flushing pages
  from the list and to maintain a running total using atomics. That way
  we can get rid of this function altogether. The atomics will only really
  be required when we do parallel flushing.
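  
  A sketch of the fix (the enter/exit macro names are assumed to match
  the log_flush_order_mutex_own() seen in the diffs below): holding the
  global flush order mutex across the whole scan keeps any thread from
  inserting a dirty page into any flush list mid-scan, so the minimum
  cannot come from a torn view:
  
      log_flush_order_mutex_enter();
      
      oldest = buf_pool_oldest_lsn();	/* the scan sketched under r6972 */
      
      log_flush_order_mutex_exit();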
  
  ------------------------------------------------------------------------
  r6992 | sbains | 2010-04-14 02:45:59 +0300 (Wed, 14 Apr 2010) | 2 lines
  Changed paths:
     M /branches/innodb+multibp/include/ut0rbt.h
     M /branches/innodb+multibp/ut/ut0rbt.c
  
  branches/innodb+multibp: Fix copyright of the rbt code.
  
  ------------------------------------------------------------------------
parent 1e13dd0b
Index: btr/btr0btr.c
===================================================================
@@ -952,6 +952,7 @@ btr_page_reorganize_low(
 	dict_index_t*	index,	/*!< in: record descriptor */
 	mtr_t*		mtr)	/*!< in: mtr */
 {
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(&block->page);
 	page_t*		page = buf_block_get_frame(block);
 	page_zip_des_t*	page_zip = buf_block_get_page_zip(block);
 	buf_block_t*	temp_block;
@@ -982,7 +983,7 @@ btr_page_reorganize_low(
 	log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
 #ifndef UNIV_HOTBACKUP
-	temp_block = buf_block_alloc(0);
+	temp_block = buf_block_alloc(buf_pool, 0);
 #else /* !UNIV_HOTBACKUP */
 	ut_ad(block == back_block1);
 	temp_block = back_block2;

Index: btr/btr0cur.c
===================================================================
@@ -3882,6 +3882,7 @@ btr_blob_free(
 				if there is one */
 	mtr_t*		mtr)	/*!< in: mini-transaction to commit */
 {
+	buf_pool_t*	buf_pool = buf_pool_from_block(block);
 	ulint		space = buf_block_get_space(block);
 	ulint		page_no = buf_block_get_page_no(block);
@@ -3889,7 +3890,7 @@ btr_blob_free(
 	mtr_commit(mtr);
-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);
 	mutex_enter(&block->mutex);
 	/* Only free the block if it is still allocated to
@@ -3910,7 +3911,7 @@ btr_blob_free(
 		}
 	}
-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);
 	mutex_exit(&block->mutex);
 }

Index: btr/btr0sea.c
===================================================================
@@ -150,7 +150,7 @@ btr_search_check_free_space_in_heap(void)
 	be enough free space in the hash table. */
 	if (heap->free_block == NULL) {
-		buf_block_t*	block = buf_block_alloc(0);
+		buf_block_t*	block = buf_block_alloc(NULL, 0);
 		rw_lock_x_lock(&btr_search_latch);
@@ -825,6 +825,7 @@ btr_search_guess_on_hash(
 				RW_S_LATCH, RW_X_LATCH, or 0 */
 	mtr_t*		mtr)	/*!< in: mtr */
 {
+	buf_pool_t*	buf_pool;
 	buf_block_t*	block;
 	rec_t*		rec;
 	ulint		fold;
@@ -983,7 +984,7 @@ btr_search_guess_on_hash(
 	/* Increment the page get statistics though we did not really
 	fix the page: for user info only */
-
+	buf_pool = buf_pool_from_bpage(&block->page);
 	buf_pool->stat.n_page_gets++;
 	return(TRUE);
@@ -1760,7 +1761,7 @@ btr_search_validate(void)
 	rec_offs_init(offsets_);
 	rw_lock_x_lock(&btr_search_latch);
-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter_all();
 	cell_count = hash_get_n_cells(btr_search_sys->hash_index);
@@ -1768,11 +1769,11 @@ btr_search_validate(void)
 		/* We release btr_search_latch every once in a while to
 		give other queries a chance to run. */
 		if ((i != 0) && ((i % chunk_size) == 0)) {
-			buf_pool_mutex_exit();
+			buf_pool_mutex_exit_all();
 			rw_lock_x_unlock(&btr_search_latch);
 			os_thread_yield();
 			rw_lock_x_lock(&btr_search_latch);
-			buf_pool_mutex_enter();
+			buf_pool_mutex_enter_all();
 		}
 		node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
@@ -1781,6 +1782,9 @@ btr_search_validate(void)
 			const buf_block_t*	block
 				= buf_block_align(node->data);
 			const buf_block_t*	hash_block;
+			buf_pool_t*		buf_pool;
+
+			buf_pool = buf_pool_from_bpage((buf_page_t*) block);
 			if (UNIV_LIKELY(buf_block_get_state(block)
 					== BUF_BLOCK_FILE_PAGE)) {
@@ -1791,6 +1795,7 @@ btr_search_validate(void)
 				(BUF_BLOCK_REMOVE_HASH, see the
 				assertion and the comment below) */
 				hash_block = buf_block_hash_get(
+					buf_pool,
 					buf_block_get_space(block),
 					buf_block_get_page_no(block));
 			} else {
@@ -1879,11 +1884,11 @@ btr_search_validate(void)
 		/* We release btr_search_latch every once in a while to
 		give other queries a chance to run. */
 		if (i != 0) {
-			buf_pool_mutex_exit();
+			buf_pool_mutex_exit_all();
 			rw_lock_x_unlock(&btr_search_latch);
 			os_thread_yield();
 			rw_lock_x_lock(&btr_search_latch);
-			buf_pool_mutex_enter();
+			buf_pool_mutex_enter_all();
 		}
 		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
@@ -1891,7 +1896,7 @@ btr_search_validate(void)
 		}
 	}
-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit_all();
 	rw_lock_x_unlock(&btr_search_latch);
 	if (UNIV_LIKELY_NULL(heap)) {
 		mem_heap_free(heap);

Index: buf/buf0rea.c
===================================================================
@@ -171,6 +171,7 @@ buf_read_page(
 	ulint	zip_size,/*!< in: compressed page size in bytes, or 0 */
 	ulint	offset)	/*!< in: page number */
 {
+	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
 	ib_int64_t	tablespace_version;
 	ulint		count;
 	ulint		err;
@@ -195,7 +196,7 @@ buf_read_page(
 	}
 	/* Flush pages from the end of the LRU list if necessary */
-	buf_flush_free_margin();
+	buf_flush_free_margin(buf_pool);
 	/* Increment number of I/O operations used for LRU policy. */
 	buf_LRU_stat_inc_io();
@@ -236,6 +237,7 @@ buf_read_ahead_linear(
 	ulint	offset)	/*!< in: page number of a page; NOTE: the current thread
 			must want access to this page (see NOTE 3 above) */
 {
+	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
 	ib_int64_t	tablespace_version;
 	buf_page_t*	bpage;
 	buf_frame_t*	frame;
@@ -251,7 +253,7 @@ buf_read_ahead_linear(
 	ulint		err;
 	ulint		i;
 	const ulint	buf_read_ahead_linear_area
-		= BUF_READ_AHEAD_LINEAR_AREA;
+		= BUF_READ_AHEAD_LINEAR_AREA(buf_pool);
 	ulint		threshold;
 	if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) {
@@ -286,10 +288,10 @@ buf_read_ahead_linear(
 	tablespace_version = fil_space_get_version(space);
-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);
 	if (high > fil_space_get_size(space)) {
-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);
 		/* The area is not whole, return */
 		return(0);
@@ -297,7 +299,7 @@ buf_read_ahead_linear(
 	if (buf_pool->n_pend_reads
 	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);
 		return(0);
 	}
@@ -315,14 +317,14 @@ buf_read_ahead_linear(
 	/* How many out of order accessed pages can we ignore
 	when working out the access pattern for linear readahead */
 	threshold = ut_min((64 - srv_read_ahead_threshold),
-			   BUF_READ_AHEAD_AREA);
+			   BUF_READ_AHEAD_AREA(buf_pool));
 	fail_count = 0;
 	for (i = low; i < high; i++) {
-		bpage = buf_page_hash_get(space, i);
+		bpage = buf_page_hash_get(buf_pool, space, i);
-		if ((bpage == NULL) || !buf_page_is_accessed(bpage)) {
+		if (bpage == NULL || !buf_page_is_accessed(bpage)) {
 			/* Not accessed */
 			fail_count++;
@@ -346,7 +348,7 @@ buf_read_ahead_linear(
 	if (fail_count > threshold) {
 		/* Too many failures: return */
-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);
 		return(0);
 	}
@@ -358,10 +360,10 @@ buf_read_ahead_linear(
 	/* If we got this far, we know that enough pages in the area have
 	been accessed in the right order: linear read-ahead can be sensible */
-	bpage = buf_page_hash_get(space, offset);
+	bpage = buf_page_hash_get(buf_pool, space, offset);
 	if (bpage == NULL) {
-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);
 		return(0);
 	}
@@ -387,7 +389,7 @@ buf_read_ahead_linear(
 	pred_offset = fil_page_get_prev(frame);
 	succ_offset = fil_page_get_next(frame);
-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);
 	if ((offset == low) && (succ_offset == offset + 1)) {
@@ -466,7 +468,7 @@ buf_read_ahead_linear(
 	os_aio_simulated_wake_handler_threads();
 	/* Flush pages from the end of the LRU list if necessary */
-	buf_flush_free_margin();
+	buf_flush_free_margin(buf_pool);
 #ifdef UNIV_DEBUG
 	if (buf_debug_prints && (count > 0)) {
@@ -518,15 +520,19 @@ buf_read_ibuf_merge_pages(
 #ifdef UNIV_IBUF_DEBUG
 	ut_a(n_stored < UNIV_PAGE_SIZE);
 #endif
+	for (i = 0; i < n_stored; i++) {
+		ulint		err;
+		buf_pool_t*	buf_pool;
+		ulint		zip_size = fil_space_get_zip_size(space_ids[i]);
+
+		buf_pool = buf_pool_get(space_ids[i], space_versions[i]);
 		while (buf_pool->n_pend_reads
 		       > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
 			os_thread_sleep(500000);
 		}
-	for (i = 0; i < n_stored; i++) {
-		ulint	zip_size = fil_space_get_zip_size(space_ids[i]);
-		ulint	err;
 		if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
 			goto tablespace_deleted;
@@ -550,8 +556,8 @@ tablespace_deleted:
 	os_aio_simulated_wake_handler_threads();
-	/* Flush pages from the end of the LRU list if necessary */
-	buf_flush_free_margin();
+	/* Flush pages from the end of all the LRU lists if necessary */
+	buf_flush_free_margins();
 #ifdef UNIV_DEBUG
 	if (buf_debug_prints) {
@@ -600,11 +606,12 @@ buf_read_recv_pages(
 	tablespace_version = fil_space_get_version(space);
 	for (i = 0; i < n_stored; i++) {
+		buf_pool_t*	buf_pool;
 		count = 0;
 		os_aio_print_debug = FALSE;
+		buf_pool = buf_pool_get(space, page_nos[i]);
 		while (buf_pool->n_pend_reads >= recv_n_pool_free_frames / 2) {
 			os_aio_simulated_wake_handler_threads();
@@ -643,8 +650,8 @@ buf_read_recv_pages(
 	os_aio_simulated_wake_handler_threads();
-	/* Flush pages from the end of the LRU list if necessary */
-	buf_flush_free_margin();
+	/* Flush pages from the end of all the LRU lists if necessary */
+	buf_flush_free_margins();
 #ifdef UNIV_DEBUG
 	if (buf_debug_prints) {

Index: ha/ha0ha.c
===================================================================
@@ -403,8 +403,6 @@ ha_print_info(
 	FILE*		file,	/*!< in: file where to print */
 	hash_table_t*	table)	/*!< in: hash table */
 {
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 #ifdef UNIV_DEBUG
 /* Some of the code here is disabled for performance reasons in production
 builds, see http://bugs.mysql.com/36941 */
@@ -418,6 +416,8 @@ builds, see http://bugs.mysql.com/36941 */
 #endif /* PRINT_USED_CELLS */
 	ulint	n_bufs;
+	ut_ad(table);
+	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 #ifdef PRINT_USED_CELLS
 	for (i = 0; i < hash_get_n_cells(table); i++) {

Index: handler/ha_innodb.cc
===================================================================
@@ -134,6 +134,7 @@ static long innobase_mirrored_log_groups, innobase_log_files_in_group,
 static ulong innobase_commit_concurrency = 0;
 static ulong innobase_read_io_threads;
 static ulong innobase_write_io_threads;
+static long innobase_buffer_pool_instances = 1;
 static long long innobase_buffer_pool_size, innobase_log_file_size;
@@ -240,7 +241,7 @@ static PSI_mutex_info all_innodb_mutexes[] = {
 	{&file_format_max_mutex_key, "file_format_max_mutex", 0},
 	{&fil_system_mutex_key, "fil_system_mutex", 0},
 	{&flush_list_mutex_key, "flush_list_mutex", 0},
-	{&flush_order_mutex_key, "flush_order_mutex", 0},
+	{&log_flush_order_mutex_key, "log_flush_order_mutex", 0},
 	{&hash_table_mutex_key, "hash_table_mutex", 0},
 	{&ibuf_bitmap_mutex_key, "ibuf_bitmap_mutex", 0},
 	{&ibuf_mutex_key, "ibuf_mutex", 0},
@@ -2304,6 +2305,7 @@ innobase_change_buffering_inited_ok:
 	srv_log_buffer_size = (ulint) innobase_log_buffer_size;
 	srv_buf_pool_size = (ulint) innobase_buffer_pool_size;
+	srv_buf_pool_instances = (ulint) innobase_buffer_pool_instances;
 	srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size;
@@ -2347,9 +2349,6 @@ innobase_change_buffering_inited_ok:
 	ut_a(0 == strcmp(my_charset_latin1.name, "latin1_swedish_ci"));
 	srv_latin1_ordering = my_charset_latin1.sort_order;
-	innobase_old_blocks_pct = buf_LRU_old_ratio_update(
-		innobase_old_blocks_pct, FALSE);
-
 	innobase_commit_concurrency_init_default();
 #ifdef HAVE_PSI_INTERFACE
@@ -2403,6 +2402,9 @@ innobase_change_buffering_inited_ok:
 		goto mem_free_and_error;
 	}
+	innobase_old_blocks_pct = buf_LRU_old_ratio_update(
+		innobase_old_blocks_pct, TRUE);
+
 	innobase_open_tables = hash_create(200);
 	mysql_mutex_init(innobase_share_mutex_key,
 			 &innobase_share_mutex,
@@ -3336,6 +3338,8 @@ innobase_build_index_translation(
 	DBUG_ENTER("innobase_build_index_translation");
+	mutex_enter(&dict_sys->mutex);
+
 	mysql_num_index = table->s->keys;
 	ib_num_index = UT_LIST_GET_LEN(ib_table->indexes);
@@ -3366,6 +3370,13 @@ innobase_build_index_translation(
 						MYF(MY_ALLOW_ZERO_PTR));
 		if (!index_mapping) {
+			/* Report an error if index_mapping continues to be
+			NULL and mysql_num_index is a non-zero value */
+			sql_print_error("InnoDB: fail to allocate memory for "
+					"index translation table. Number of "
+					"Index:%lu, array size:%lu",
+					mysql_num_index,
+					share->idx_trans_tbl.array_size);
 			ret = FALSE;
 			goto func_exit;
 		}
@@ -3373,7 +3384,6 @@ innobase_build_index_translation(
 		share->idx_trans_tbl.array_size = mysql_num_index;
 	}
-
 	/* For each index in the mysql key_info array, fetch its
 	corresponding InnoDB index pointer into index_mapping
 	array. */
@@ -3419,6 +3429,8 @@ func_exit:
 	share->idx_trans_tbl.index_mapping = index_mapping;
+	mutex_exit(&dict_sys->mutex);
+
 	DBUG_RETURN(ret);
 }
@@ -10816,6 +10828,11 @@ static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size,
   "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
   NULL, NULL, 128*1024*1024L, 5*1024*1024L, LONGLONG_MAX, 1024*1024L);
+static MYSQL_SYSVAR_LONG(buffer_pool_instances, innobase_buffer_pool_instances,
+  PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+  "Number of buffer pool instances, set to higher value on high-end machines to increase scalability",
+  NULL, NULL, 1L, 1L, MAX_BUFFER_POOLS, 1L);
+
 static MYSQL_SYSVAR_ULONG(commit_concurrency, innobase_commit_concurrency,
   PLUGIN_VAR_RQCMDARG,
   "Helps in performance tuning in heavily concurrent environments.",
@@ -10951,6 +10968,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
   MYSQL_SYSVAR(additional_mem_pool_size),
   MYSQL_SYSVAR(autoextend_increment),
   MYSQL_SYSVAR(buffer_pool_size),
+  MYSQL_SYSVAR(buffer_pool_instances),
   MYSQL_SYSVAR(checksums),
   MYSQL_SYSVAR(commit_concurrency),
   MYSQL_SYSVAR(concurrency_tickets),

Index: handler/i_s.cc
===================================================================
@@ -1306,6 +1306,14 @@ static ST_FIELD_INFO i_s_cmpmem_fields_info[] =
 	 STRUCT_FLD(old_name, "Buddy Block Size"),
 	 STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+	{STRUCT_FLD(field_name, "buffer_pool_instance"),
+	 STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+	 STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+	 STRUCT_FLD(value, 0),
+	 STRUCT_FLD(field_flags, 0),
+	 STRUCT_FLD(old_name, "Buffer Pool Id"),
+	 STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
 	{STRUCT_FLD(field_name, "pages_used"),
 	 STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
 	 STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
@@ -1355,8 +1363,8 @@ i_s_cmpmem_fill_low(
 	COND*		cond,	/*!< in: condition (ignored) */
 	ibool		reset)	/*!< in: TRUE=reset cumulated counts */
 {
-	TABLE*	table	= (TABLE *) tables->table;
 	int	status	= 0;
+	TABLE*	table	= (TABLE *) tables->table;
 	DBUG_ENTER("i_s_cmpmem_fill_low");
@@ -1368,22 +1376,33 @@ i_s_cmpmem_fill_low(
 	RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
-	buf_pool_mutex_enter();
+	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+
+		status	= 0;
+
+		buf_pool = buf_pool_from_array(i);
+
+		buf_pool_mutex_enter(buf_pool);
 	for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
-		buf_buddy_stat_t*	buddy_stat = &buf_buddy_stat[x];
+		buf_buddy_stat_t*	buddy_stat;
+
+		buddy_stat = &buf_pool->buddy_stat[x];
 		table->field[0]->store(BUF_BUDDY_LOW << x);
-		table->field[1]->store(buddy_stat->used);
-		table->field[2]->store(UNIV_LIKELY(x < BUF_BUDDY_SIZES)
+		table->field[1]->store(i);
+		table->field[2]->store(buddy_stat->used);
+		table->field[3]->store(UNIV_LIKELY(x < BUF_BUDDY_SIZES)
 				       ? UT_LIST_GET_LEN(buf_pool->zip_free[x])
 				       : 0);
-		table->field[3]->store((longlong) buddy_stat->relocated, true);
-		table->field[4]->store(
+		table->field[4]->store((longlong)
+				       buddy_stat->relocated, true);
+		table->field[5]->store(
 			(ulong) (buddy_stat->relocated_usec / 1000000));
 		if (reset) {
-			/* This is protected by buf_pool_mutex. */
+			/* This is protected by buf_pool->mutex. */
 			buddy_stat->relocated = 0;
 			buddy_stat->relocated_usec = 0;
 		}
@@ -1394,7 +1413,13 @@ i_s_cmpmem_fill_low(
 		}
 	}
-	buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);
+
+		if (status) {
+			break;
+		}
+	}
 	DBUG_RETURN(status);
 }

Index: ibuf/ibuf0ibuf.c
===================================================================
@@ -2323,7 +2323,7 @@ ibuf_get_merge_page_nos(
 	*n_stored = 0;
-	limit = ut_min(IBUF_MAX_N_PAGES_MERGED, buf_pool->curr_size / 4);
+	limit = ut_min(IBUF_MAX_N_PAGES_MERGED, buf_pool_get_curr_size() / 4);
 	if (page_rec_is_supremum(rec)) {
@@ -3139,9 +3139,9 @@ ibuf_set_entry_counter(
 	ibool		is_optimistic,	/*!< in: is this an optimistic insert */
 	mtr_t*		mtr)		/*!< in: mtr */
 {
-	ulint		counter;
 	dfield_t*	field;
 	byte*		data;
+	ulint		counter = 0;
 	/* pcur points to either a user rec or to a page's infimum record. */
 	ut_ad(page_validate(btr_pcur_get_page(pcur), ibuf->index));
@@ -3682,10 +3682,11 @@ check_watch:
 	{
 		buf_page_t*	bpage;
 		ulint		fold = buf_page_address_fold(space, page_no);
+		buf_pool_t*	buf_pool = buf_pool_get(space, page_no);
-		buf_pool_mutex_enter();
-		bpage = buf_page_hash_get_low(space, page_no, fold);
-		buf_pool_mutex_exit();
+		buf_pool_mutex_enter(buf_pool);
+		bpage = buf_page_hash_get_low(buf_pool, space, page_no, fold);
+		buf_pool_mutex_exit(buf_pool);
 		if (UNIV_LIKELY_NULL(bpage)) {
 			/* A buffer pool watch has been set or the

Index: include/buf0buddy.h
===================================================================
@@ -36,22 +36,24 @@ Created December 2006 by Marko Makela
 /**********************************************************************//**
 Allocate a block. The thread calling this function must hold
-buf_pool_mutex and must not hold buf_pool_zip_mutex or any
-block->mutex. The buf_pool_mutex may only be released and reacquired
+buf_pool->mutex and must not hold buf_pool_zip_mutex or any
+block->mutex. The buf_pool->mutex may only be released and reacquired
 if lru != NULL. This function should only be used for allocating
 compressed page frames or control blocks (buf_page_t). Allocated
 control blocks must be properly initialized immediately after
 buf_buddy_alloc() has returned the memory, before releasing
-buf_pool_mutex.
+buf_pool->mutex.
 @return allocated block, possibly NULL if lru == NULL */
 UNIV_INLINE
 void*
 buf_buddy_alloc(
 /*============*/
+	buf_pool_t*	buf_pool,
+			/*!< buffer pool in which the block resides */
 	ulint	size,	/*!< in: block size, up to UNIV_PAGE_SIZE */
 	ibool*	lru)	/*!< in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
-			and buf_pool_mutex was temporarily released,
+			and buf_pool->mutex was temporarily released,
 			or NULL if the LRU list should not be used */
 	__attribute__((malloc));
@@ -61,28 +63,13 @@ UNIV_INLINE
 void
 buf_buddy_free(
 /*===========*/
+	buf_pool_t*	buf_pool,
+			/*!< buffer pool in which the block resides */
 	void*	buf,	/*!< in: block to be freed, must not be
 			pointed to by the buffer pool */
 	ulint	size)	/*!< in: block size, up to UNIV_PAGE_SIZE */
 	__attribute__((nonnull));
-/** Statistics of buddy blocks of a given size. */
-struct buf_buddy_stat_struct {
-	/** Number of blocks allocated from the buddy system. */
-	ulint		used;
-	/** Number of blocks relocated by the buddy system. */
-	ib_uint64_t	relocated;
-	/** Total duration of block relocations, in microseconds. */
-	ib_uint64_t	relocated_usec;
-};
-
-/** Statistics of buddy blocks of a given size. */
-typedef struct buf_buddy_stat_struct buf_buddy_stat_t;
-
-/** Statistics of the buddy system, indexed by block size.
-Protected by buf_pool_mutex. */
-extern buf_buddy_stat_t buf_buddy_stat[BUF_BUDDY_SIZES + 1];
 #ifndef UNIV_NONINL
 # include "buf0buddy.ic"
 #endif

Index: include/buf0buddy.ic
===================================================================
@@ -35,18 +35,20 @@ Created December 2006 by Marko Makela
 /**********************************************************************//**
 Allocate a block. The thread calling this function must hold
-buf_pool_mutex and must not hold buf_pool_zip_mutex or any block->mutex.
-The buf_pool_mutex may only be released and reacquired if lru != NULL.
+buf_pool->mutex and must not hold buf_pool_zip_mutex or any block->mutex.
+The buf_pool->mutex may only be released and reacquired if lru != NULL.
 @return allocated block, possibly NULL if lru==NULL */
 UNIV_INTERN
 void*
 buf_buddy_alloc_low(
 /*================*/
+	buf_pool_t*	buf_pool,
+			/*!< in: buffer pool in which the page resides */
 	ulint	i,	/*!< in: index of buf_pool->zip_free[],
 			or BUF_BUDDY_SIZES */
 	ibool*	lru)	/*!< in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
-			and buf_pool_mutex was temporarily released,
+			and buf_pool->mutex was temporarily released,
 			or NULL if the LRU list should not be used */
 	__attribute__((malloc));
@@ -56,6 +58,7 @@ UNIV_INTERN
 void
 buf_buddy_free_low(
 /*===============*/
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
 	void*	buf,	/*!< in: block to be freed, must not be
 			pointed to by the buffer pool */
 	ulint	i)	/*!< in: index of buf_pool->zip_free[],
@@ -83,27 +86,32 @@ buf_buddy_get_slot(
 /**********************************************************************//**
 Allocate a block. The thread calling this function must hold
-buf_pool_mutex and must not hold buf_pool_zip_mutex or any
-block->mutex. The buf_pool_mutex may only be released and reacquired
+buf_pool->mutex and must not hold buf_pool_zip_mutex or any
+block->mutex. The buf_pool->mutex may only be released and reacquired
 if lru != NULL. This function should only be used for allocating
 compressed page frames or control blocks (buf_page_t). Allocated
 control blocks must be properly initialized immediately after
 buf_buddy_alloc() has returned the memory, before releasing
-buf_pool_mutex.
+buf_pool->mutex.
 @return allocated block, possibly NULL if lru == NULL */
 UNIV_INLINE
 void*
 buf_buddy_alloc(
 /*============*/
-	ulint	size,	/*!< in: block size, up to UNIV_PAGE_SIZE */
-	ibool*	lru)	/*!< in: pointer to a variable that will be assigned
-			TRUE if storage was allocated from the LRU list
-			and buf_pool_mutex was temporarily released,
-			or NULL if the LRU list should not be used */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool in which
+					the page resides */
+	ulint		size,		/*!< in: block size, up to
+					UNIV_PAGE_SIZE */
+	ibool*		lru)		/*!< in: pointer to a variable
+					that will be assigned TRUE if
+					storage was allocated from the
+					LRU list and buf_pool->mutex was
+					temporarily released, or NULL if
+					the LRU list should not be used */
 {
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
-	return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru));
+	return(buf_buddy_alloc_low(buf_pool, buf_buddy_get_slot(size), lru));
 }
 /**********************************************************************//**
@@ -112,13 +120,15 @@ UNIV_INLINE
 void
 buf_buddy_free(
 /*===========*/
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
 	void*	buf,	/*!< in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	size)	/*!< in: block size, up to UNIV_PAGE_SIZE */
+	ulint	size)	/*!< in: block size, up to
+			UNIV_PAGE_SIZE */
 {
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
-	buf_buddy_free_low(buf, buf_buddy_get_slot(size));
+	buf_buddy_free_low(buf_pool, buf, buf_buddy_get_slot(size));
 }
 #ifdef UNIV_MATERIALIZE

...@@ -31,6 +31,7 @@ Created 11/5/1995 Heikki Tuuri ...@@ -31,6 +31,7 @@ Created 11/5/1995 Heikki Tuuri
#ifndef UNIV_HOTBACKUP #ifndef UNIV_HOTBACKUP
#include "mtr0types.h" #include "mtr0types.h"
#include "buf0types.h" #include "buf0types.h"
#include "log0log.h"
/********************************************************************//** /********************************************************************//**
Remove a block from the flush list of modified blocks. */ Remove a block from the flush list of modified blocks. */
...@@ -58,11 +59,19 @@ buf_flush_write_complete( ...@@ -58,11 +59,19 @@ buf_flush_write_complete(
buf_page_t* bpage); /*!< in: pointer to the block in question */ buf_page_t* bpage); /*!< in: pointer to the block in question */
/*********************************************************************//** /*********************************************************************//**
Flushes pages from the end of the LRU list if there is too small Flushes pages from the end of the LRU list if there is too small
a margin of replaceable pages there. */ a margin of replaceable pages there. If buffer pool is NULL it
means flush free margin on all buffer pool instances. */
UNIV_INTERN UNIV_INTERN
void void
buf_flush_free_margin(void); buf_flush_free_margin(
/*=======================*/ /*==================*/
buf_pool_t* buf_pool);
/*********************************************************************//**
Flushes pages from the end of all the LRU lists. */
UNIV_INTERN
void
buf_flush_free_margins(void);
/*=========================*/
#endif /* !UNIV_HOTBACKUP */ #endif /* !UNIV_HOTBACKUP */
/********************************************************************//** /********************************************************************//**
Initializes a page for writing to the tablespace. */ Initializes a page for writing to the tablespace. */
...@@ -76,21 +85,30 @@ buf_flush_init_for_writing( ...@@ -76,21 +85,30 @@ buf_flush_init_for_writing(
to the page */ to the page */
#ifndef UNIV_HOTBACKUP #ifndef UNIV_HOTBACKUP
/*******************************************************************//** /*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list or flush_list. This utility flushes dirty blocks from the end of the LRU list.
NOTE 1: in the case of an LRU flush the calling thread may own latches to NOTE: The calling thread may own latches to pages: to avoid deadlocks,
pages: to avoid deadlocks, this function must be written so that it cannot this function must be written so that it cannot end up waiting for these
end up waiting for these latches! NOTE 2: in the case of a flush list flush, latches!
the calling thread is not allowed to own any latches on pages! @return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
UNIV_INTERN
ulint
buf_flush_LRU(
/*==========*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint min_n); /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush_list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued; @return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */ ULINT_UNDEFINED if there was a flush of the same type already running */
UNIV_INTERN UNIV_INTERN
ulint ulint
buf_flush_batch( buf_flush_list(
/*============*/ /*============*/
enum buf_flush flush_type, /*!< in: BUF_FLUSH_LRU or
BUF_FLUSH_LIST; if BUF_FLUSH_LIST,
then the caller must not own any
latches on pages */
ulint min_n, /*!< in: wished minimum mumber of blocks ulint min_n, /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the flushed (it is not guaranteed that the
actual number is that big, though) */ actual number is that big, though) */
...@@ -105,7 +123,9 @@ UNIV_INTERN ...@@ -105,7 +123,9 @@ UNIV_INTERN
void void
buf_flush_wait_batch_end( buf_flush_wait_batch_end(
/*=====================*/ /*=====================*/
enum buf_flush type); /*!< in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */ buf_pool_t* buf_pool, /*!< buffer pool instance */
enum buf_flush type); /*!< in: BUF_FLUSH_LRU
or BUF_FLUSH_LIST */
/********************************************************************//** /********************************************************************//**
This function should be called at a mini-transaction commit, if a page was This function should be called at a mini-transaction commit, if a page was
modified in it. Puts the block to the list of modified blocks, if it not modified in it. Puts the block to the list of modified blocks, if it not
...@@ -181,8 +201,9 @@ Validates the flush list. ...@@ -181,8 +201,9 @@ Validates the flush list.
@return TRUE if ok */ @return TRUE if ok */
UNIV_INTERN UNIV_INTERN
ibool ibool
buf_flush_validate(void); buf_flush_validate(
/*====================*/ /*===============*/
buf_pool_t* buf_pool);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/********************************************************************//** /********************************************************************//**
...@@ -205,9 +226,10 @@ buf_flush_free_flush_rbt(void); ...@@ -205,9 +226,10 @@ buf_flush_free_flush_rbt(void);
available to replacement in the free list and at the end of the LRU list (to available to replacement in the free list and at the end of the LRU list (to
make sure that a read-ahead batch can be read efficiently in a single make sure that a read-ahead batch can be read efficiently in a single
sweep). */ sweep). */
#define BUF_FLUSH_FREE_BLOCK_MARGIN (5 + BUF_READ_AHEAD_AREA) #define BUF_FLUSH_FREE_BLOCK_MARGIN(b) (5 + BUF_READ_AHEAD_AREA(b))
/** Extra margin to apply above BUF_FLUSH_FREE_BLOCK_MARGIN */ /** Extra margin to apply above BUF_FLUSH_FREE_BLOCK_MARGIN */
#define BUF_FLUSH_EXTRA_MARGIN (BUF_FLUSH_FREE_BLOCK_MARGIN / 4 + 100) #define BUF_FLUSH_EXTRA_MARGIN(b) (BUF_FLUSH_FREE_BLOCK_MARGIN(b) / 4 \
+ 100)
#endif /* !UNIV_HOTBACKUP */ #endif /* !UNIV_HOTBACKUP */
#ifndef UNIV_NONINL #ifndef UNIV_NONINL
......
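Taken together, these buf0flu.h changes replace the single global flush entry points with per-instance ones. A minimal sketch of driving an LRU flush across every instance, assuming the srv_buf_pool_instances counter declared later in this diff and a buf_pool_from_array() accessor; the helper itself is illustrative, not part of the patch:

/* Illustrative helper: ask each instance to LRU-flush up to min_n
blocks and report how many write requests were queued in total. */
static ulint
flush_all_instances_example(ulint min_n)
{
	ulint	total = 0;
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		ulint	n = buf_flush_LRU(buf_pool_from_array(i), min_n);

		/* A flush of the same type may already be running on
		this instance; ULINT_UNDEFINED signals that case. */
		if (n != ULINT_UNDEFINED) {
			total += n;
		}
	}

	return(total);
}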
@@ -33,6 +33,7 @@ UNIV_INTERN
void
buf_flush_insert_into_flush_list(
/*=============================*/
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	buf_block_t*	block,		/*!< in/out: block which is modified */
	ib_uint64_t	lsn);		/*!< in: oldest modification */
/********************************************************************//**
@@ -43,6 +44,7 @@ UNIV_INTERN
void
buf_flush_insert_sorted_into_flush_list(
/*====================================*/
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	buf_block_t*	block,		/*!< in/out: block which is modified */
	ib_uint64_t	lsn);		/*!< in: oldest modification */
@@ -57,6 +59,8 @@ buf_flush_note_modification(
	buf_block_t*	block,		/*!< in: block which is modified */
	mtr_t*		mtr)		/*!< in: mtr */
{
+	buf_pool_t*	buf_pool = buf_pool_from_block(block);
	ut_ad(block);
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_ad(block->page.buf_fix_count > 0);
@@ -64,9 +68,9 @@ buf_flush_note_modification(
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
-	ut_ad(!buf_pool_mutex_own());
-	ut_ad(!buf_flush_list_mutex_own());
-	ut_ad(buf_flush_order_mutex_own());
+	ut_ad(!buf_pool_mutex_own(buf_pool));
+	ut_ad(!buf_flush_list_mutex_own(buf_pool));
+	ut_ad(log_flush_order_mutex_own());
	ut_ad(mtr->start_lsn != 0);
	ut_ad(mtr->modifications);
@@ -77,7 +81,8 @@ buf_flush_note_modification(
	block->page.newest_modification = mtr->end_lsn;
	if (!block->page.oldest_modification) {
-		buf_flush_insert_into_flush_list(block, mtr->start_lsn);
+		buf_flush_insert_into_flush_list(
+			buf_pool, block, mtr->start_lsn);
	} else {
		ut_ad(block->page.oldest_modification <= mtr->start_lsn);
	}
@@ -99,6 +104,8 @@ buf_flush_recv_note_modification(
	ib_uint64_t	end_lsn)	/*!< in: end lsn of the last mtr in the
					set of mtr's */
{
+	buf_pool_t*	buf_pool = buf_pool_from_block(block);
	ut_ad(block);
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_ad(block->page.buf_fix_count > 0);
@@ -106,9 +113,9 @@ buf_flush_recv_note_modification(
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
-	ut_ad(!buf_pool_mutex_own());
-	ut_ad(!buf_flush_list_mutex_own());
-	ut_ad(buf_flush_order_mutex_own());
+	ut_ad(!buf_pool_mutex_own(buf_pool));
+	ut_ad(!buf_flush_list_mutex_own(buf_pool));
+	ut_ad(log_flush_order_mutex_own());
	ut_ad(start_lsn != 0);
	ut_ad(block->page.newest_modification <= end_lsn);
@@ -117,7 +124,8 @@ buf_flush_recv_note_modification(
	block->page.newest_modification = end_lsn;
	if (!block->page.oldest_modification) {
-		buf_flush_insert_sorted_into_flush_list(block, start_lsn);
+		buf_flush_insert_sorted_into_flush_list(
+			buf_pool, block, start_lsn);
	} else {
		ut_ad(block->page.oldest_modification <= start_lsn);
	}
......
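These .ic changes show the pattern used throughout the patch: the instance is recovered from the block itself via buf_pool_from_block(), so callers do not have to thread a buf_pool_t* through every code path, and the old per-pool flush-order assertion becomes one on the global log_sys mutex. A sketch of the calling convention the asserts imply; the wrapper function is illustrative only:

/* Sketch: what a caller must (and must not) hold when noting a
modification, per the ut_ad() asserts above. */
static void
note_modification_example(buf_block_t* block, mtr_t* mtr)
{
	/* No buf_pool->mutex or flush-list mutex may be held here.
	Only the global flush-order mutex, which keeps flush-list
	insertions ordered by LSN across all instances. */
	log_flush_order_mutex_enter();

	buf_flush_note_modification(block, mtr);

	log_flush_order_mutex_exit();
}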
@@ -52,8 +52,9 @@ operations need new buffer blocks, and the i/o work done in flushing would be
wasted. */
UNIV_INTERN
void
-buf_LRU_try_free_flushed_blocks(void);
-/*==================================*/
+buf_LRU_try_free_flushed_blocks(
+/*============================*/
+	buf_pool_t*	buf_pool);	/*!< in: buffer pool instance */
/******************************************************************//**
Returns TRUE if less than 25 % of the buffer pool is available. This can be
used in heuristics to prevent huge transactions eating up the whole buffer
@@ -72,7 +73,7 @@ These are low-level functions
#define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
/** Maximum LRU list search length in buf_flush_LRU_recommendation() */
-#define BUF_LRU_FREE_SEARCH_LEN		(5 + 2 * BUF_READ_AHEAD_AREA)
+#define BUF_LRU_FREE_SEARCH_LEN(b)	(5 + 2 * BUF_READ_AHEAD_AREA(b))
/******************************************************************//**
Invalidates all pages belonging to a given tablespace when we are deleting
@@ -97,10 +98,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns BUF_LRU_FREED, it will not temporarily
-release buf_pool_mutex. Furthermore, the page frame will no longer be
+release buf_pool->mutex. Furthermore, the page frame will no longer be
accessible via bpage.
-The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and
+The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
release these two mutexes after the call. No other
buf_page_get_mutex() may be held when calling this function.
@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or
@@ -114,7 +115,7 @@ buf_LRU_free_block(
				compressed page of an uncompressed page */
	ibool*		buf_pool_mutex_released);
				/*!< in: pointer to a variable that will
-				be assigned TRUE if buf_pool_mutex
+				be assigned TRUE if buf_pool->mutex
				was temporarily released, or NULL */
/******************************************************************//**
Try to free a replaceable block.
@@ -123,22 +124,26 @@ UNIV_INTERN
ibool
buf_LRU_search_and_free_block(
/*==========================*/
-	ulint	n_iterations);	/*!< in: how many times this has been called
-				repeatedly without result: a high value means
-				that we should search farther; if
-				n_iterations < 10, then we search
-				n_iterations / 10 * buf_pool->curr_size
-				pages from the end of the LRU list; if
-				n_iterations < 5, then we will also search
-				n_iterations / 5 of the unzip_LRU list. */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		n_iterations);	/*!< in: how many times this has
+					been called repeatedly without
+					result: a high value means that
+					we should search farther; if
+					n_iterations < 10, then we search
+					n_iterations / 10 * buf_pool->curr_size
+					pages from the end of the LRU list; if
+					n_iterations < 5, then we will
+					also search n_iterations / 5
+					of the unzip_LRU list. */
/******************************************************************//**
Returns a free block from the buf_pool. The block is taken off the
free list. If it is empty, returns NULL.
@return a free control block, or NULL if the buf_block->free list is empty */
UNIV_INTERN
buf_block_t*
-buf_LRU_get_free_only(void);
-/*=======================*/
+buf_LRU_get_free_only(
+/*==================*/
+	buf_pool_t*	buf_pool);	/*!< buffer pool instance */
/******************************************************************//**
Returns a free block from the buf_pool. The block is taken off the
free list. If it is empty, blocks are moved from the end of the
@@ -148,6 +153,7 @@ UNIV_INTERN
buf_block_t*
buf_LRU_get_free_block(
/*===================*/
+	buf_pool_t*	buf_pool,	/*!< in: preferred buffer pool */
	ulint		zip_size);	/*!< in: compressed page size in bytes,
					or 0 if uncompressed tablespace */
@@ -196,7 +202,7 @@ buf_LRU_make_block_old(
Updates buf_LRU_old_ratio.
@return updated old_pct */
UNIV_INTERN
-uint
+ulint
buf_LRU_old_ratio_update(
/*=====================*/
	uint	old_pct,/*!< in: Reserve this percentage of
@@ -232,7 +238,7 @@ buf_LRU_print(void);
/** @name Heuristics for detecting index scan @{ */
/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for
-"old" blocks. Protected by buf_pool_mutex. */
+"old" blocks. Protected by buf_pool->mutex. */
extern uint	buf_LRU_old_ratio;
/** The denominator of buf_LRU_old_ratio. */
#define BUF_LRU_OLD_RATIO_DIV	1024
@@ -278,7 +284,7 @@ Cleared by buf_LRU_stat_update(). */
extern buf_LRU_stat_t	buf_LRU_stat_cur;
/** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update(). Protected by buf_pool_mutex. */
+Updated by buf_LRU_stat_update(). Protected by buf_pool->mutex. */
extern buf_LRU_stat_t	buf_LRU_stat_sum;
/********************************************************************//**
......
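The widened buf_LRU_search_and_free_block() keeps the documented depth heuristic. Written out as arithmetic, the rule in the comment amounts to something like the helper below (illustrative only; buf_pool->curr_size is the instance's size in pages):

/* How many LRU pages to scan from the tail for a given retry count. */
static ulint
lru_scan_depth_example(const buf_pool_t* buf_pool, ulint n_iterations)
{
	if (n_iterations < 10) {
		/* e.g. the 3rd fruitless call on a 65536-page
		instance: 3 * 65536 / 10 = 19660 pages. */
		return(n_iterations * buf_pool->curr_size / 10);
	}

	/* After ten calls without result, scan the whole list. */
	return(buf_pool->curr_size);
}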
@@ -124,8 +124,8 @@ buf_read_recv_pages(
/** The size in pages of the area which the read-ahead algorithms read if
invoked */
-#define BUF_READ_AHEAD_AREA \
-	ut_min(64, ut_2_power_up(buf_pool->curr_size / 32))
+#define BUF_READ_AHEAD_AREA(b) \
+	ut_min(64, ut_2_power_up((b)->curr_size / 32))
/** @name Modes used in read-ahead @{ */
/** read only pages belonging to the insert buffer tree */
......
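BUF_READ_AHEAD_AREA is now evaluated against one instance's curr_size instead of the single global buf_pool. A worked example of the values it yields, assuming 16k pages (the function is only a vehicle for the arithmetic):

ulint
read_ahead_area_example(const buf_pool_t* b)
{
	/* A 16 MiB instance holds 1024 pages: 1024 / 32 = 32, already
	a power of two, so ut_min(64, 32) = 32 pages. Any instance of
	32 MiB or more saturates at ut_min's cap of 64 pages. */
	ulint	area = BUF_READ_AHEAD_AREA(b);

	ut_a(area <= 64);

	return(area);
}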
@@ -36,6 +36,8 @@ typedef struct buf_chunk_struct buf_chunk_t;
typedef struct buf_pool_struct	buf_pool_t;
/** Buffer pool statistics struct */
typedef struct buf_pool_stat_struct	buf_pool_stat_t;
+/** Buffer pool buddy statistics struct */
+typedef struct buf_buddy_stat_struct	buf_buddy_stat_t;
/** A buffer frame. @see page_t */
typedef byte	buf_frame_t;
......
@@ -110,7 +110,7 @@ ibuf_should_try(
	if (ibuf_flush_count % 4 == 0) {
-		buf_LRU_try_free_flushed_blocks();
+		buf_LRU_try_free_flushed_blocks(NULL);
	}
	return(TRUE);
......
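The ibuf caller above now passes NULL where a buf_pool_t* is expected. Presumably NULL means "all instances"; a plausible sketch of such a dispatcher (the recursion, loop bound, and buf_pool_from_array() are assumptions, not the patch's actual body):

void
buf_LRU_try_free_flushed_blocks(
/*============================*/
	buf_pool_t*	buf_pool)	/*!< in: instance, or NULL for all */
{
	if (buf_pool == NULL) {
		ulint	i;

		/* Fan out over every configured instance. */
		for (i = 0; i < srv_buf_pool_instances; i++) {
			buf_LRU_try_free_flushed_blocks(
				buf_pool_from_array(i));
		}

		return;
	}

	/* ... per-instance work on buf_pool, as before ... */
}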
@@ -763,6 +763,15 @@ struct log_struct{
#ifndef UNIV_HOTBACKUP
	mutex_t		mutex;	/*!< mutex protecting the log */
#endif /* !UNIV_HOTBACKUP */
+	mutex_t		log_flush_order_mutex;/*!< mutex to serialize access to
+					the flush list when we are putting
+					dirty blocks in the list. The idea
+					behind this mutex is to be able
+					to release log_sys->mutex during
+					mtr_commit and still ensure that
+					insertions in the flush_list happen
+					in the LSN order. */
	byte*		buf_ptr;	/* unaligned log buffer */
	byte*		buf;	/*!< log buffer */
	ulint		buf_size;	/*!< log buffer size in bytes */
@@ -952,6 +961,19 @@ struct log_struct{
#endif /* UNIV_LOG_ARCHIVE */
};
+/** Test if flush order mutex is owned. */
+#define log_flush_order_mutex_own()	\
+	mutex_own(&log_sys->log_flush_order_mutex)
+/** Acquire the flush order mutex. */
+#define log_flush_order_mutex_enter() do {		\
+	mutex_enter(&log_sys->log_flush_order_mutex);	\
+} while (0)
+/** Release the flush order mutex. */
+#define log_flush_order_mutex_exit() do {		\
+	mutex_exit(&log_sys->log_flush_order_mutex);	\
+} while (0)
#ifdef UNIV_LOG_ARCHIVE
/** Archiving state @{ */
#define LOG_ARCH_ON		71
......
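The comment on log_flush_order_mutex describes a handoff rather than simple nesting: the flush-order mutex is acquired while log_sys->mutex is still held, and only then is the log mutex released. A sketch of that sequence as mtr_commit() might perform it; write_mtr_log_records() and insert_dirty_blocks_in_flush_lists() are hypothetical placeholders for the real steps:

	ib_uint64_t	start_lsn;

	mutex_enter(&log_sys->mutex);

	/* Append this mtr's redo records to the log buffer
	(hypothetical helper). */
	start_lsn = write_mtr_log_records(mtr);

	/* Take the flush-order mutex before giving up log_sys->mutex:
	later commits may then copy their records into the log buffer
	concurrently, but cannot insert their dirty blocks into any
	flush list ahead of this mtr's blocks. */
	log_flush_order_mutex_enter();
	mutex_exit(&log_sys->mutex);

	/* Add each dirty block to its instance's flush list, in LSN
	order (hypothetical helper). */
	insert_dirty_blocks_in_flush_lists(mtr, start_lsn);
	log_flush_order_mutex_exit();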
@@ -142,6 +142,7 @@ extern my_bool srv_use_sys_malloc;
extern ibool	srv_use_sys_malloc;
#endif /* UNIV_HOTBACKUP */
extern ulint	srv_buf_pool_size;	/*!< requested size in bytes */
+extern ulint	srv_buf_pool_instances;	/*!< requested number of buffer pool instances */
extern ulint	srv_buf_pool_old_size;	/*!< previously requested size */
extern ulint	srv_buf_pool_curr_size;	/*!< current size in bytes */
extern ulint	srv_mem_pool_size;
......
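srv_buf_pool_instances joins srv_buf_pool_size as a startup parameter. One natural startup-time use, sketched under the assumption of an even split; buf_pool_init_instance() and buf_pool_from_array() are assumed names for illustration:

	/* Divide the requested total evenly across the instances. */
	ulint	instance_size = srv_buf_pool_size / srv_buf_pool_instances;
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_init_instance(buf_pool_from_array(i),
				       instance_size, i);
	}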
@@ -113,12 +113,13 @@ ut_test_malloc(
	ulint	n);	/*!< in: try to allocate this many bytes */
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
-Frees a memory block allocated with ut_malloc. */
+Frees a memory block allocated with ut_malloc. Freeing a NULL pointer is
+a nop. */
UNIV_INTERN
void
ut_free(
/*====*/
-	void*	ptr);	/*!< in, own: memory block */
+	void*	ptr);	/*!< in, own: memory block, can be NULL */
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Implements realloc. This is needed by /pars/lexyy.c. Otherwise, you should not
......
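The new contract makes ut_free(NULL) a no-op, matching free(3). The guard presumably sits at the top of the function body; a sketch of the pattern rather than the patch's actual code:

void
ut_free(
/*====*/
	void*	ptr)	/*!< in, own: memory block, can be NULL */
{
	if (ptr == NULL) {

		return;	/* freeing NULL is a no-op */
	}

	/* ... look up the allocation's header and release it ... */
}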
@@ -764,7 +764,6 @@ trx_commit_off_kernel(
	if (undo) {
		mutex_enter(&kernel_mutex);
		trx->no = trx_sys_get_new_trx_no();
		mutex_exit(&kernel_mutex);
		/* It is not necessary to obtain trx->undo_mutex here
......