Commit ebbc572b authored by Marko Mäkelä

MDEV-12121: Clean up WITH_INNODB_AHI=OFF

buf_flush_or_remove_pages(): Only define BUF_LRU_DROP_SEARCH_SIZE
and dependent code when the adaptive hash index has been enabled.
parent 236aed3f
 /*****************************************************************************
 Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -62,6 +62,7 @@ static const ulint BUF_LRU_OLD_TOLERANCE = 20;
 # error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN"
 #endif
+#ifdef BTR_CUR_HASH_ADAPT
 /** When dropping the search hash index entries before deleting an ibd
 file, we build a local array of pages belonging to that tablespace
 in the buffer pool. Following is the size of that array.
@@ -70,6 +71,7 @@ flush_list when dropping a table. This is to ensure that other threads
 are not blocked for extended period of time when using very large
 buffer pools. */
 static const ulint BUF_LRU_DROP_SEARCH_SIZE = 1024;
+#endif /* BTR_CUR_HASH_ADAPT */
 /** We scan these many blocks when looking for a clean page to evict
 during LRU eviction. */
@@ -379,7 +381,6 @@ bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 	return true;
 }
-#endif /* BTR_CUR_HASH_ADAPT */
 /******************************************************************//**
 While flushing (or removing dirty) pages from a tablespace we don't
@@ -468,6 +469,7 @@ buf_flush_try_yield(
 	return(false);
 }
+#endif /* BTR_CUR_HASH_ADAPT */
 /******************************************************************//**
 Removes a single page from a given tablespace inside a specific
@@ -643,6 +645,7 @@ buf_flush_or_remove_pages(
 		goto rescan;
 	}
+#ifdef BTR_CUR_HASH_ADAPT
 	++processed;
 	/* Yield if we have hogged the CPU and mutexes for too long. */
@@ -652,6 +655,7 @@ buf_flush_or_remove_pages(
 		processed = 0;
 	}
+#endif /* BTR_CUR_HASH_ADAPT */
 	/* The check for trx is interrupted is expensive, we want
 	to check every N iterations. */
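The net effect of the patch is ordinary preprocessor gating: BUF_LRU_DROP_SEARCH_SIZE, buf_flush_try_yield() and the ++processed bookkeeping in buf_flush_or_remove_pages() are compiled only when BTR_CUR_HASH_ADAPT is defined, i.e. when the adaptive hash index is built in. The following standalone C++ sketch shows the same pattern; try_yield(), scan_pages() and main() are illustrative stand-ins, not the InnoDB code.

// Minimal sketch of the conditional-compilation pattern in this commit.
// Build with -DBTR_CUR_HASH_ADAPT to keep the yield bookkeeping, or
// without it to strip that code entirely (the configuration selected
// by WITH_INNODB_AHI=OFF in the actual build system).
#include <cstdio>

#ifdef BTR_CUR_HASH_ADAPT
/* Only meaningful when the adaptive hash index exists. */
static const unsigned long BUF_LRU_DROP_SEARCH_SIZE = 1024;

/* Illustrative stand-in for buf_flush_try_yield(): pretend to release
and reacquire mutexes so that other threads are not blocked for long. */
static bool try_yield(unsigned long processed)
{
	if (processed >= BUF_LRU_DROP_SEARCH_SIZE) {
		std::puts("yielding buffer pool mutexes");
		return true;
	}
	return false;
}
#endif /* BTR_CUR_HASH_ADAPT */

/* Illustrative stand-in for the scan loop in buf_flush_or_remove_pages(). */
static void scan_pages(unsigned long n_pages)
{
#ifdef BTR_CUR_HASH_ADAPT
	unsigned long processed = 0;
#endif
	for (unsigned long i = 0; i < n_pages; i++) {
		/* ... flush or remove one page here ... */
#ifdef BTR_CUR_HASH_ADAPT
		++processed;
		/* Yield if we have hogged the CPU and mutexes for too long. */
		if (try_yield(processed)) {
			processed = 0;
		}
#endif /* BTR_CUR_HASH_ADAPT */
	}
}

int main()
{
	scan_pages(3000);
	return 0;
}

Compiling the sketch with and without -DBTR_CUR_HASH_ADAPT illustrates the point of the cleanup: when the adaptive hash index is disabled, the constant and the yield logic do not exist in the object code at all, rather than being dead weight guarded at run time.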