Commit 5f79ed02 authored by vasil

branches/zip:

Enable support for multiple background i/o helper threads.
This patch is based on Percona contributions.

More details about this patch will be written at:
https://svn.innodb.com/innobase/MultipleBackgroundThreads

The patch essentially does the following:

expose the following knobs:
innodb_read_io_threads = [1 - 64] default 1
innodb_write_io_threads = [1 - 64] default 1

deprecate innodb_file_io_threads (this parameter was relevant only on Windows)

Internally, the read and write i/o request arrays are split into multiple
segments, where one i/o helper thread works on one segment.
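
For illustration only (not part of the patch): a minimal standalone C sketch of
the thread arithmetic the hunks below use. The two knobs set the number of read
and write segments, and two further segments are reserved for insert buffer and
log i/o; the knob values chosen here are made-up examples.

    #include <stdio.h>

    int main(void)
    {
            /* Example settings; both knobs default to 1, maximum 64. */
            unsigned long read_io_threads  = 4;  /* innodb_read_io_threads */
            unsigned long write_io_threads = 4;  /* innodb_write_io_threads */

            /* As in the startup code below: 2 extra helper threads serve
            the insert buffer and the log. */
            unsigned long n_io_threads = 2 + read_io_threads + write_io_threads;

            printf("background i/o helper threads: %lu\n", n_io_threads);

            /* With both knobs at their maximum of 64 this is 2 + 64 + 64 = 130,
            which is why SRV_MAX_N_IO_THREADS grows from 100 to 130 below. */
            return 0;
    }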

Submitted by:	Inaam (rb://124)
Approved by:	Heikki (rb://124)
parent b100a45f
@@ -124,6 +124,8 @@ static long innobase_mirrored_log_groups, innobase_log_files_in_group,
 	innobase_force_recovery, innobase_open_files,
 	innobase_autoinc_lock_mode;
 static ulong innobase_commit_concurrency = 0;
+static ulong innobase_read_io_threads;
+static ulong innobase_write_io_threads;
 static long long innobase_buffer_pool_size, innobase_log_file_size;
@@ -2128,6 +2130,8 @@ innobase_change_buffering_inited_ok:
 	srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size;
 	srv_n_file_io_threads = (ulint) innobase_file_io_threads;
+	srv_n_read_io_threads = (ulint) innobase_read_io_threads;
+	srv_n_write_io_threads = (ulint) innobase_write_io_threads;
 	srv_force_recovery = (ulint) innobase_force_recovery;
@@ -9652,6 +9656,16 @@ static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads,
   "Number of file I/O threads in InnoDB.",
   NULL, NULL, 4, 4, 64, 0);
 
+static MYSQL_SYSVAR_ULONG(read_io_threads, innobase_read_io_threads,
+  PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+  "Number of background read I/O threads in InnoDB.",
+  NULL, NULL, 1, 1, 64, 0);
+
+static MYSQL_SYSVAR_ULONG(write_io_threads, innobase_write_io_threads,
+  PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+  "Number of background write I/O threads in InnoDB.",
+  NULL, NULL, 1, 1, 64, 0);
+
 static MYSQL_SYSVAR_LONG(force_recovery, innobase_force_recovery,
   PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
   "Helps to save your data in case the disk image of the database becomes corrupt.",
@@ -9755,6 +9769,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
   MYSQL_SYSVAR(doublewrite),
   MYSQL_SYSVAR(fast_shutdown),
   MYSQL_SYSVAR(file_io_threads),
+  MYSQL_SYSVAR(read_io_threads),
+  MYSQL_SYSVAR(write_io_threads),
   MYSQL_SYSVAR(file_per_table),
   MYSQL_SYSVAR(file_format),
   MYSQL_SYSVAR(file_format_check),
@@ -574,23 +574,23 @@ ibool
 os_file_create_subdirs_if_needed(
 /*=============================*/
 	const char*	path);	/*!< in: path name */
-/************************************************************************//**
-Initializes the asynchronous io system. Creates separate aio array for
-non-ibuf read and write, a third aio array for the ibuf i/o, with just one
-segment, two aio arrays for log reads and writes with one segment, and a
-synchronous aio array of the specified size. The combined number of segments
-in the three first aio arrays is the parameter n_segments given to the
-function. The caller must create an i/o handler thread for each segment in
-the four first arrays, but not for the sync aio array. */
+/***********************************************************************
+Initializes the asynchronous io system. Creates one array each for ibuf
+and log i/o. Also creates one array each for read and write where each
+array is divided logically into n_read_segs and n_write_segs
+respectively. The caller must create an i/o handler thread for each
+segment in these arrays. This function also creates the sync array.
+No i/o handler thread needs to be created for that */
 UNIV_INTERN
 void
 os_aio_init(
 /*========*/
-	ulint	n,		/*!< in: maximum number of pending aio operations
-				allowed; n must be divisible by n_segments */
-	ulint	n_segments,	/*!< in: combined number of segments in the four
-				first aio arrays; must be >= 4 */
-	ulint	n_slots_sync);	/*!< in: number of slots in the sync aio array */
+	ulint	n_per_seg,	/*<! in: maximum number of pending aio
+				operations allowed per segment */
+	ulint	n_read_segs,	/*<! in: number of reader threads */
+	ulint	n_write_segs,	/*<! in: number of writer threads */
+	ulint	n_slots_sync);	/*<! in: number of slots in the sync aio
+				array */
 /*******************************************************************//**
 Requests an asynchronous i/o operation.
 @return TRUE if request was queued successfully, FALSE if fail */
@@ -126,6 +126,8 @@ extern ulint	srv_lock_table_size;
 extern ulint	srv_n_file_io_threads;
 extern ulint	srv_read_ahead_factor;
+extern ulint	srv_n_read_io_threads;
+extern ulint	srv_n_write_io_threads;
 #ifdef UNIV_LOG_ARCHIVE
 extern ibool	srv_log_archive_on;
@@ -218,7 +220,7 @@ extern mutex_t*	kernel_mutex_temp;/* mutex protecting the server, trx structs,
 			same DRAM page as other hotspot semaphores */
 #define kernel_mutex (*kernel_mutex_temp)
 
-#define SRV_MAX_N_IO_THREADS	100
+#define SRV_MAX_N_IO_THREADS	130
 
 /* Array of English strings describing the current state of an
 i/o handler thread */
@@ -2923,31 +2923,27 @@ os_aio_array_create(
 	return(array);
 }
 
-/************************************************************************//**
-Initializes the asynchronous io system. Calls also os_io_init_simple.
-Creates a separate aio array for
-non-ibuf read and write, a third aio array for the ibuf i/o, with just one
-segment, two aio arrays for log reads and writes with one segment, and a
-synchronous aio array of the specified size. The combined number of segments
-in the three first aio arrays is the parameter n_segments given to the
-function. The caller must create an i/o handler thread for each segment in
-the four first arrays, but not for the sync aio array. */
+/***********************************************************************
+Initializes the asynchronous io system. Creates one array each for ibuf
+and log i/o. Also creates one array each for read and write where each
+array is divided logically into n_read_segs and n_write_segs
+respectively. The caller must create an i/o handler thread for each
+segment in these arrays. This function also creates the sync array.
+No i/o handler thread needs to be created for that */
 UNIV_INTERN
 void
 os_aio_init(
 /*========*/
-	ulint	n,		/*!< in: maximum number of pending aio operations
-				allowed; n must be divisible by n_segments */
-	ulint	n_segments,	/*!< in: combined number of segments in the four
-				first aio arrays; must be >= 4 */
-	ulint	n_slots_sync)	/*!< in: number of slots in the sync aio array */
+	ulint	n_per_seg,	/*<! in: maximum number of pending aio
+				operations allowed per segment */
+	ulint	n_read_segs,	/*<! in: number of reader threads */
+	ulint	n_write_segs,	/*<! in: number of writer threads */
+	ulint	n_slots_sync)	/*<! in: number of slots in the sync aio
+				array */
 {
-	ulint	n_read_segs;
-	ulint	n_write_segs;
-	ulint	n_per_seg;
 	ulint	i;
+	ulint	n_segments = 2 + n_read_segs + n_write_segs;
 
-	ut_ad(n % n_segments == 0);
 	ut_ad(n_segments >= 4);
 
 	os_io_init_simple();
@@ -2956,9 +2952,6 @@ os_aio_init(
 		srv_set_io_thread_op_info(i, "not started yet");
 	}
 
-	n_per_seg = n / n_segments;
-	n_write_segs = (n_segments - 2) / 2;
-	n_read_segs = n_segments - 2 - n_write_segs;
 
 	/* fprintf(stderr, "Array n per seg %lu\n", n_per_seg); */
@@ -3157,6 +3150,18 @@ os_aio_array_reserve_slot(
 	OVERLAPPED*	control;
 #endif
 	ulint		i;
+	ulint		slots_per_seg;
+	ulint		local_seg;
+
+	/* No need of a mutex. Only reading constant fields */
+	slots_per_seg = array->n_slots / array->n_segments;
+
+	/* We attempt to keep adjacent blocks in the same local
+	segment. This can help in merging IO requests when we are
+	doing simulated AIO */
+	local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6))
+		    % array->n_segments;
+
 loop:
 	os_mutex_enter(array->mutex);
@@ -3175,14 +3180,26 @@ loop:
 		goto loop;
 	}
 
+	/* First try to find a slot in the preferred local segment */
+	for (i = local_seg * slots_per_seg; i < array->n_slots; i++) {
+		slot = os_aio_array_get_nth_slot(array, i);
+		if (slot->reserved == FALSE) {
+			goto found;
+		}
+	}
+
+	/* Fall back to a full scan. We are guaranteed to find a slot */
 	for (i = 0;; i++) {
 		slot = os_aio_array_get_nth_slot(array, i);
 
 		if (slot->reserved == FALSE) {
-			break;
+			goto found;
 		}
 	}
 
+found:
+	ut_a(slot->reserved == FALSE);
+
 	array->n_reserved++;
 
 	if (array->n_reserved == 1) {
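
As an aside, not part of the commit: the shift used for local_seg above can be
checked with a small standalone program. Assuming the usual 16 KiB page size
(page-size shift 14), offset >> (14 + 6) maps the 64 pages of each 1 MiB chunk
to the same segment, which is what lets neighbouring requests queue up in the
same simulated-aio segment. The segment count and page numbers below are
made-up examples.

    #include <stdio.h>

    /* Assumed value for the default 16 KiB InnoDB page size. */
    #define PAGE_SIZE_SHIFT 14

    int main(void)
    {
            /* Example: 4 segments, e.g. 4 read i/o threads. */
            unsigned long n_segments = 4;
            unsigned long pages[] = {0, 63, 64, 65};
            size_t i;

            for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
                    unsigned long offset = pages[i] << PAGE_SIZE_SHIFT;
                    unsigned long seg = (offset >> (PAGE_SIZE_SHIFT + 6))
                            % n_segments;

                    /* pages 0 and 63 share segment 0; pages 64 and 65
                    land in segment 1 */
                    printf("page %lu -> local segment %lu\n", pages[i], seg);
            }

            return 0;
    }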
@@ -173,7 +173,11 @@ UNIV_INTERN ulint	srv_buf_pool_curr_size	= 0;
 UNIV_INTERN ulint	srv_mem_pool_size	= ULINT_MAX;
 UNIV_INTERN ulint	srv_lock_table_size	= ULINT_MAX;
 
+/* This parameter is deprecated. Use srv_n_io_[read|write]_threads
+instead. */
 UNIV_INTERN ulint	srv_n_file_io_threads	= ULINT_MAX;
+UNIV_INTERN ulint	srv_n_read_io_threads	= ULINT_MAX;
+UNIV_INTERN ulint	srv_n_write_io_threads	= ULINT_MAX;
 
 /* User settable value of the number of pages that InnoDB will tolerate
 within a 64 page extent even if they are accessed out of order or have
@@ -1007,6 +1007,7 @@ innobase_start_or_create_for_mysql(void)
 	ulint	tablespace_size_in_header;
 	ulint	err;
 	ulint	i;
+	ulint	io_limit;
 	my_bool	srv_file_per_table_original_value
 		= srv_file_per_table;
 	mtr_t	mtr;
@@ -1265,27 +1266,35 @@ innobase_start_or_create_for_mysql(void)
 		return(DB_ERROR);
 	}
 
-	/* Restrict the maximum number of file i/o threads */
-	if (srv_n_file_io_threads > SRV_MAX_N_IO_THREADS) {
-
-		srv_n_file_io_threads = SRV_MAX_N_IO_THREADS;
+	/* If user has set the value of innodb_file_io_threads then
+	we'll emit a message telling the user that this parameter
+	is now deprecated. */
+	if (srv_n_file_io_threads != 4) {
+		fprintf(stderr, "InnoDB: Warning:"
+			" innodb_file_io_threads is deprecated."
+			" Please use innodb_read_io_threads and"
+			" innodb_write_io_threads instead\n");
 	}
 
-	if (!os_aio_use_native_aio) {
-		/* In simulated aio we currently have use only for 4 threads */
-		srv_n_file_io_threads = 4;
-
-		os_aio_init(8 * SRV_N_PENDING_IOS_PER_THREAD
-			    * srv_n_file_io_threads,
-			    srv_n_file_io_threads,
-			    SRV_MAX_N_PENDING_SYNC_IOS);
+	/* Now overwrite the value on srv_n_file_io_threads */
+	srv_n_file_io_threads = 2 + srv_n_read_io_threads
+				+ srv_n_write_io_threads;
+
+	ut_a(srv_n_file_io_threads <= SRV_MAX_N_IO_THREADS);
+
+	/* TODO: Investigate if SRV_N_PENDING_IOS_PER_THREAD (32) limit
+	still applies to windows. */
+	if (!os_aio_use_native_aio) {
+		io_limit = 8 * SRV_N_PENDING_IOS_PER_THREAD;
 	} else {
-		os_aio_init(SRV_N_PENDING_IOS_PER_THREAD
-			    * srv_n_file_io_threads,
-			    srv_n_file_io_threads,
-			    SRV_MAX_N_PENDING_SYNC_IOS);
+		io_limit = SRV_N_PENDING_IOS_PER_THREAD;
 	}
 
+	os_aio_init(io_limit,
+		    srv_n_read_io_threads,
+		    srv_n_write_io_threads,
+		    SRV_MAX_N_PENDING_SYNC_IOS);
+
 	fil_init(srv_file_per_table ? 50000 : 5000,
 		 srv_max_n_open_files);