nexedi / MariaDB
Commit 1b4ac075 authored Jun 26, 2018 by Marko Mäkelä
Merge 10.1 into 10.2
parents 7d0d934c c4eb4bce
Showing 10 changed files with 119 additions and 77 deletions
storage/innobase/buf/buf0lru.cc       +7 -4
storage/innobase/dict/dict0dict.cc    +2 -3
storage/innobase/include/buf0lru.h    +5 -3
storage/innobase/row/row0import.cc    +17 -11
storage/innobase/row/row0mysql.cc     +29 -20
storage/xtradb/buf/buf0lru.cc         +7 -4
storage/xtradb/dict/dict0dict.cc      +2 -2
storage/xtradb/include/buf0lru.h      +5 -3
storage/xtradb/row/row0import.cc      +17 -11
storage/xtradb/row/row0mysql.cc       +28 -16
storage/innobase/buf/buf0lru.cc
@@ -356,9 +356,10 @@ buf_LRU_drop_page_hash_for_tablespace(
 	ut_free(page_arr);
 }
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 {
 	for (dict_index_t* index = dict_table_get_first_index(table);
 	     index != NULL;
@@ -369,13 +370,15 @@ void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 		}
 	}
 
-	return;
+	return false;
 
 drop_ahi:
 	ulint	id = table->space;
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
 						      id);
 	}
+
+	return true;
 }
 #endif /* BTR_CUR_HASH_ADAPT */
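This hunk turns buf_LRU_drop_page_hash_for_tablespace() from a fire-and-forget call into an incremental, interruptible operation: each invocation drops what one scan finds and reports via the new bool return value whether anything was dropped, so callers retry until a pass drops nothing, checking for shutdown or a killed transaction between passes (the same loops appear in row0import.cc and row0mysql.cc below). The following is a minimal self-contained sketch of that retry idiom; table_t, drop_one_pass and shutting_down are hypothetical stand-ins for illustration, not the InnoDB API, which operates on dict_table_t and also consults trx_is_interrupted().

#include <atomic>
#include <iostream>

// Hypothetical stand-in for the table object; the real code uses
// dict_table_t and the buffer-pool instances.
struct table_t {
	int hash_entries;	// pretend count of remaining AHI entries
};

std::atomic<bool> shutting_down{false};

// Mirrors the new contract of buf_LRU_drop_page_hash_for_tablespace():
// drop whatever one pass finds, and return whether anything was dropped
// (false means the tablespace is clean and the caller may stop).
bool drop_one_pass(table_t* table)
{
	if (table->hash_entries == 0) {
		return false;
	}
	table->hash_entries -= 1;	// one pass drops one batch of entries
	return true;
}

int main()
{
	table_t t{3};
	// Caller-side retry loop, as added by this commit: retry until a
	// pass drops nothing, but give up early on shutdown (the real code
	// also checks trx_is_interrupted() and sets err = DB_INTERRUPTED).
	while (drop_one_pass(&t)) {
		if (shutting_down.load()) {
			std::cout << "interrupted\n";
			return 1;
		}
	}
	std::cout << "all adaptive hash entries dropped\n";
	return 0;
}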
storage/innobase/dict/dict0dict.cc
@@ -2653,12 +2653,11 @@ dict_index_remove_from_cache_low(
 	zero. See also: dict_table_can_be_evicted() */
 
 	do {
-		if (!btr_search_info_get_ref_count(info, index)) {
+		if (!btr_search_info_get_ref_count(info, index)
+		    || !buf_LRU_drop_page_hash_for_tablespace(table)) {
 			break;
 		}
 
-		buf_LRU_drop_page_hash_for_tablespace(table);
-
 		ut_a(++retries < 10000);
 	} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
 #endif /* BTR_CUR_HASH_ADAPT */
storage/innobase/include/buf0lru.h
@@ -52,9 +52,11 @@ These are low-level functions
 #ifdef BTR_CUR_HASH_ADAPT
 struct dict_table_t;
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+	MY_ATTRIBUTE((warn_unused_result,nonnull));
 #else
 # define buf_LRU_drop_page_hash_for_tablespace(table)
 #endif /* BTR_CUR_HASH_ADAPT */
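The declaration also gains MY_ATTRIBUTE((warn_unused_result, nonnull)), so a caller that ignores the new bool return draws a compiler warning instead of silently keeping the old single-call behaviour. A small illustration of the same mechanism using the GCC/Clang attribute that MY_ATTRIBUTE wraps; the function names here are illustrative, not InnoDB's.

// Illustrative only; compile with: g++ -Wall -c example.cc
struct table_t;

// Equivalent of MY_ATTRIBUTE((warn_unused_result, nonnull)):
// the result must be consumed, and the pointer must be non-null.
__attribute__((warn_unused_result, nonnull))
bool try_drop(table_t* table);

void caller(table_t* table)
{
	try_drop(table);		// warning: ignoring return value
	while (try_drop(table)) {	// OK: result controls the retry loop
	}
}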
storage/innobase/row/row0import.cc
@@ -3886,6 +3886,23 @@ row_import_for_mysql(
 	DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
 			err = DB_TOO_MANY_CONCURRENT_TRXS;);
 
+	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
+	index entries. If we replaced the discarded tablespace with a
+	smaller one here, there could still be some adaptive hash
+	index entries that point to cached garbage pages in the buffer
+	pool, because PageConverter::operator() only evicted those
+	pages that were replaced by the imported pages. We must
+	discard all remaining adaptive hash index entries, because the
+	adaptive hash index must be a subset of the table contents;
+	false positives are not tolerated. */
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			break;
+		}
+	}
+
 	if (err != DB_SUCCESS) {
 		char table_name[MAX_FULL_NAME_LEN + 1];
@@ -3904,17 +3921,6 @@ row_import_for_mysql(
 		return(row_import_cleanup(prebuilt, trx, err));
 	}
 
-	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
-	index entries. If we replaced the discarded tablespace with a
-	smaller one here, there could still be some adaptive hash
-	index entries that point to cached garbage pages in the buffer
-	pool, because PageConverter::operator() only evicted those
-	pages that were replaced by the imported pages. We must
-	discard all remaining adaptive hash index entries, because the
-	adaptive hash index must be a subset of the table contents;
-	false positives are not tolerated. */
-	buf_LRU_drop_page_hash_for_tablespace(table);
-
 	row_mysql_lock_data_dictionary(trx);
 
 	/* If the table is stored in a remote tablespace, we need to
storage/innobase/row/row0mysql.cc
@@ -63,6 +63,7 @@ Created 9/17/2000 Heikki Tuuri
 #include "trx0rec.h"
 #include "trx0roll.h"
 #include "trx0undo.h"
+#include "srv0start.h"
 #include "row0ext.h"
 #include "ut0new.h"
@@ -3422,12 +3423,35 @@ row_drop_table_for_mysql(
 	/* make sure background stats thread is not running on the table */
 	ut_ad(!(table->stats_bg_flag & BG_STAT_IN_PROGRESS));
 
-	/* Delete the link file if used. */
-	if (DICT_TF_HAS_DATA_DIR(table->flags)) {
-		RemoteDatafile::delete_link_file(name);
-	}
-
 	if (!dict_table_is_temporary(table)) {
+		if (table->space != TRX_SYS_SPACE) {
+			/* On DISCARD TABLESPACE, we would not drop the
+			adaptive hash index entries. If the tablespace is
+			missing here, delete-marking the record in SYS_INDEXES
+			would not free any pages in the buffer pool. Thus,
+			dict_index_remove_from_cache() would hang due to
+			adaptive hash index entries existing in the buffer
+			pool. To prevent this hang, and also to guarantee
+			that btr_search_drop_page_hash_when_freed() will avoid
+			calling btr_search_drop_page_hash_index() while we
+			hold the InnoDB dictionary lock, we will drop any
+			adaptive hash index entries upfront. */
+			while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+				if (trx_is_interrupted(trx)
+				    || srv_shutdown_state
+				    != SRV_SHUTDOWN_NONE) {
+					err = DB_INTERRUPTED;
+					table->to_be_dropped = false;
+					dict_table_close(table, true, false);
+					goto funct_exit;
+				}
+			}
+
+			/* Delete the link file if used. */
+			if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+				RemoteDatafile::delete_link_file(name);
+			}
+		}
 
 		dict_stats_recalc_pool_del(table);
 		dict_stats_defrag_pool_del(table, NULL);
@@ -3626,21 +3650,6 @@ row_drop_table_for_mysql(
 	/* As we don't insert entries to SYSTEM TABLES for temp-tables
 	we need to avoid running removal of these entries. */
 	if (!dict_table_is_temporary(table)) {
-		if (table->space != TRX_SYS_SPACE) {
-			/* On DISCARD TABLESPACE, we would not drop the
-			adaptive hash index entries. If the tablespace is
-			missing here, delete-marking the record in SYS_INDEXES
-			would not free any pages in the buffer pool. Thus,
-			dict_index_remove_from_cache() would hang due to
-			adaptive hash index entries existing in the buffer
-			pool. To prevent this hang, and also to guarantee
-			that btr_search_drop_page_hash_when_freed() will avoid
-			calling btr_search_drop_page_hash_index() while we
-			hold the InnoDB dictionary lock, we will drop any
-			adaptive hash index entries upfront. */
-			buf_LRU_drop_page_hash_for_tablespace(table);
-		}
-
 		/* We use the private SQL parser of Innobase to generate the
 		query graphs needed in deleting the dictionary data from system
 		tables in Innobase. Deleting a row from SYS_INDEXES table also
storage/xtradb/buf/buf0lru.cc
@@ -354,9 +354,10 @@ buf_LRU_drop_page_hash_for_tablespace(
 	ut_free(page_arr);
 }
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 {
 	for (dict_index_t* index = dict_table_get_first_index(table);
 	     index != NULL;
@@ -367,13 +368,15 @@ UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
 		}
 	}
 
-	return;
+	return false;
 
 drop_ahi:
 	ulint	id = table->space;
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
 						      id);
 	}
+
+	return true;
 }
 /******************************************************************//**
storage/xtradb/dict/dict0dict.cc
@@ -2729,11 +2729,11 @@ dict_index_remove_from_cache_low(
 	zero. See also: dict_table_can_be_evicted() */
 
 	do {
-		if (!btr_search_info_get_ref_count(info, index)) {
+		if (!btr_search_info_get_ref_count(info, index)
+		    || !buf_LRU_drop_page_hash_for_tablespace(table)) {
 			break;
 		}
 
-		buf_LRU_drop_page_hash_for_tablespace(table);
 		ut_a(++retries < 10000);
 	} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
storage/xtradb/include/buf0lru.h
@@ -55,9 +55,11 @@ These are low-level functions
 /** Minimum LRU list length for which the LRU_old pointer is defined */
 #define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
 
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table */
-UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out]	table	table
+@return whether anything was dropped */
+UNIV_INTERN bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+	MY_ATTRIBUTE((warn_unused_result,nonnull));
 
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id	tablespace identifier
storage/xtradb/row/row0import.cc
@@ -3982,6 +3982,23 @@ row_import_for_mysql(
 	DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
 			err = DB_TOO_MANY_CONCURRENT_TRXS;);
 
+	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
+	index entries. If we replaced the discarded tablespace with a
+	smaller one here, there could still be some adaptive hash
+	index entries that point to cached garbage pages in the buffer
+	pool, because PageConverter::operator() only evicted those
+	pages that were replaced by the imported pages. We must
+	discard all remaining adaptive hash index entries, because the
+	adaptive hash index must be a subset of the table contents;
+	false positives are not tolerated. */
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			break;
+		}
+	}
+
 	if (err != DB_SUCCESS) {
 		char table_name[MAX_FULL_NAME_LEN + 1];
@@ -3999,17 +4016,6 @@
 		return(row_import_cleanup(prebuilt, trx, err));
 	}
 
-	/* On DISCARD TABLESPACE, we did not drop any adaptive hash
-	index entries. If we replaced the discarded tablespace with a
-	smaller one here, there could still be some adaptive hash
-	index entries that point to cached garbage pages in the buffer
-	pool, because PageConverter::operator() only evicted those
-	pages that were replaced by the imported pages. We must
-	discard all remaining adaptive hash index entries, because the
-	adaptive hash index must be a subset of the table contents;
-	false positives are not tolerated. */
-	buf_LRU_drop_page_hash_for_tablespace(table);
-
 	row_mysql_lock_data_dictionary(trx);
 
 	/* If the table is stored in a remote tablespace, we need to
storage/xtradb/row/row0mysql.cc
@@ -3540,7 +3540,13 @@ row_truncate_table_for_mysql(
 		fil_space_release(space);
 	}
 
-	buf_LRU_drop_page_hash_for_tablespace(table);
+	while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+		if (trx_is_interrupted(trx)
+		    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+			err = DB_INTERRUPTED;
+			goto funct_exit;
+		}
+	}
 
 	if (flags != ULINT_UNDEFINED
 	    && fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4202,6 +4208,27 @@ row_drop_table_for_mysql(
 	ut_a(!lock_table_has_locks(table));
 
+	if (table->space != TRX_SYS_SPACE) {
+		/* On DISCARD TABLESPACE, we would not drop the
+		adaptive hash index entries. If the tablespace is
+		missing here, delete-marking the record in SYS_INDEXES
+		would not free any pages in the buffer pool. Thus,
+		dict_index_remove_from_cache() would hang due to
+		adaptive hash index entries existing in the buffer
+		pool. To prevent this hang, and also to guarantee
+		that btr_search_drop_page_hash_when_freed() will avoid
+		calling btr_search_drop_page_hash_index() while we
+		hold the InnoDB dictionary lock, we will drop any
+		adaptive hash index entries upfront. */
+		while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+			if (trx_is_interrupted(trx)
+			    || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+				err = DB_INTERRUPTED;
+				goto funct_exit;
+			}
+		}
+	}
+
 	switch (trx_get_dict_operation(trx)) {
 	case TRX_DICT_OP_NONE:
 		trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
@@ -4241,21 +4268,6 @@
 		rw_lock_x_unlock(dict_index_get_lock(index));
 	}
 
-	if (table->space != TRX_SYS_SPACE) {
-		/* On DISCARD TABLESPACE, we would not drop the
-		adaptive hash index entries. If the tablespace is
-		missing here, delete-marking the record in SYS_INDEXES
-		would not free any pages in the buffer pool. Thus,
-		dict_index_remove_from_cache() would hang due to
-		adaptive hash index entries existing in the buffer
-		pool. To prevent this hang, and also to guarantee
-		that btr_search_drop_page_hash_when_freed() will avoid
-		calling btr_search_drop_page_hash_index() while we
-		hold the InnoDB dictionary lock, we will drop any
-		adaptive hash index entries upfront. */
-		buf_LRU_drop_page_hash_for_tablespace(table);
-	}
-
 	/* We use the private SQL parser of Innobase to generate the
 	query graphs needed in deleting the dictionary data from system
 	tables in Innobase. Deleting a row from SYS_INDEXES table also