nexedi / MariaDB
Commit 2d3e0ac7 authored Dec 02, 2004 by unknown
Merge gbichot@bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/home/mysql_src/mysql-5.0-clean
parents d2c4b545 75fa4c00
Showing 6 changed files with 82 additions and 7 deletions
mysql-test/r/flush_read_lock_kill.result      +9   -0
mysql-test/t/flush_read_lock_kill-master.opt  +1   -0
mysql-test/t/flush_read_lock_kill.test        +46  -0
sql/lock.cc                                   +19  -5
sql/mysql_priv.h                              +1   -1
sql/sql_parse.cc                              +6   -1
mysql-test/r/flush_read_lock_kill.result  0 → 100644
drop table if exists t1;
create table t1 (kill_id int);
insert into t1 values(connection_id());
flush tables with read lock;
select ((@id := kill_id) - kill_id) from t1;
((@id := kill_id) - kill_id)
0
kill connection @id;
drop table t1;
mysql-test/t/flush_read_lock_kill-master.opt  0 → 100644
--debug=d,make_global_read_lock_block_commit_loop
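This option feeds the DBUG keyword make_global_read_lock_block_commit_loop to the server, which is what arms the DBUG_EXECUTE_IF calls added in sql/lock.cc further down: the guarded statement runs only on a debug build started with this exact keyword. A minimal self-contained C++ sketch of that keyword mechanism, assuming an illustrative parse_debug_option() helper and EXECUTE_IF_KEYWORD macro rather than the real dbug library:

// Illustrative mock of the --debug=d,<keyword> / DBUG_EXECUTE_IF pairing;
// not the real dbug.h, which supports many more flags and separators.
#include <cstdio>
#include <set>
#include <string>

static std::set<std::string> debug_keywords;   // filled from --debug=d,...

static void parse_debug_option(const std::string &arg)
{
  // Accept only the "d,<keyword>" form used by this test's -master.opt.
  std::string::size_type comma= arg.find(',');
  if (comma == std::string::npos || arg.substr(0, comma) != "d")
    return;
  debug_keywords.insert(arg.substr(comma + 1));
}

// Rough analogue of DBUG_EXECUTE_IF(keyword, action): run the action only
// when the keyword was named on the command line.
#define EXECUTE_IF_KEYWORD(kw, action) \
  do { if (debug_keywords.count(kw)) { action; } } while (0)

int main()
{
  parse_debug_option("d,make_global_read_lock_block_commit_loop");
  int protect_against_global_read_lock= 0;     // stand-in for the server global
  EXECUTE_IF_KEYWORD("make_global_read_lock_block_commit_loop",
                     protect_against_global_read_lock++);
  printf("%d\n", protect_against_global_read_lock);   // prints 1 with the keyword armed
  return 0;
}

Without the keyword (or on a non-debug build, where DBUG_EXECUTE_IF compiles to nothing) the guarded increment is simply skipped, which is why the test still passes in those cases, just without exercising the interesting path.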
mysql-test/t/flush_read_lock_kill.test  0 → 100644
# Let's see if FLUSH TABLES WITH READ LOCK can be killed when waiting
# for running commits to finish (in the past it could not)
# This will not be a meaningful test on non-debug servers so will be
# skipped.
# If running mysql-test-run --debug, the --debug added by
# mysql-test-run to the mysqld command line will override the one of
# -master.opt. But this test is designed to still pass then (though it
# won't test anything interesting).
-- source include/have_debug.inc

connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;

--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (kill_id int);
insert into t1 values(connection_id());

# Thanks to the parameter we passed to --debug, this FLUSH will
# block on a debug build running with our --debug=make_global... It
# will block until killed. In other cases (non-debug build or other
# --debug) it will succeed immediately

connection con1;
send flush tables with read lock;

# kill con1
connection con2;
select ((@id := kill_id) - kill_id) from t1;
--sleep 2; # leave time for FLUSH to block
kill connection @id;

connection con1;
# On debug builds it will be error 1053 (killed); on non-debug, or
# debug build running without our --debug=make_global..., will be
# error 0 (no error). The only important thing to test is that on
# debug builds with our --debug=make_global... we don't hang forever.
--error 0,1053
reap;

connection con2;
drop table t1;
sql/lock.cc
@@ -840,19 +840,33 @@ void start_waiting_global_read_lock(THD *thd)
 }
 
-void make_global_read_lock_block_commit(THD *thd)
+bool make_global_read_lock_block_commit(THD *thd)
 {
+  bool error;
+  const char *old_message;
+  DBUG_ENTER("make_global_read_lock_block_commit");
   /*
     If we didn't succeed lock_global_read_lock(), or if we already suceeded
     make_global_read_lock_block_commit(), do nothing.
   */
   if (thd->global_read_lock != GOT_GLOBAL_READ_LOCK)
-    return;
+    DBUG_RETURN(1);
   pthread_mutex_lock(&LOCK_open);
   /* increment this BEFORE waiting on cond (otherwise race cond) */
   global_read_lock_blocks_commit++;
-  while (protect_against_global_read_lock)
+  /* For testing we set up some blocking, to see if we can be killed */
+  DBUG_EXECUTE_IF("make_global_read_lock_block_commit_loop",
+                  protect_against_global_read_lock++;);
+  old_message= thd->enter_cond(&COND_refresh, &LOCK_open,
+                               "Waiting for all running commits to finish");
+  while (protect_against_global_read_lock && !thd->killed)
     pthread_cond_wait(&COND_refresh, &LOCK_open);
-  pthread_mutex_unlock(&LOCK_open);
-  thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT;
+  DBUG_EXECUTE_IF("make_global_read_lock_block_commit_loop",
+                  protect_against_global_read_lock--;);
+  if (error= thd->killed)
+    global_read_lock_blocks_commit--; // undo what we did
+  else
+    thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT;
+  thd->exit_cond(old_message);
+  DBUG_RETURN(error);
 }
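The heart of the new function is a killable wait: announce intent by incrementing global_read_lock_blocks_commit, loop on the condition variable while also checking thd->killed, and report back whether the wait was aborted so the caller can undo the bookkeeping. A simplified self-contained pthread sketch of that pattern, assuming an illustrative Session struct in place of THD (this is not the server code; in the real server, THD::enter_cond()/exit_cond() also arrange for whoever sets killed to broadcast the condition so the waiter wakes up):

// Simplified killable condition wait; Session and the globals are stand-ins.
#include <cstdio>
#include <pthread.h>

struct Session
{
  volatile bool killed;                       // set by the thread handling KILL CONNECTION
};

static pthread_mutex_t lock_open= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond_refresh= PTHREAD_COND_INITIALIZER;
static int protect_against_global_read_lock= 0;   // commits still in progress
static int global_read_lock_blocks_commit= 0;

// Returns true if the wait was killed, false on success -- the same
// convention the patched make_global_read_lock_block_commit() adopts.
static bool wait_until_commits_finish(Session *thd)
{
  bool error;
  pthread_mutex_lock(&lock_open);
  global_read_lock_blocks_commit++;           // announce intent BEFORE waiting (avoids a race)
  while (protect_against_global_read_lock && !thd->killed)
    pthread_cond_wait(&cond_refresh, &lock_open);   // woken when a commit finishes or on KILL
  if ((error= thd->killed))
    global_read_lock_blocks_commit--;         // undo what we did
  pthread_mutex_unlock(&lock_open);
  return error;
}

int main()
{
  Session thd= { true };                      // pretend the KILL already arrived
  protect_against_global_read_lock= 1;        // pretend a commit is still running
  bool killed= wait_until_commits_finish(&thd);
  printf("killed=%d blocks_commit=%d\n", killed, global_read_lock_blocks_commit);  // killed=1 blocks_commit=0
  return 0;
}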
sql/mysql_priv.h
@@ -1096,7 +1096,7 @@ void unlock_global_read_lock(THD *thd);
 bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh,
                               bool is_not_commit);
 void start_waiting_global_read_lock(THD *thd);
-void make_global_read_lock_block_commit(THD *thd);
+bool make_global_read_lock_block_commit(THD *thd);
 /* Lock based on name */
 int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list);
sql/sql_parse.cc
@@ -5746,7 +5746,12 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
        return 1;
       result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables);
-      make_global_read_lock_block_commit(thd);
+      if (make_global_read_lock_block_commit(thd))
+      {
+        /* Don't leave things in a half-locked state */
+        unlock_global_read_lock(thd);
+        return 1;
+      }
     }
     else
       result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables);
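The sql_parse.cc hunk is the caller-side half of the fix: by the time FLUSH TABLES WITH READ LOCK reaches this point it already holds the global read lock, so when make_global_read_lock_block_commit() now reports that it was killed, the lock has to be released before the error is returned instead of being left half-held. A small self-contained C++ sketch of that cleanup pattern, with *_stub functions standing in for the real server calls:

// Caller-side "undo partial state on failure" pattern; all *_stub names are
// illustrative stand-ins, not the actual MySQL functions.
#include <cstdio>

static bool global_read_lock_held= false;

static bool lock_global_read_lock_stub()              { global_read_lock_held= true; return false; }
static void unlock_global_read_lock_stub()            { global_read_lock_held= false; }
static bool make_global_read_lock_block_commit_stub() { return true; /* pretend we were killed */ }

// Mirrors the patched branch of reload_acl_and_cache(): any step that fails
// after the lock was taken must release it before propagating the error.
static bool flush_tables_with_read_lock()
{
  if (lock_global_read_lock_stub())
    return true;                              // nothing acquired yet, nothing to undo
  if (make_global_read_lock_block_commit_stub())
  {
    unlock_global_read_lock_stub();           // don't leave things in a half-locked state
    return true;
  }
  return false;
}

int main()
{
  bool failed= flush_tables_with_read_lock();
  printf("failed=%d lock_held=%d\n", failed, global_read_lock_held);  // failed=1 lock_held=0
  return 0;
}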