Commit b228f5ff
authored Apr 24, 2003 by monty@narttu.mysql.fi

merge

parents 8c2bda2d f6a43bd5

Showing 50 changed files with 358 additions and 309 deletions
Images/.cvsignore  +0 -1
include/my_global.h  +6 -0
myisam/mi_info.c  +1 -1
mysql-test/r/group_by.result  +19 -23
mysql-test/r/innodb.result  +71 -0
mysql-test/r/join_outer.result  +2 -2
mysql-test/r/multi_update.result  +10 -10
mysql-test/r/null_key.result  +1 -1
mysql-test/r/update.result  +33 -7
mysql-test/t/group_by.test  +3 -13
mysql-test/t/innodb.test  +53 -0
mysql-test/t/join_outer.test  +1 -1
mysql-test/t/multi_update.test  +1 -1
mysql-test/t/update.test  +9 -5
repl-tests/README  +0 -12
repl-tests/include/master-slave.inc  +0 -2
repl-tests/run-all-tests  +0 -9
repl-tests/test-auto-inc/run.test  +0 -10
repl-tests/test-auto-inc/x.master  +0 -4
repl-tests/test-bad-query/run.test  +0 -10
repl-tests/test-bad-query/x.master  +0 -4
repl-tests/test-dump/run.test  +0 -25
repl-tests/test-dump/table-dump-check.master  +0 -2
repl-tests/test-dump/table-dump-select.master  +0 -2
repl-tests/test-repl-alter/run.test  +0 -12
repl-tests/test-repl-alter/test.master  +0 -4
repl-tests/test-repl-ts/repl-timestamp.master  +0 -2
repl-tests/test-repl-ts/repl-timestamp.master.reject  +0 -2
repl-tests/test-repl-ts/run.test  +0 -17
repl-tests/test-repl/foo-dump-master.master  +0 -3
repl-tests/test-repl/foo-dump-slave.master  +0 -3
repl-tests/test-repl/run.test  +0 -24
repl-tests/test-repl/sum-wlen-master.master  +0 -2
repl-tests/test-repl/sum-wlen-slave.master  +0 -2
sql/field.cc  +1 -1
sql/ha_heap.h  +3 -2
sql/ha_innodb.cc  +25 -4
sql/ha_innodb.h  +1 -0
sql/handler.h  +2 -1
sql/item_sum.cc  +3 -2
sql/mysqld.cc  +1 -1
sql/opt_range.cc  +8 -4
sql/records.cc  +1 -1
sql/sql_list.h  +1 -1
sql/sql_select.cc  +14 -10
sql/sql_select.h  +2 -2
sql/sql_union.cc  +2 -1
sql/sql_update.cc  +82 -63
support-files/my-huge.cnf.sh  +1 -0
support-files/my-large.cnf.sh  +1 -0
Images/.cvsignore
deleted
100644 → 0
View file @
8c2bda2d
logo_nusphere_b.tif
include/my_global.h
View file @
b228f5ff
...
...
@@ -850,7 +850,13 @@ typedef char bool; /* Ordinary boolean values 0 1 */
((uint32) (uchar) (A)[0])))
#define sint4korr(A) (*((long *) (A)))
#define uint2korr(A) (*((uint16 *) (A)))
#ifdef HAVE_purify
#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
(((uint32) ((uchar) (A)[2])) << 16))
#else
#define uint3korr(A) (long) (*((unsigned long *) (A)) & 0xFFFFFF)
#endif
#define uint4korr(A) (*((unsigned long *) (A)))
#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
(((uint32) ((uchar) (A)[1])) << 8) +\
...
...
myisam/mi_info.c
View file @
b228f5ff
...
...
@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
Ger tillbaka en struct med information om isam-filen
*/
/*
Return useful base information for an open table
*/
#include "myisamdef.h"
#ifdef __WIN__
...
...
mysql-test/r/group_by.result
View file @
b228f5ff
...
...
@@ -49,6 +49,10 @@ SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1
userid MIN(t1.score+0.0)
1 1.0
2 2.0
EXPLAIN SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 4 Using where; Using temporary
t2 index PRIMARY PRIMARY 4 NULL 3 Using where; Using index
drop table test.t1,test.t2;
CREATE TABLE t1 (
PID int(10) unsigned NOT NULL auto_increment,
...
...
@@ -242,34 +246,32 @@ score smallint(5) unsigned,
key (spid),
key (score)
);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3),(6,3,3),(7,3,3);
explain select userid,count(*) from t1 group by userid desc;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
t1 ALL NULL NULL NULL NULL 8 Using temporary; Using filesort
explain select userid,count(*) from t1 group by userid desc order by null;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6 Using temporary
t1 ALL NULL NULL NULL NULL 8 Using temporary
select userid,count(*) from t1 group by userid desc;
userid count(*)
3 3
3 5
2 1
1 2
select userid,count(*) from t1 group by userid desc having (count(*)+1) IN (4,3);
userid count(*)
3 3
1 2
select userid,count(*) from t1 group by userid desc having 3 IN (1,COUNT(*));
userid count(*)
3 3
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid desc;
table type possible_keys key key_len ref rows Extra
t1 range spID spID 5 NULL 2 Using where; Using index
t1 range spID spID 5 NULL 3 Using where; Using index
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid;
table type possible_keys key key_len ref rows Extra
t1 range spID spID 5 NULL 2 Using where; Using index
t1 range spID spID 5 NULL 3 Using where; Using index
explain select spid,count(*) from t1 where spid between 1 and 2 group by spid order by null;
table type possible_keys key key_len ref rows Extra
t1 range spID spID 5 NULL 2 Using where; Using index
t1 range spID spID 5 NULL 3 Using where; Using index
select spid,count(*) from t1 where spid between 1 and 2 group by spid;
spid count(*)
1 1
...
...
@@ -280,12 +282,14 @@ spid count(*)
1 1
explain select sql_big_result spid,sum(userid) from t1 group by spid desc;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6 Using filesort
t1 ALL NULL NULL NULL NULL 8 Using filesort
explain select sql_big_result spid,sum(userid) from t1 group by spid desc order by null;
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 6
t1 ALL NULL NULL NULL NULL 8
select sql_big_result spid,sum(userid) from t1 group by spid desc;
spid sum(userid)
7 3
6 3
5 3
4 3
3 3
...
...
@@ -293,13 +297,13 @@ spid sum(userid)
1 1
explain select sql_big_result score,count(*) from t1 group by score desc;
table type possible_keys key key_len ref rows Extra
t1 index NULL score 3 NULL 6 Using index
t1 index NULL score 3 NULL 8 Using index
explain select sql_big_result score,count(*) from t1 group by score desc order by null;
table type possible_keys key key_len ref rows Extra
t1 index NULL score 3 NULL 6 Using index
t1 index NULL score 3 NULL 8 Using index
select sql_big_result score,count(*) from t1 group by score desc;
score count(*)
3 3
3 5
2 1
1 2
drop table t1;
...
...
@@ -535,15 +539,7 @@ t1 ALL NULL NULL NULL NULL 6 Using temporary
t2 ALL a NULL NULL NULL 4 Using where
drop table t1,t2;
create table t1 (a int, b int);
insert into t1 values (1, 4);
insert into t1 values (10, 40);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (10, 41);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (1, 4),(10, 40),(1, 4),(10, 43),(1, 4),(10, 41),(1, 4),(10, 43),(1, 4);
select a, MAX(b), INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000) from t1 group by a;
a MAX(b) INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000)
1 4 2
...
...
mysql-test/r/innodb.result
View file @
b228f5ff
...
...
@@ -1201,3 +1201,74 @@ a b
8 5
9 5
drop table t1,t2;
create table t1 (a int not null auto_increment primary key, b int, c int, key(c)) type=innodb;
create table t2 (a int not null auto_increment primary key, b int);
insert into t1 (b) values (null),(null),(null),(null),(null),(null),(null);
insert into t2 (a) select b from t1;
insert into t1 (b) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
select count(*) from t1;
count(*)
29267
explain select a from t1 where a between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 range PRIMARY PRIMARY 4 NULL 14790 Using where; Using index
explain select * from t1 where a between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 range PRIMARY PRIMARY 4 NULL 14790 Using where
explain select * from t1 where c between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 range c c 5 NULL 1 Using where
update t1 set c=a;
explain select * from t1 where c between 1 and 10000;
table type possible_keys key key_len ref rows Extra
t1 ALL c NULL NULL NULL 29537 Using where
drop table t1,t2;
create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) type=innodb;
insert into t1 (id) values (null),(null),(null),(null),(null);
update t1 set fk=69 where fk is null order by id limit 1;
SELECT * from t1;
id fk
2 NULL
3 NULL
4 NULL
5 NULL
1 69
drop table t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
a b
2 4
2 5
2 6
3 7
3 8
3 9
3 10
3 11
3 12
13 2
111 100
111 100
drop table t1;
mysql-test/r/join_outer.result
View file @
b228f5ff
...
...
@@ -373,10 +373,10 @@ Thimble Smith Happy 3 3 3
drop table t1,t2;
create table t1 (id int not null, str char(10), index(str));
insert into t1 values (1, null), (2, null), (3, "foo"), (4, "bar");
select * from t1 where str is not null;
select * from t1 where str is not null order by id;
id str
4 bar
3 foo
4 bar
select * from t1 where str is null;
id str
1 NULL
...
...
mysql-test/r/multi_update.result
View file @
b228f5ff
...
...
@@ -286,7 +286,7 @@ a b
107 7
108 8
109 9
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t2.a=t1.a-100;
select * from t1;
a b
201 1
...
...
@@ -300,13 +300,13 @@ a b
109 9
select * from t2;
a b
1 3
2 3
3 3
4 3
5 3
6 3
7 3
8 3
9 3
1 1
2 2
3 13
4 14
5 15
6 6
7 7
8 8
9 9
drop table t1,t2;
mysql-test/r/null_key.result
View file @
b228f5ff
...
...
@@ -84,7 +84,7 @@ table type possible_keys key key_len ref rows Extra
t1 range a,b a 5 NULL 5 Using where
explain select * from t1 where (a is null or a = 7) and b=7 and c=0;
table type possible_keys key key_len ref rows Extra
t1 range a,b a 5 NULL 4 Using where
t1 ALL a,b NULL NULL NULL 12 Using where
explain select * from t1 where (a is null and b>a) or a is null and b=7 limit 2;
table type possible_keys key key_len ref rows Extra
t1 ref a,b a 5 const 3 Using where
...
...
mysql-test/r/update.result
View file @
b228f5ff
...
...
@@ -102,13 +102,39 @@ select status from t1;
status
1
drop table t1;
create table t1 (a int not null, b int not null);
insert into t1 values (1,1),(1,2),(1,3);
update t1 set b=4 where a=1 order by b asc limit 1;
update t1 set b=4 where a=1 order by b desc limit 1;
select * from t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
select * from t1 order by a,b;
a b
1 4
1 2
1 4
1 3
1 99
2 4
2 5
2 6
3 7
3 8
3 9
3 10
3 11
3 12
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
a b
2 4
2 5
2 6
3 7
3 8
3 9
3 10
3 11
3 12
13 2
111 100
111 100
drop table t1;
mysql-test/t/group_by.test
View file @
b228f5ff
...
...
@@ -40,6 +40,7 @@ SELECT t2.userid, MIN(t1.score) FROM t1, t2 WHERE t1.userID=t2.userID GROUP BY t
SELECT t2.userid, MIN(t1.score) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid;
SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid;
SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
EXPLAIN SELECT t2.userid, MIN(t1.score+0.0) FROM t1, t2 WHERE t1.userID=t2.userID AND t1.spID=2 GROUP BY t2.userid ORDER BY NULL;
drop table test.t1,test.t2;
#
...
...
@@ -236,7 +237,7 @@ CREATE TABLE t1 (
key (score)
);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3);
INSERT INTO t1 VALUES (1,1,1),(2,2,2),(2,1,1),(3,3,3),(4,3,3),(5,3,3),(6,3,3),(7,3,3);
explain select userid,count(*) from t1 group by userid desc;
explain select userid,count(*) from t1 group by userid desc order by null;
select userid,count(*) from t1 group by userid desc;
...
...
@@ -255,8 +256,6 @@ explain select sql_big_result score,count(*) from t1 group by score desc order b
select sql_big_result score,count(*) from t1 group by score desc;
drop table t1;
#
# not purely group_by bug, but group_by is involved...
create table t1 (a date default null, b date default null);
...
...
@@ -265,7 +264,6 @@ select a,min(b) c,count(distinct rand()) from t1 group by a having c<a + interva
drop table t1;
# Compare with hash keys
#
CREATE TABLE t1 (a char(1));
INSERT INTO t1 VALUES ('A'),('B'),('A'),('B'),('A'),('B'),(NULL),('a'),('b'),(NULL),('A'),('B'),(NULL);
...
...
@@ -401,15 +399,7 @@ drop table t1,t2;
#
create table t1 (a int, b int);
insert into t1 values (1, 4);
insert into t1 values (10, 40);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (10, 41);
insert into t1 values (1, 4);
insert into t1 values (10, 43);
insert into t1 values (1, 4);
insert into t1 values (1, 4),(10, 40),(1, 4),(10, 43),(1, 4),(10, 41),(1, 4),(10, 43),(1, 4);
select a, MAX(b), INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000) from t1 group by a;
select a, MAX(b), CASE MAX(b) when 4 then 4 when 43 then 43 else 0 end from t1 group by a;
select a, MAX(b), FIELD(MAX(b), '43', '4', '5') from t1 group by a;
...
...
mysql-test/t/innodb.test
View file @
b228f5ff
...
...
@@ -795,5 +795,58 @@ select * from t1;
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
select * from t1;
select * from t2;
drop table t1,t2;
#
# Test that MySQL priorities clustered indexes
#
create table t1 (a int not null auto_increment primary key, b int, c int, key(c)) type=innodb;
create table t2 (a int not null auto_increment primary key, b int);
insert into t1 (b) values (null),(null),(null),(null),(null),(null),(null);
insert into t2 (a) select b from t1;
insert into t1 (b) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
insert into t2 (a) select b from t1;
insert into t1 (a) select b from t2;
select count(*) from t1;
explain select a from t1 where a between 1 and 10000;
explain select * from t1 where a between 1 and 10000;
explain select * from t1 where c between 1 and 10000;
update t1 set c=a;
explain select * from t1 where c between 1 and 10000;
drop table t1,t2;
#
# Test of UPDATE ... ORDER BY
#
create table t1 (id int primary key auto_increment, fk int, index index_fk (fk)) type=innodb;
insert into t1 (id) values (null),(null),(null),(null),(null);
update t1 set fk=69 where fk is null order by id limit 1;
SELECT * from t1;
drop table t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
drop table t1;
mysql-test/t/join_outer.test
View file @
b228f5ff
...
...
@@ -248,7 +248,7 @@ drop table t1,t2;
create table t1 (id int not null, str char(10), index(str));
insert into t1 values (1, null), (2, null), (3, "foo"), (4, "bar");
select * from t1 where str is not null;
select * from t1 where str is not null order by id;
select * from t1 where str is null;
drop table t1;
...
...
mysql-test/t/multi_update.test
View file @
b228f5ff
...
...
@@ -247,7 +247,7 @@ update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
# Range key (in t1)
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t2.a=t1.a-100;
select * from t1;
select * from t2;
...
...
mysql-test/t/update.test
View file @
b228f5ff
...
...
@@ -81,9 +81,13 @@ drop table t1;
# Test of ORDER BY
#
create table t1 (a int not null, b int not null);
insert into t1 values (1,1),(1,2),(1,3);
update t1 set b=4 where a=1 order by b asc limit 1;
update t1 set b=4 where a=1 order by b desc limit 1;
select * from t1;
create table t1 (a int not null, b int not null, key (a));
insert into t1 values (1,1),(1,2),(1,3),(3,1),(3,2),(3,3),(3,1),(3,2),(3,3),(2,1),(2,2),(2,3);
SET @tmp=0;
update t1 set b=(@tmp:=@tmp+1) order by a;
update t1 set b=99 where a=1 order by b asc limit 1;
select * from t1 order by a,b;
update t1 set b=100 where a=1 order by b desc limit 2;
update t1 set a=a+10+b where a=1 order by b;
select * from t1 order by a,b;
drop table t1;
repl-tests/README
deleted
100644 → 0
View file @
8c2bda2d
This directory contains a set of test cases for replication. To get it
to work on your system, install this version of MySQL on the master and on
the slave, configure them according to the Replication HOWTO in the manual,
modify include/master-slave.inc to specify correct connection parameters
for the master and the slave and do
sh run-all-tests
If you would like to add your own test case, create a directory
test-your-test-case-name, write your own run.test following the examples
in the other test cases. Note that you can create the files containing
the expected output (master files) by running mysqltest --record < run.test
\ No newline at end of file
repl-tests/include/master-slave.inc
deleted
100644 → 0
View file @
8c2bda2d
connect (slave,localhost,root,,test,0,/var/lib/mysql/mysql.sock);
connect (master,sarochka,admin,,test,0,0);
repl-tests/run-all-tests
deleted
100755 → 0
View file @
8c2bda2d
#! /bin/sh
for d in test-*; do
cd $d
echo -n $d | sed -e s/test-//
echo -n "...."
../../client/mysqltest $@ < run.test
cd ..
done
\ No newline at end of file
repl-tests/test-auto-inc/run.test
deleted
100755 → 0
View file @
8c2bda2d
source ../include/master-slave.inc;
connection master;
drop table if exists x;
create table x (n int auto_increment primary key);
set insert_id=2000;
insert into x values (NULL),(NULL),(NULL);
connection slave;
sleep 3;
@x.master select * from x;
repl-tests/test-auto-inc/x.master
deleted
100644 → 0
View file @
8c2bda2d
n
2000
2001
2002
repl-tests/test-bad-query/run.test
deleted
100755 → 0
View file @
8c2bda2d
source ../include/master-slave.inc;
connection master;
drop table if exists x;
create table x (n int primary key);
!insert into x values (1),(2),(2);
insert into x values (3);
connection slave;
sleep 3;
@x.master select * from x;
repl-tests/test-bad-query/x.master
deleted
100644 → 0
View file @
8c2bda2d
n
1
2
3
repl-tests/test-dump/run.test
deleted
100644 → 0
View file @
8c2bda2d
source ../include/master-slave.inc;
connection slave;
!slave stop;
flush slave;
connection master;
flush master;
connection slave;
slave start;
connection master;
use test;
drop table if exists words;
create table words (word char(20) not null, index (word));
load data infile '/usr/dict/words' into table words;
drop table if exists words1;
create table words1 (word char(20) not null);
load data infile '/usr/dict/words' into table words1;
sleep 20;
connection slave;
use test;
drop table if exists words;
load table words from master;
drop table if exists words1;
load table words1 from master;
@table-dump-check.master check table words;
@table-dump-select.master select count(*) from words1;
repl-tests/test-dump/table-dump-check.master
deleted
100644 → 0
View file @
8c2bda2d
Table Op Msg_type Msg_text
test.words check status OK
repl-tests/test-dump/table-dump-select.master
deleted
100644 → 0
View file @
8c2bda2d
count(*)
45402
repl-tests/test-repl-alter/run.test
deleted
100644 → 0
View file @
8c2bda2d
source ../include/master-slave.inc;
connection master;
drop table if exists test;
CREATE TABLE test (name varchar(64), age smallint(3));
INSERT INTO test SET name='Andy', age=31;
INSERT test SET name='Jacob', age=2;
INSERT into test SET name='Caleb', age=1;
ALTER TABLE test ADD id int(8) ZEROFILL AUTO_INCREMENT PRIMARY KEY;
@test.master select * from test;
connection slave;
sleep 3;
@test.master select * from test;
repl-tests/test-repl-alter/test.master
deleted
100644 → 0
View file @
8c2bda2d
name age id
Andy 31 00000001
Jacob 2 00000002
Caleb 1 00000003
repl-tests/test-repl-ts/repl-timestamp.master
deleted
100644 → 0
View file @
8c2bda2d
unix_timestamp(t)
200006
repl-tests/test-repl-ts/repl-timestamp.master.reject
deleted
100644 → 0
View file @
8c2bda2d
unix_timestamp(t)
973999074
repl-tests/test-repl-ts/run.test
deleted
100644 → 0
View file @
8c2bda2d
#! ../client/mysqltest
# tests if the replicaion preserves timestamp properly
source ../include/master-slave.inc;
connection master;
set timestamp=200006;
drop table if exists foo;
create table foo (t timestamp not null, a char(1));
insert into foo (a) values ('F');
@repl-timestamp.master select unix_timestamp(t) from foo;
sleep 3;
connection slave;
drop table if exists foo;
load table foo from master;
@repl-timestamp.master select unix_timestamp(t) from foo;
repl-tests/test-repl/foo-dump-master.master
deleted
100644 → 0
View file @
8c2bda2d
n
1
2
repl-tests/test-repl/foo-dump-slave.master
deleted
100644 → 0
View file @
8c2bda2d
n
1
2
repl-tests/test-repl/run.test
deleted
100755 → 0
View file @
8c2bda2d
source ../include/master-slave.inc;
connection slave;
!slave stop;
connection master;
flush master;
connection slave;
flush slave;
!slave start;
sleep 3;
connection master;
use test;
drop table if exists words;
create table words (word char(20) not null, index (word));
load data infile '/usr/dict/words' into table words;
drop table if exists foo;
create table foo (n int);
insert into foo values (1),(2);
@foo-dump-master.master select * from foo;
@sum-wlen-master.master select sum(length(word)) from words;
connection slave;
sleep 15;
use test;
@sum-wlen-slave.master select sum(length(word)) from words;
@foo-dump-slave.master select * from foo;
repl-tests/test-repl/sum-wlen-master.master
deleted
100644 → 0
View file @
8c2bda2d
sum(length(word))
363634
repl-tests/test-repl/sum-wlen-slave.master
deleted
100644 → 0
View file @
8c2bda2d
sum(length(word))
363634
sql/field.cc
View file @
b228f5ff
...
...
@@ -3322,11 +3322,11 @@ bool Field_newdate::get_date(TIME *ltime,bool fuzzydate)
  if (is_null())
    return 1;
  uint32 tmp=(uint32) uint3korr(ptr);
  bzero((char*) ltime,sizeof(*ltime));
  ltime->day=   tmp & 31;
  ltime->month= (tmp >> 5) & 15;
  ltime->year=  (tmp >> 9);
  ltime->time_type=TIMESTAMP_DATE;
  ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0;
  return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0;
}
...
...
sql/ha_heap.h
View file @
b228f5ff
...
...
@@ -54,8 +54,9 @@ class ha_heap: public handler
  uint max_keys()          const { return MAX_KEY; }
  uint max_key_parts()     const { return MAX_REF_PARTS; }
  uint max_key_length()    const { return HA_MAX_REC_LENGTH; }
  virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
  virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
  double scan_time() { return (double) (records+deleted) / 20.0+10; }
  double read_time(uint index, uint ranges, ha_rows rows)
  { return (double) rows / 20.0+1; }
  virtual bool fast_key_read() { return 1;}
  int open(const char *name, int mode, uint test_if_locked);
...
...
sql/ha_innodb.cc
View file @
b228f5ff
...
...
@@ -3542,10 +3542,8 @@ ha_innobase::records_in_range(
/*************************************************************************
Gives an UPPER BOUND to the number of rows in a table. This is used in
filesort.cc and the upper bound must hold. TODO: Since the number of
rows in a table may change after this function is called, we still may
get a 'Sort aborted' error in filesort.cc of MySQL. The ultimate fix is to
improve the algorithm of filesort.cc. */
filesort.cc and its better if the upper bound hold.
*/
ha_rows ha_innobase::estimate_number_of_rows(void)
...
...
@@ -3611,6 +3609,29 @@ ha_innobase::scan_time()
	return((double) (prebuilt->table->stat_clustered_index_size));
}
/*
Calculate the time it takes to read a set of ranges through and index
This enables us to optimise reads for clustered indexes.
*/
double
ha_innobase::read_time(uint index, uint ranges, ha_rows rows)
{
	ha_rows total_rows;
	double  time_for_scan;

	if (index != table->primary_key)
		return handler::read_time(index, ranges, rows); // Not clustered
	if (rows <= 2)
		return (double) rows;
	/*
	  Assume that the read is proportional to scan time for all rows + one
	  seek per range.
	*/
	time_for_scan= scan_time();
	if ((total_rows= estimate_number_of_rows()) < rows)
		return time_for_scan;
	return (ranges + (double) rows / (double) total_rows * time_for_scan);
}
/*************************************************************************
Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
...
...
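The new ha_innobase::read_time() above prices a clustered-index range read as one seek per range plus the fraction of a full table scan covered by the requested rows, capped at the full-scan cost. Below is a minimal standalone sketch of that cost shape, assuming invented numbers and names (this is not MySQL's actual handler code or statistics):

```cpp
#include <cstdio>

// Hedged sketch of the clustered-index range cost used above:
// cost = ranges + (rows / total_rows) * time_for_full_scan,
// with a cheap constant for tiny row counts and a full-scan cap.
static double clustered_read_time(double ranges, double rows,
                                  double total_rows, double time_for_scan)
{
  if (rows <= 2)                 // very small reads are treated as ~free
    return rows;
  if (total_rows < rows)         // statistics can lag behind the data
    return time_for_scan;        // never price it above a full scan
  return ranges + rows / total_rows * time_for_scan;
}

int main()
{
  // Illustrative numbers only: a 100,000-row table whose full scan "costs" 400.
  std::printf("1 range, 10000 rows: %.2f\n",
              clustered_read_time(1, 10000, 100000, 400));
  std::printf("5 ranges, 500 rows:  %.2f\n",
              clustered_read_time(5, 500, 100000, 400));
  return 0;
}
```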
sql/ha_innodb.h
View file @
b228f5ff
...
...
@@ -126,6 +126,7 @@ class ha_innobase: public handler
	void	initialize(void);
	int	close(void);
	double	scan_time();
	double	read_time(uint index, uint ranges, ha_rows rows);
	int	write_row(byte * buf);
	int	update_row(const byte * old_data, byte * new_data);
...
sql/handler.h
View file @
b228f5ff
...
...
@@ -227,7 +227,8 @@ class handler :public Sql_alloc
  void change_table_ptr(TABLE *table_arg) { table=table_arg; }
  virtual double scan_time()
    { return ulonglong2double(data_file_length) / IO_SIZE + 1; }
  virtual double read_time(ha_rows rows) { return rows2double(rows); }
  virtual double read_time(uint index, uint ranges, ha_rows rows)
    { return rows2double(ranges+rows); }
  virtual bool fast_key_read() { return 0;}
  virtual key_map keys_to_use_for_scanning() { return 0; }
  virtual bool has_transactions(){ return 0;}
...
...
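handler.h keeps the old per-row read_time(rows) default and adds a three-argument read_time(index, ranges, rows) whose base version simply charges ranges + rows; a storage engine such as InnoDB can override it for its clustered primary key, as in ha_innodb.cc above. A toy sketch of that override pattern follows, using hypothetical class and member names rather than the real handler hierarchy:

```cpp
#include <cstdio>

// Toy stand-ins for the real classes; names and key numbering are illustrative.
struct toy_handler
{
  // Default cost: one unit per range plus one per row, like handler.h above.
  virtual double read_time(unsigned /*index*/, unsigned ranges, double rows)
  { return ranges + rows; }
  virtual ~toy_handler() {}
};

struct toy_clustered_handler : toy_handler
{
  double full_scan_cost;
  double total_rows;
  double read_time(unsigned index, unsigned ranges, double rows) override
  {
    if (index != 0)                  // pretend key 0 is the clustered PK
      return toy_handler::read_time(index, ranges, rows);
    return ranges + rows / total_rows * full_scan_cost;
  }
};

int main()
{
  toy_clustered_handler h;
  h.full_scan_cost= 400;
  h.total_rows= 100000;
  std::printf("secondary key: %.2f, clustered key: %.2f\n",
              h.read_time(3, 1, 10000), h.read_time(0, 1, 10000));
  return 0;
}
```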
sql/item_sum.cc
View file @
b228f5ff
...
...
@@ -1066,7 +1066,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
int Item_sum_count_distinct::tree_to_myisam()
{
  if (create_myisam_from_heap(table, tmp_table_param,
  if (create_myisam_from_heap(current_thd, table, tmp_table_param,
			      HA_ERR_RECORD_FILE_FULL, 1) ||
      tree_walk(&tree, (tree_walk_action)&dump_leaf, (void*) this,
		left_root_right))
...
...
@@ -1120,7 +1120,8 @@ bool Item_sum_count_distinct::add()
    if (error != HA_ERR_FOUND_DUPP_KEY &&
	error != HA_ERR_FOUND_DUPP_UNIQUE)
    {
      if (create_myisam_from_heap(table, tmp_table_param, error, 1))
      if (create_myisam_from_heap(current_thd, table, tmp_table_param,
				  error, 1))
	return 1;			// Not a table_is_full error
    }
  }
...
...
sql/mysqld.cc
View file @
b228f5ff
...
...
@@ -42,7 +42,7 @@
#define MAIN_THD
#define SIGNAL_THD
#ifdef PURIFY
#ifdef HAVE_purify
#define IF_PURIFY(A,B) (A)
#else
#define IF_PURIFY(A,B) (B)
...
...
sql/opt_range.cc
View file @
b228f5ff
...
...
@@ -284,7 +284,7 @@ typedef struct st_qsel_param {
  KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY];
  MEM_ROOT *mem_root;
  table_map prev_tables,read_tables,current_table;
  uint baseflag, keys, max_key_part;
  uint baseflag, keys, max_key_part, range_count;
  uint real_keynr[MAX_KEY];
  char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
    max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
...
...
@@ -710,8 +710,10 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
			       (double) keys_per_block);
      }
      else
	found_read_time= head->file->read_time(found_records)+
	  (double) found_records / TIME_FOR_COMPARE;
	found_read_time= (head->file->read_time(keynr,
						param.range_count,
						found_records)+
			  (double) found_records / TIME_FOR_COMPARE);
      if (read_time > found_read_time)
      {
	read_time=found_read_time;
...
...
@@ -2113,11 +2115,12 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
  if (!tree)
    DBUG_RETURN(HA_POS_ERROR);			// Can't use it
  param->max_key_part=0;
  param->range_count=0;
  if (tree->type == SEL_ARG::IMPOSSIBLE)
    DBUG_RETURN(0L);				// Impossible select. return
  if (tree->type != SEL_ARG::KEY_RANGE || tree->part != 0)
    DBUG_RETURN(HA_POS_ERROR);			// Don't use tree
  param->max_key_part=0;
  records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0);
  if (records != HA_POS_ERROR)
  {
...
...
@@ -2185,6 +2188,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
  }
  keynr=param->real_keynr[idx];
  param->range_count++;
  if (!tmp_min_flag && !tmp_max_flag &&
      (uint) key_tree->part+1 == param->table->key_info[keynr].key_parts &&
      (param->table->key_info[keynr].flags & HA_NOSAME) &&
...
...
sql/records.cc
View file @
b228f5ff
...
...
@@ -15,7 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Functions
to read, write and lock records
*/
/* Functions
for easy reading of records, possible through a cache
*/
#include "mysql_priv.h"
...
...
sql/sql_list.h
View file @
b228f5ff
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
...
...
sql/sql_select.cc
View file @
b228f5ff
...
...
@@ -1829,7 +1829,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
  ha_rows rec;
  double tmp;
  THD *thd= current_thd;
  THD *thd= join->thd;

  if (!rest_tables)
  {
...
...
@@ -1960,7 +1960,10 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
	if (table->quick_keys & ((key_map) 1 << key))
	  records= (double) table->quick_rows[key];
	else
	  records= (double) s->records/rec;	// quick_range couldn't use key!
	{
	  /* quick_range couldn't use key! */
	  records= (double) s->records/rec;
	}
      }
      else
      {
...
...
@@ -4306,12 +4309,11 @@ free_tmp_table(THD *thd, TABLE *entry)
* If a HEAP table gets full, create a MyISAM table and copy all rows to this
*/
bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
			     bool ignore_last_dupp_key_error)
bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
			     int error, bool ignore_last_dupp_key_error)
{
  TABLE new_table;
  const char *save_proc_info;
  THD *thd=current_thd;
  int write_err;
  DBUG_ENTER("create_myisam_from_heap");
...
...
@@ -5318,7 +5320,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
      if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE)
	goto end;
      if (create_myisam_from_heap(table, &join->tmp_table_param, error, 1))
      if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
				  error, 1))
	DBUG_RETURN(-1);		// Not a table_is_full error
      table->uniques=0;			// To ensure rows are the same
    }
...
...
@@ -5395,7 +5398,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
    copy_funcs(join->tmp_table_param.items_to_copy);
    if ((error=table->file->write_row(table->record[0])))
    {
      if (create_myisam_from_heap(table, &join->tmp_table_param, error, 0))
      if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
				  error, 0))
	DBUG_RETURN(-1);		// Not a table_is_full error
      /* Change method to update rows */
      table->file->index_init(0);
...
@@ -5489,8 +5493,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if
((
error
=
table
->
file
->
write_row
(
table
->
record
[
0
])))
{
if
(
create_myisam_from_heap
(
table
,
&
join
->
tmp_table_param
,
error
,
0
))
if
(
create_myisam_from_heap
(
join
->
thd
,
table
,
&
join
->
tmp_table_param
,
error
,
0
))
DBUG_RETURN
(
-
1
);
// Not a table_is_full error
}
else
...
...
@@ -6019,7 +6023,7 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
  int error;
  ulong reclength,offset;
  uint field_count;
  THD *thd= current_thd;
  THD *thd= join->thd;
  DBUG_ENTER("remove_duplicates");

  entry->reginfo.lock_type=TL_WRITE;
...
...
sql/sql_select.h
View file @
b228f5ff
...
...
@@ -195,8 +195,8 @@ void count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields,
bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields);
void copy_fields(TMP_TABLE_PARAM *param);
void copy_funcs(Item **func_ptr);
bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
			     bool ignore_last_dupp_error);
bool create_myisam_from_heap(THD *Thd, TABLE *table, TMP_TABLE_PARAM *param,
			     int error, bool ignore_last_dupp_error);

/* functions from opt_sum.cc */
int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields, COND *conds);
...
...
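The prototype change above, and the matching call-site edits in item_sum.cc, sql_select.cc, sql_union.cc and sql_update.cc, switch create_myisam_from_heap() from fishing the session out of the global current_thd to taking an explicit THD* first argument. A small hedged sketch of that refactoring pattern, with invented stand-in names rather than the real MySQL types:

```cpp
#include <cstdio>
#include <string>

// Invented stand-ins; the real code passes THD*, TABLE* and TMP_TABLE_PARAM*.
struct toy_session { std::string proc_info; };

static thread_local toy_session *current_session= nullptr;

// Before: the helper reached for a global to find its session context.
static bool convert_before(int error)
{
  toy_session *s= current_session;            // implicit dependency
  s->proc_info= "converting HEAP to MyISAM";
  return error != 0;
}

// After: the caller hands the session in, so the dependency is explicit
// and the helper no longer cares how the session was obtained.
static bool convert_after(toy_session *s, int error)
{
  s->proc_info= "converting HEAP to MyISAM";
  return error != 0;
}

int main()
{
  toy_session s;
  current_session= &s;
  convert_before(1);
  convert_after(&s, 1);
  std::printf("last state: %s\n", s.proc_info.c_str());
  return 0;
}
```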
sql/sql_union.cc
View file @
b228f5ff
...
...
@@ -262,7 +262,8 @@ bool select_union::send_data(List<Item> &values)
  fill_record(table->field, values);
  if ((write_record(table,&info)))
  {
    if (create_myisam_from_heap(table, tmp_table_param, info.last_errno, 0))
    if (create_myisam_from_heap(thd, table, tmp_table_param, info.last_errno,
				0))
      return 1;
  }
  return 0;
...
...
sql/sql_update.cc
View file @
b228f5ff
...
...
@@ -66,7 +66,10 @@ int mysql_update(THD *thd,
  TABLE		*table;
  SQL_SELECT	*select;
  READ_RECORD	info;
  TABLE_LIST    tables;
  List<Item>    all_fields;
  DBUG_ENTER("mysql_update");
  LINT_INIT(used_index);
  LINT_INIT(timestamp_query_id);
...
...
@@ -80,8 +83,13 @@ int mysql_update(THD *thd,
  table->quick_keys=0;
  want_privilege=table->grant.want_privilege;
  table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
  if (setup_tables(table_list) || setup_conds(thd,table_list,&conds)
      || setup_ftfuncs(thd))
  bzero((char*) &tables,sizeof(tables));	// For ORDER BY
  tables.table= table;
  if (setup_tables(table_list) || setup_conds(thd,table_list,&conds) ||
      setup_order(thd, &tables, all_fields, all_fields, order) ||
      setup_ftfuncs(thd))
    DBUG_RETURN(-1);				/* purecov: inspected */
  old_used_keys=table->used_keys;		// Keys used in WHERE
...
...
@@ -159,13 +167,6 @@ int mysql_update(THD *thd,
matching rows before updating the table!
*/
    table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE);
    IO_CACHE tempfile;
    if (open_cached_file(&tempfile, mysql_tmpdir, TEMP_PREFIX,
			 DISK_BUFFER_SIZE, MYF(MY_WME)))
    {
      delete select;			/* purecov: inspected */
      DBUG_RETURN(-1);
    }
    if (old_used_keys & ((key_map) 1 << used_index))
    {
      table->key_read=1;
...
...
@@ -174,81 +175,97 @@ int mysql_update(THD *thd,
  if (order)
  {
    /*
      Doing an ORDER BY;  Let filesort find and sort the rows we are going
      to update
    */
    uint length;
    SORT_FIELD *sortorder;
    TABLE_LIST tables;
    List<Item> fields;
    List<Item> all_fields;
    ha_rows examined_rows;

    bzero((char*) &tables,sizeof(tables));
    tables.table= table;

    table->io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
					    MYF(MY_FAE | MY_ZEROFILL));
    if (setup_order(thd, &tables, fields, all_fields, order) ||
	!(sortorder=make_unireg_sortorder(order, &length)) ||
    if (!(sortorder=make_unireg_sortorder(order, &length)) ||
	(table->found_records = filesort(table, sortorder, length,
					 (SQL_SELECT *) 0, 0L,
					 HA_POS_ERROR, &examined_rows))
					 select, 0L,
					 limit, &examined_rows))
	== HA_POS_ERROR)
    {
      delete select;
      free_io_cache(table);
      DBUG_RETURN(-1);
    }
    /*
      Filesort has already found and selected the rows we want to update,
      so we don't need the where clause
    */
    delete select;
    select= 0;
  }

  init_read_record(&info,thd,table,select,0,1);
  thd->proc_info="Searching rows for update";
  while (!(error=info.read_record(&info)) && !thd->killed)
  else
  {
    if (!(select && select->skipp_record()))
    /*
      We are doing a search on a key that is updated. In this case
      we go trough the matching rows, save a pointer to them and
      update these in a separate loop based on the pointer.
    */

    IO_CACHE tempfile;
    if (open_cached_file(&tempfile, mysql_tmpdir,
			 TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)))
    {
      table->file->position(table->record[0]);
      if (my_b_write(&tempfile,table->file->ref,
		     table->file->ref_length))
      delete select;			/* purecov: inspected */
      DBUG_RETURN(-1);
    }

    init_read_record(&info,thd,table,select,0,1);
    thd->proc_info="Searching rows for update";
    uint tmp_limit= limit;
    while (!(error=info.read_record(&info)) && !thd->killed)
    {
      if (!(select && select->skipp_record()))
      {
	error=1;			/* purecov: inspected */
	break;				/* purecov: inspected */
	table->file->position(table->record[0]);
	if (my_b_write(&tempfile,table->file->ref,
		       table->file->ref_length))
	{
	  error=1;			/* purecov: inspected */
	  break;			/* purecov: inspected */
	}
	if (!--limit && using_limit)
	  break;
      }
    }
    end_read_record(&info);
    /* Change select to use tempfile */
    if (select)
    {
      delete select->quick;
      if (select->free_cond)
	delete select->cond;
      select->quick=0;
      select->cond=0;
    }
    else
    {
      if (!(test_flags & 512))		/* For debugging */
      {
	DBUG_DUMP("record",(char*) table->record[0],table->reclength);
      }
      select= new SQL_SELECT;
      select->head=table;
    }
    if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
      error=1;				/* purecov: inspected */
    select->file=tempfile;		// Read row ptrs from this file
    if (error >= 0)
    {
      delete select;
      DBUG_RETURN(-1);
    }
  }
  end_read_record(&info);
  if (table->key_read)
  {
    table->key_read=0;
    table->file->extra(HA_EXTRA_NO_KEYREAD);
  }
  /* Change select to use tempfile */
  if (select)
  {
    delete select->quick;
    if (select->free_cond)
      delete select->cond;
    select->quick=0;
    select->cond=0;
  }
  else
  {
    select= new SQL_SELECT;
    select->head=table;
  }
  if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
    error=1;				/* purecov: inspected */
  select->file=tempfile;		// Read row ptrs from this file
  if (error >= 0)
  {
    delete select;
    DBUG_RETURN(-1);
  }
  }
  if (handle_duplicates == DUP_IGNORE)
...
...
@@ -275,11 +292,6 @@ int mysql_update(THD *thd,
				(byte*) table->record[0])))
      {
	updated++;
	if (!--limit && using_limit)
	{
	  error= -1;
	  break;
	}
      }
      else if (handle_duplicates != DUP_IGNORE ||
	       error != HA_ERR_FOUND_DUPP_KEY)
...
...
@@ -289,11 +301,17 @@ int mysql_update(THD *thd,
	  break;
	}
      }
      if (!--limit && using_limit)
      {
	error= -1;			// Simulate end of file
	break;
      }
    }
    else
      table->file->unlock_row();
  }
  end_read_record(&info);
  free_io_cache(table);			// If ORDER BY
  thd->proc_info="end";
  VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
  transactional_table= table->file->has_transactions();
...
...
@@ -741,7 +759,8 @@ bool multi_update::send_data(List<Item> ¬_used_values)
	(error != HA_ERR_FOUND_DUPP_KEY &&
	 error != HA_ERR_FOUND_DUPP_UNIQUE))
    {
      if (create_myisam_from_heap(table, tmp_table_param + offset, error, 1))
      if (create_myisam_from_heap(thd, table, tmp_table_param + offset,
				  error, 1))
      {
	do_update=0;
	DBUG_RETURN(1);			// Not a table_is_full error
...
...
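The reworked mysql_update() above handles the case where the WHERE clause searches a key that the UPDATE itself modifies: it first scans and saves matching row positions into a temporary file (respecting LIMIT), then re-reads those saved positions and applies the changes, so an updated row cannot be picked up again by the changing key. A compact sketch of that collect-then-apply idea, with a plain vector standing in for the IO_CACHE temp file and invented row types:

```cpp
#include <cstdio>
#include <vector>

// Invented row type; the real code stores handler::ref positions in an IO_CACHE.
struct row { int a; int b; };

// Pass 1: remember which rows match, up to `limit` of them.
// Pass 2: update by saved position in a separate loop, so rescanning the
// (now changed) data cannot visit the same row twice.
static int update_b_where_a(std::vector<row> &t, int a, int new_b,
                            std::size_t limit)
{
  std::vector<std::size_t> positions;           // stand-in for the temp file
  for (std::size_t i= 0; i < t.size() && positions.size() < limit; i++)
    if (t[i].a == a)
      positions.push_back(i);
  for (std::size_t pos : positions)             // apply in a separate loop
    t[pos].b= new_b;
  return (int) positions.size();
}

int main()
{
  std::vector<row> t= {{1,1},{1,2},{1,3},{2,1}};
  int updated= update_b_where_a(t, 1, 99, 1);   // like an UPDATE ... LIMIT 1
  std::printf("updated %d row(s); first row b=%d\n", updated, t[0].b);
  return 0;
}
```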
support-files/my-huge.cnf.sh
View file @
b228f5ff
...
...
@@ -33,6 +33,7 @@ sort_buffer_size = 2M
read_buffer_size = 2M
myisam_sort_buffer_size = 64M
thread_cache = 8
query_cache_size = 32M
# Try number of CPU's*2 for thread_concurrency
thread_concurrency = 8
...
...
support-files/my-large.cnf.sh
View file @
b228f5ff
...
...
@@ -33,6 +33,7 @@ sort_buffer_size = 1M
read_buffer_size = 1M
myisam_sort_buffer_size = 64M
thread_cache = 8
query_cache_size = 16M
# Try number of CPU's*2 for thread_concurrency
thread_concurrency = 8
...
...