Commit 6f6c95ef authored by unknown

Fixed bug in mysqlcheck when using --fast

Fixed problem when converting bigint to double.
Fixed bug in count(distinct null)
Fixed bug with empty BDB tables.


BitKeeper/deleted/.del-ATIS-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~dd306b2e583ebde4:
  Delete: sql-bench/Results/ATIS-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-RUN-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~11038a44f73070e7:
  Delete: sql-bench/Results/RUN-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-alter-table-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~662b96bc66bc91b6:
  Delete: sql-bench/Results/alter-table-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-big-tables-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~788ad492867b1226:
  Delete: sql-bench/Results/big-tables-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-connect-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~5316add301edb60:
  Delete: sql-bench/Results/connect-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-create-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~26e09af61f88d8c9:
  Delete: sql-bench/Results/create-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-insert-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~fd2699adb3190d07:
  Delete: sql-bench/Results/insert-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-select-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~b01175ad38fd12b6:
  Delete: sql-bench/Results/select-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
BitKeeper/deleted/.del-wisconsin-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg~8ba598d217450157:
  Delete: sql-bench/Results/wisconsin-mysql-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
Docs/manual.texi:
  Changelog + update to Linux section
client/mysqlcheck.c:
  Fixed bug when using --fast
  --check --auto-repair --fast will no longer repair tables that
  had warnings but were OK.
mysql-test/r/bdb.result:
  New test case
mysql-test/r/bigint.result:
  New test case
mysql-test/r/count_distinct.result:
  New test case
mysql-test/t/bdb.test:
  New test case
mysql-test/t/bigint.test:
  New test case
mysql-test/t/count_distinct.test:
  New test case
sql-bench/Comments/postgres.benchmark:
  Update for PostgreSQL 7.1.2
sql-bench/graph-compare-results.sh:
  Fixed colors for mysql --fast tests
sql/field.cc:
  Fixed problem when converting bigint to double.
sql/item_sum.cc:
  Fixed bug in count(distinct null)
sql/item_sum.h:
  Fixed bug in count(distinct null)
sql/mysqlbinlog.cc:
  Fixed typo
sql/sql_base.cc:
  cleanup
sql/sql_select.cc:
  Fixed bug with empty BDB tables.
sql/time.cc:
  Removed warning when reading timestamps with sub seconds.
parent bcdc8f92
......@@ -990,6 +990,7 @@ Changes in release 4.0.x (Development; Alpha)
Changes in release 3.23.x (Stable)
* News-3.23.40:: Changes in release 3.23.40
* News-3.23.39:: Changes in release 3.23.39
* News-3.23.38:: Changes in release 3.23.38
* News-3.23.37:: Changes in release 3.23.37
......@@ -7551,6 +7552,40 @@ relative to the root of @code{glibc} Note that @strong{MySQL} will not be
stable with around 600-1000 connections if @code{STACK_SIZE} is the default
of 2 MB.
If you have a problem where @strong{MySQL} can't open enough files
or connections, it may be that you haven't configured Linux to handle
enough open files.
In Linux 2.2 and later, you can check the number of allocated
file handles by doing:
@example
cat /proc/sys/fs/file-max
cat /proc/sys/fs/dquot-max
cat /proc/sys/fs/super-max
@end example
If you have more than 16M of memory, you should add something like the
following to your boot script (@file{/etc/rc/boot.local} on SuSE):
@example
echo 65536 > /proc/sys/fs/file-max
echo 8192 > /proc/sys/fs/dquot-max
echo 1024 > /proc/sys/fs/super-max
@end example
You can also run the above from the command line as root, but in that case
the old limits will be restored the next time your computer reboots.
You should also add the following to @file{/etc/my.cnf}:
@example
[safe_mysqld]
open_files_limit=8192
@end example
The above should allow @strong{MySQL} to create up to 8192 connections + files.
The @code{STACK_SIZE} constant in LinuxThreads controls the spacing of thread
stacks in the address space. It needs to be large enough so that there will
be plenty of room for the stack of each individual thread, but small enough
......@@ -9573,11 +9608,8 @@ and are configured with the following compilers and options:
@item SunOS 4.1.4 2 sun4c with @code{gcc} 2.7.2.1
@code{CC=gcc CXX=gcc CXXFLAGS="-O3 -felide-constructors" ./configure --prefix=/usr/local/mysql --disable-shared --with-extra-charsets=complex --enable-assembler}
@item SunOS 5.5.1 sun4u with @code{egcs} 1.0.3a
@code{CC=gcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
@item SunOS 5.6 sun4u with @code{egcs} 2.90.27
@code{CC=gcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
@item SunOS 5.5.1 (and above) sun4u with @code{egcs} 1.0.3a or 2.90.27 or gcc 2.95.2 and newer
@code{CC=gcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex --enable-assembler}
@item SunOS 5.6 i86pc with @code{gcc} 2.8.1
@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
......@@ -19404,9 +19436,6 @@ INDEX} are @strong{MySQL} extensions to ANSI SQL92.
@item
@code{MODIFY} is an Oracle extension to @code{ALTER TABLE}.
@item
@code{TRUNCATE} is an Oracle extension. @xref{TRUNCATE}.
@item
The optional word @code{COLUMN} is a pure noise word and can be omitted.
......@@ -19790,6 +19819,8 @@ minimum needed to restore it. Currenlty only works for @code{MyISAM}
tables. For @code{MyISAM} tables, copies @code{.frm} (definition) and
@code{.MYD} (data) files. The index file can be rebuilt from those two.
Before using this command, please see @xref{Backup}.
During the backup, a read lock will be held for each table, one at a time,
as they are being backed up. If you want to back up several tables as
a snapshot, you must first issue @code{LOCK TABLES} obtaining a read
......@@ -19998,6 +20029,8 @@ valid, the table can be re-created this way, even if the data or index
files have become corrupted.
@end itemize
@code{TRUNCATE} is an Oracle SQL extension.
@findex SELECT
@node SELECT, JOIN, TRUNCATE, Reference
@section @code{SELECT} Syntax
......@@ -20753,6 +20786,13 @@ In other words, you can't access the values of the old row from a
@code{REPLACE} statement. In some old @strong{MySQL} versions it looked
like you could do this, but that was a bug that has been corrected.
When one uses a @code{REPLACE} command, @code{mysql_affected_rows()}
will return 2 if the new row replaced an old row. This is because
one row was inserted and then the duplicate was deleted.
This makes it easy to check whether @code{REPLACE} added or replaced a
row.
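For illustration, a minimal C API sketch of this check, assuming an already
connected @code{MYSQL *mysql} handle (the table and column names are
placeholders, and error handling is omitted):
@example
mysql_query(mysql, "REPLACE INTO t1 (id, name) VALUES (1, 'x')");
printf(mysql_affected_rows(mysql) == 2 ?
       "replaced an existing row\n" : "inserted a new row\n");
@end example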
@findex LOAD DATA INFILE
@node LOAD DATA, UPDATE, REPLACE, Reference
@section @code{LOAD DATA INFILE} Syntax
......@@ -22741,8 +22781,6 @@ This statement is provided for Oracle compatibility.
The @code{SHOW} statement provides similar information.
@xref{SHOW, , @code{SHOW}}.
@findex BEGIN
@findex COMMIT
@findex ROLLBACK
......@@ -38689,6 +38727,8 @@ shell> perror 23
File table overflow
shell> perror 24
Too many open files
shell> perror 11
Resource temporarily unavailable
@end example
The problem here is that @code{mysqld} is trying to keep open too many
......@@ -39206,8 +39246,8 @@ database directory. The @code{FLUSH TABLE} is needed to ensure that
all active index pages are written to disk before you start the backup.
If you want to make a SQL level backup of a table, you can use
@code{SELECT INTO OUTFILE} or @code{BACKUP
TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}.
@code{SELECT INTO OUTFILE} or @code{BACKUP TABLE}. @xref{SELECT}.
@xref{BACKUP TABLE}.
Another way to back up a database is to use the @code{mysqldump} program or
the @code{mysqlhotcopy script}. @xref{mysqldump, , @code{mysqldump}}.
......@@ -41736,7 +41776,7 @@ specified explicitly.
@item
The @code{passwd} parameter contains the password for @code{user}. If
@code{passwd} is @code{NULL}, only entries in the @code{user} table for the
user that have a blank password field will be checked for a match. This
user that have a blank (empty) password field will be checked for a match. This
allows the database administrator to set up the @strong{MySQL} privilege
system in such a way that users get different privileges depending on whether
or not they have specified a password.
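For example, a minimal sketch of a client connection that relies on this
behavior (host, user, and database names are placeholders; error handling
is omitted):
@example
MYSQL mysql;
mysql_init(&mysql);
/* passwd == NULL: only user table entries with an empty password
   field can match this login attempt */
mysql_real_connect(&mysql, "localhost", "some_user", NULL,
                   "test", 0, NULL, 0);
@end example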
......@@ -46309,6 +46349,7 @@ users use this code as the rest of the code and because of this we are
not yet 100% confident in this code.
@menu
* News-3.23.40:: Changes in release 3.23.40
* News-3.23.39:: Changes in release 3.23.39
* News-3.23.38:: Changes in release 3.23.38
* News-3.23.37:: Changes in release 3.23.37
......@@ -46352,7 +46393,21 @@ not yet 100% confident in this code.
* News-3.23.0:: Changes in release 3.23.0
@end menu
@node News-3.23.39, News-3.23.38, News-3.23.x, News-3.23.x
@node News-3.23.40, News-3.23.39, News-3.23.x, News-3.23.x
@appendixsubsec Changes in release 3.23.40
@itemize @bullet
@item
Fixed bug when converting @code{UNSIGNED BIGINT} to @code{DOUBLE}. This
caused a problem when doing comparisons with @code{BIGINT} values outside
of the signed range.
@item
Fixed bug in @code{BDB} tables when querying empty tables.
@item
Fixed a bug when using @code{COUNT(DISTINCT)} with @code{LEFT JOIN} and
there were no matching rows.
@end itemize
@node News-3.23.39, News-3.23.38, News-3.23.40, News-3.23.x
@appendixsubsec Changes in release 3.23.39
@itemize @bullet
@item
......@@ -16,7 +16,7 @@
/* By Jani Tolonen, 2001-04-20, MySQL Development Team */
#define CHECK_VERSION "1.01"
#define CHECK_VERSION "1.02"
#include <global.h>
#include <my_sys.h>
......@@ -503,25 +503,24 @@ static int use_db(char *database)
static int handle_request_for_tables(char *tables, uint length)
{
char *query, *end, options[100];
char *query, *end, options[100], message[100];
const char *op = 0;
options[0] = 0;
end = options;
switch (what_to_do) {
case DO_CHECK:
op = "CHECK";
end = options;
if (opt_quick) end = strmov(end, "QUICK");
if (opt_fast) end = strmov(end, "FAST");
if (opt_medium_check) end = strmov(end, "MEDIUM"); /* Default */
if (opt_extended) end = strmov(end, "EXTENDED");
if (opt_check_only_changed) end = strmov(end, "CHANGED");
if (opt_quick) end = strmov(end, " QUICK");
if (opt_fast) end = strmov(end, " FAST");
if (opt_medium_check) end = strmov(end, " MEDIUM"); /* Default */
if (opt_extended) end = strmov(end, " EXTENDED");
if (opt_check_only_changed) end = strmov(end, " CHANGED");
break;
case DO_REPAIR:
op = "REPAIR";
end = options;
if (opt_quick) end = strmov(end, "QUICK");
if (opt_extended) end = strmov(end, "EXTENDED");
if (opt_quick) end = strmov(end, " QUICK");
if (opt_extended) end = strmov(end, " EXTENDED");
break;
case DO_ANALYZE:
op = "ANALYZE";
......@@ -533,11 +532,11 @@ static int handle_request_for_tables(char *tables, uint length)
if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME))))
return 1;
sprintf(query, "%s TABLE %s %s", op, options, tables);
sprintf(query, "%s TABLE %s %s", op, tables, options);
if (mysql_query(sock, query))
{
sprintf(options, "when executing '%s TABLE'", op);
DBerror(sock, options);
sprintf(message, "when executing '%s TABLE ... %s", op, options);
DBerror(sock, message);
return 1;
}
print_result();
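For reference, a standalone sketch of the statement the corrected argument
order builds for --check --quick --fast (the table name is hypothetical, and
snprintf stands in for the mysys string helpers used above):

#include <stdio.h>

int main(void)
{
  const char *op      = "CHECK";
  const char *tables  = "test.t1";       /* hypothetical table list */
  const char *options = " QUICK FAST";   /* options keep their leading spaces */
  char query[256];
  snprintf(query, sizeof(query), "%s TABLE %s%s", op, tables, options);
  puts(query);                           /* CHECK TABLE test.t1 QUICK FAST */
  return 0;
}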
......@@ -551,23 +550,34 @@ static void print_result()
MYSQL_RES *res;
MYSQL_ROW row;
char prev[NAME_LEN*2+2];
int i;
uint i;
my_bool found_error=0;
res = mysql_use_result(sock);
prev[0] = '\0';
for (i = 0; (row = mysql_fetch_row(res)); i++)
{
int changed = strcmp(prev, row[0]);
int status = !strcmp(row[2], "status");
if (opt_silent && status)
continue;
my_bool status = !strcmp(row[2], "status");
if (status)
{
if (found_error)
{
if (what_to_do != DO_REPAIR && opt_auto_repair &&
(!opt_fast || strcmp(row[3],"OK")))
insert_dynamic(&tables4repair, row[0]);
}
found_error=0;
if (opt_silent)
continue;
}
if (status && changed)
printf("%-50s %s", row[0], row[3]);
else if (!status && changed)
{
printf("%s\n%-9s: %s", row[0], row[2], row[3]);
if (what_to_do != DO_REPAIR && opt_auto_repair)
insert_dynamic(&tables4repair, row[0]);
found_error=1;
}
else
printf("%-9s: %s", row[2], row[3]);
......
......@@ -509,3 +509,5 @@ id id3
1 1
2 2
100 2
KINMU_DATE
KINMU_DATE
......@@ -5,5 +5,11 @@
+9999999999999999999 -9999999999999999999
10000000000000000000 -10000000000000000000
a
18446744073709551614
18446744073709551615
a
18446744073709551615
a
18446744073709551615
a
18446744073709551614
......@@ -7,3 +7,5 @@ isbn city libname a
isbn city libname a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
f1 count(distinct t2.f2) count(distinct 1,NULL)
1 0 0
......@@ -705,3 +705,13 @@ commit;
select id,id3 from t1;
UNLOCK TABLES;
DROP TABLE t1;
#
# Test with empty tables (crashed with lock error)
#
CREATE TABLE t1 (SYAIN_NO char(5) NOT NULL default '', KINMU_DATE char(6) NOT NULL default '', PRIMARY KEY (SYAIN_NO,KINMU_DATE)) TYPE=BerkeleyDB;
CREATE TABLE t2 ( SYAIN_NO char(5) NOT NULL default '',STR_DATE char(8) NOT NULL default '',PRIMARY KEY (SYAIN_NO,STR_DATE) ) TYPE=BerkeleyDB;
select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO;
select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO;
DROP TABLE t1,t2;
......@@ -6,7 +6,11 @@ select 9223372036854775807,-009223372036854775808;
select +9999999999999999999,-9999999999999999999;
drop table if exists t1;
create table t1 (a bigint unsigned);
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFF);
create table t1 (a bigint unsigned not null, primary key(a));
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFE);
select * from t1;
select * from t1 where a=18446744073709551615;
select * from t1 where a='18446744073709551615';
delete from t1 where a=18446744073709551615;
select * from t1;
drop table t1;
......@@ -32,3 +32,13 @@ insert into t1 values ('NYC Lib','New York');
select t2.isbn,city,t1.libname,count(t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city,t1.libname;
select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct t1.libname) > 1;
drop table t1, t2, t3;
#
# Problem with LEFT JOIN
#
create table t1 (f1 int);
insert into t1 values (1);
create table t2 (f1 int,f2 int);
select t1.f1,count(distinct t2.f2),count(distinct 1,NULL) from t1 left join t2 on t1.f1=t2.f1 group by t1.f1;
drop table t1,t2;
......@@ -11,24 +11,26 @@
# Another time vacuum() filled our system disk, which had 6G free,
# while vacuuming a table of 60M.
#
# We have sent a mail about this to the PostgreSQL mailing list, so
# the PostgreSQL developers should be aware of these problems and should
# hopefully fix this soon.
#
# WARNING
# The test was run on an Intel Xeon 2x 550 MHz machine with 1G memory,
# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.0 compiled with SMP
# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.2 compiled with SMP
# support
# Both the perl client and the database server are run
# on the same machine. No other CPU-intensive process was used during
# the benchmark.
#
# During the test we run PostgreSQL with -o -F, not async mode (not ACID safe)
# because when we started postmaster without -o -F, PostgreSQL log files
# filled up a 9G disk until postmaster crashed.
# We did however notice that with -o -F, PostgreSQL was a magnitude slower
# than when not using -o -F.
# First, install postgresql-7.1.1.tar.gz
#
# First, install postgresql-7.1.2.tar.gz
# Add the following lines to your ~/.bash_profile or
# corresponding file. If you are using csh, use 'setenv'.
#
export POSTGRES_INCLUDE=/usr/local/pg/include
export POSTGRES_LIB=/usr/local/pg/lib
......@@ -62,7 +64,7 @@ su - postgres
exit
#
# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.18.tar.gz,
# available from http://www.perl.com/CPAN/
export POSTGRES_LIB=/usr/local/pg/lib/
......@@ -82,6 +84,7 @@ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --u
# When running with --fast we run the following vacuum commands on
# the database between each major update of the tables:
# vacuum anlyze table
# vacuum table
# or
# vacuum analyze
......
Testing server 'MySQL 3.23.39' at 2001-06-05 19:26:17
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Inserting data
Time to insert (9768): 3 wallclock secs ( 0.45 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.89 CPU)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 0.68 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.87 CPU)
Time for select_join (100): 3 wallclock secs ( 0.51 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.71 CPU)
Time for select_key_prefix_join (100): 13 wallclock secs ( 4.08 usr 2.01 sys + 0.00 cusr 0.00 csys = 6.09 CPU)
Time for select_distinct (800): 15 wallclock secs ( 1.75 usr 0.69 sys + 0.00 cusr 0.00 csys = 2.44 CPU)
Time for select_group (2600): 20 wallclock secs ( 1.57 usr 0.41 sys + 0.00 cusr 0.00 csys = 1.98 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Benchmark DBD suite: 2.12
Date of test: 2001-06-05 19:27:31
Running tests on: Linux 2.4.0-64GB-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 512M, key_buffer=16M
Limits from: mysql,pg
Server version: MySQL 3.23.39
ATIS: Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
alter-table: Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
big-tables: Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
connect: Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
create: Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
insert: Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
select: Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
wisconsin: Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
All 8 tests executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 261.00 0.13 0.02 0.15 992
connect 16.00 6.84 2.50 9.34 10000
connect+select_1_row 15.00 7.11 3.70 10.81 10000
connect+select_simple 13.00 6.70 3.21 9.91 10000
count 45.00 0.01 0.00 0.01 100
count_distinct 60.00 0.42 0.08 0.50 1000
count_distinct_2 63.00 0.18 0.03 0.21 1000
count_distinct_big 165.00 7.78 3.16 10.94 120
count_distinct_group 194.00 1.21 0.37 1.58 1000
count_distinct_group_on_key 59.00 0.51 0.07 0.58 1000
count_distinct_group_on_key_parts 194.00 1.12 0.46 1.58 1000
count_distinct_key_prefix 51.00 0.45 0.08 0.53 1000
count_group_on_key_parts 58.00 1.16 0.35 1.51 1000
count_on_key 586.00 16.61 2.71 19.32 50100
create+drop 33.00 2.94 0.82 3.76 10000
create_MANY_tables 18.00 1.02 0.62 1.64 5000
create_index 5.00 0.00 0.00 0.00 8
create_key+drop 41.00 3.05 0.66 3.71 10000
create_table 0.00 0.01 0.00 0.01 31
delete_all 17.00 0.00 0.00 0.00 12
delete_all_many_keys 75.00 0.03 0.00 0.03 1
delete_big 1.00 0.00 0.00 0.00 1
delete_big_many_keys 75.00 0.03 0.00 0.03 128
delete_key 4.00 0.76 0.29 1.05 10000
drop_index 5.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 6.00 0.37 0.63 1.00 5000
insert 144.00 24.06 14.28 38.34 350768
insert_duplicates 31.00 5.06 3.72 8.78 100000
insert_key 137.00 9.91 6.26 16.17 100000
insert_many_fields 10.00 0.54 0.08 0.62 2000
insert_select_1_key 7.00 0.00 0.00 0.00 1
insert_select_2_keys 9.00 0.00 0.00 0.00 1
min_max 30.00 0.04 0.01 0.05 60
min_max_on_key 230.00 28.28 4.43 32.71 85000
order_by_big 78.00 22.39 9.83 32.22 10
order_by_big_key 33.00 23.35 10.15 33.50 10
order_by_big_key2 32.00 22.53 9.81 32.34 10
order_by_big_key_desc 36.00 23.47 10.27 33.74 10
order_by_big_key_diff 74.00 22.66 9.76 32.42 10
order_by_big_key_prefix 33.00 22.18 9.81 31.99 10
order_by_key2_diff 9.00 1.30 0.85 2.15 500
order_by_key_prefix 4.00 0.97 0.57 1.54 500
order_by_range 8.00 1.26 0.49 1.75 500
outer_join 110.00 0.00 0.00 0.00 10
outer_join_found 107.00 0.00 0.00 0.00 10
outer_join_not_found 59.00 0.00 0.00 0.00 500
outer_join_on_key 60.00 0.00 0.00 0.00 10
select_1_row 3.00 0.81 0.69 1.50 10000
select_2_rows 3.00 0.67 0.63 1.30 10000
select_big 63.00 32.72 16.55 49.27 10080
select_column+column 4.00 0.52 0.46 0.98 10000
select_diff_key 193.00 0.32 0.04 0.36 500
select_distinct 15.00 1.75 0.69 2.44 800
select_group 75.00 1.59 0.45 2.04 2711
select_group_when_MANY_tables 5.00 0.43 0.87 1.30 5000
select_join 3.00 0.51 0.20 0.71 100
select_key 132.00 53.98 10.53 64.51 200000
select_key2 139.00 78.61 11.08 89.69 200000
select_key2_return_key 131.00 64.58 9.61 74.19 200000
select_key2_return_prim 134.00 72.33 11.34 83.67 200000
select_key_prefix 141.00 86.32 12.05 98.37 200000
select_key_prefix_join 13.00 4.08 2.01 6.09 100
select_key_return_key 125.00 59.92 12.00 71.92 200000
select_many_fields 23.00 8.85 7.55 16.40 2000
select_query_cache 120.00 3.67 0.53 4.20 10000
select_query_cache2 120.00 3.80 0.57 4.37 10000
select_range 201.00 9.05 3.95 13.00 410
select_range_key2 21.00 7.15 1.40 8.55 25010
select_range_prefix 22.00 6.55 1.40 7.95 25010
select_simple 2.00 0.54 0.49 1.03 10000
select_simple_join 3.00 0.68 0.19 0.87 500
update_big 64.00 0.00 0.00 0.00 10
update_of_key 25.00 2.62 1.44 4.06 50000
update_of_key_big 35.00 0.05 0.04 0.09 501
update_of_primary_key_many_keys 47.00 0.01 0.02 0.03 256
update_with_key 119.00 18.44 12.64 31.08 300000
update_with_key_prefix 36.00 6.23 3.85 10.08 100000
wisc_benchmark 5.00 2.33 0.52 2.85 114
TOTALS 5323.00 795.55 233.87 1029.42 2551551
Testing server 'MySQL 3.23.39' at 2001-06-05 13:47:22
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for alter_table_add (992): 261 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU)
Time for create_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:51:53
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 10 wallclock secs ( 4.43 usr 4.17 sys + 0.00 cusr 0.00 csys = 8.60 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 4.42 usr 3.38 sys + 0.00 cusr 0.00 csys = 7.80 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.46 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.49 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 7 wallclock secs ( 0.08 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.13 CPU)
Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:52:26
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 16 wallclock secs ( 6.84 usr 2.50 sys + 0.00 cusr 0.00 csys = 9.34 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 6.70 usr 3.21 sys + 0.00 cusr 0.00 csys = 9.91 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.54 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.03 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 15 wallclock secs ( 7.11 usr 3.70 sys + 0.00 cusr 0.00 csys = 10.81 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 0.81 usr 0.69 sys + 0.00 cusr 0.00 csys = 1.50 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.67 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Test select with arithmetic (+)
Time for select_column+column (10000): 4 wallclock secs ( 0.52 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.98 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 30 wallclock secs (10.79 usr 6.41 sys + 0.00 cusr 0.00 csys = 17.20 CPU)
Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:53:52
Testing the speed of creating and dropping tables
Testing with 5000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (5000): 18 wallclock secs ( 1.02 usr 0.62 sys + 0.00 cusr 0.00 csys = 1.64 CPU)
Accessing tables
Time to select_group_when_MANY_tables (5000): 5 wallclock secs ( 0.43 usr 0.87 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Testing drop
Time for drop_table_when_MANY_tables (5000): 6 wallclock secs ( 0.37 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.00 CPU)
Testing create+drop
Time for create+drop (10000): 33 wallclock secs ( 2.94 usr 0.82 sys + 0.00 cusr 0.00 csys = 3.76 CPU)
Time for create_key+drop (10000): 41 wallclock secs ( 3.05 usr 0.66 sys + 0.00 cusr 0.00 csys = 3.71 CPU)
Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:55:36
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 123 wallclock secs (21.22 usr 12.32 sys + 0.00 cusr 0.00 csys = 33.54 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 31 wallclock secs ( 5.06 usr 3.72 sys + 0.00 cusr 0.00 csys = 8.78 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 32 wallclock secs (21.78 usr 10.07 sys + 0.00 cusr 0.00 csys = 31.85 CPU)
Time for order_by_big_key (10:3000000): 33 wallclock secs (23.35 usr 10.15 sys + 0.00 cusr 0.00 csys = 33.50 CPU)
Time for order_by_big_key_desc (10:3000000): 36 wallclock secs (23.47 usr 10.27 sys + 0.00 cusr 0.00 csys = 33.74 CPU)
Time for order_by_big_key_prefix (10:3000000): 33 wallclock secs (22.18 usr 9.81 sys + 0.00 cusr 0.00 csys = 31.99 CPU)
Time for order_by_big_key2 (10:3000000): 32 wallclock secs (22.53 usr 9.81 sys + 0.00 cusr 0.00 csys = 32.34 CPU)
Time for order_by_big_key_diff (10:3000000): 74 wallclock secs (22.66 usr 9.76 sys + 0.00 cusr 0.00 csys = 32.42 CPU)
Time for order_by_big (10:3000000): 78 wallclock secs (22.39 usr 9.83 sys + 0.00 cusr 0.00 csys = 32.22 CPU)
Time for order_by_range (500:125750): 8 wallclock secs ( 1.26 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.75 CPU)
Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 0.97 usr 0.57 sys + 0.00 cusr 0.00 csys = 1.54 CPU)
Time for order_by_key2_diff (500:250500): 9 wallclock secs ( 1.30 usr 0.85 sys + 0.00 cusr 0.00 csys = 2.15 CPU)
Time for select_diff_key (500:1000): 193 wallclock secs ( 0.32 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.36 CPU)
Time for select_range_prefix (5010:42084): 13 wallclock secs ( 2.55 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.06 CPU)
Time for select_range_key2 (5010:42084): 12 wallclock secs ( 2.81 usr 0.68 sys + 0.00 cusr 0.00 csys = 3.49 CPU)
Time for select_key_prefix (200000): 141 wallclock secs (86.32 usr 12.05 sys + 0.00 cusr 0.00 csys = 98.37 CPU)
Time for select_key (200000): 132 wallclock secs (53.98 usr 10.53 sys + 0.00 cusr 0.00 csys = 64.51 CPU)
Time for select_key_return_key (200000): 125 wallclock secs (59.92 usr 12.00 sys + 0.00 cusr 0.00 csys = 71.92 CPU)
Time for select_key2 (200000): 139 wallclock secs (78.61 usr 11.08 sys + 0.00 cusr 0.00 csys = 89.69 CPU)
Time for select_key2_return_key (200000): 131 wallclock secs (64.58 usr 9.61 sys + 0.00 cusr 0.00 csys = 74.19 CPU)
Time for select_key2_return_prim (200000): 134 wallclock secs (72.33 usr 11.34 sys + 0.00 cusr 0.00 csys = 83.67 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 9 wallclock secs ( 4.00 usr 0.89 sys + 0.00 cusr 0.00 csys = 4.89 CPU)
Time for select_range_key2 (20000:43500): 9 wallclock secs ( 4.34 usr 0.72 sys + 0.00 cusr 0.00 csys = 5.06 CPU)
Time for select_group (111): 55 wallclock secs ( 0.02 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.06 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 5.12 usr 0.76 sys + 0.00 cusr 0.00 csys = 5.88 CPU)
Time for min_max (60): 30 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count_on_key (100): 52 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count (100): 45 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Time for count_distinct_big (20): 98 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 25 wallclock secs ( 2.62 usr 1.44 sys + 0.00 cusr 0.00 csys = 4.06 CPU)
Time for update_of_key_big (501): 35 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU)
Testing update with key
Time for update_with_key (300000): 119 wallclock secs (18.44 usr 12.64 sys + 0.00 cusr 0.00 csys = 31.08 CPU)
Time for update_with_key_prefix (100000): 36 wallclock secs ( 6.23 usr 3.85 sys + 0.00 cusr 0.00 csys = 10.08 CPU)
Testing update of all rows
Time for update_big (10): 64 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 110 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 107 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 59 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 9 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.76 usr 0.29 sys + 0.00 cusr 0.00 csys = 1.05 CPU)
Time for delete_all (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 137 wallclock secs ( 9.91 usr 6.26 sys + 0.00 cusr 0.00 csys = 16.17 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 47 wallclock secs ( 0.01 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 14:41:13
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 0.80 usr 0.34 sys + 0.00 cusr 0.00 csys = 1.14 CPU)
Test if the database has a query cache
Time for select_query_cache (10000): 120 wallclock secs ( 3.67 usr 0.53 sys + 0.00 cusr 0.00 csys = 4.20 CPU)
Time for select_query_cache2 (10000): 120 wallclock secs ( 3.80 usr 0.57 sys + 0.00 cusr 0.00 csys = 4.37 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.15 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.22 CPU)
Time for select_range (410:1057904): 201 wallclock secs ( 9.05 usr 3.95 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Time for min_max_on_key (70000): 222 wallclock secs (23.16 usr 3.67 sys + 0.00 cusr 0.00 csys = 26.83 CPU)
Time for count_on_key (50000): 534 wallclock secs (16.58 usr 2.69 sys + 0.00 cusr 0.00 csys = 19.27 CPU)
Time for count_group_on_key_parts (1000:100000): 58 wallclock secs ( 1.16 usr 0.35 sys + 0.00 cusr 0.00 csys = 1.51 CPU)
Testing count(distinct) on the table
Time for count_distinct_key_prefix (1000:1000): 51 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.53 CPU)
Time for count_distinct (1000:1000): 60 wallclock secs ( 0.42 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.50 CPU)
Time for count_distinct_2 (1000:1000): 63 wallclock secs ( 0.18 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.21 CPU)
Time for count_distinct_group_on_key (1000:6000): 59 wallclock secs ( 0.51 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.58 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 194 wallclock secs ( 1.12 usr 0.46 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_group (1000:100000): 194 wallclock secs ( 1.21 usr 0.37 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_big (100:1000000): 67 wallclock secs ( 7.77 usr 3.16 sys + 0.00 cusr 0.00 csys = 10.93 CPU)
Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 15:13:43
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 13 wallclock secs ( 1.59 usr 1.18 sys + 0.00 cusr 0.00 csys = 2.77 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 5 wallclock secs ( 2.33 usr 0.52 sys + 0.00 cusr 0.00 csys = 2.85 CPU)
Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
......@@ -458,7 +458,7 @@ sub gd {
# set a color per server so in every result it has the same color ....
foreach $key (@key_order) {
if ($tot{$key}{'server'} =~ /mysql/i) {
if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i) {
if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i || $key =~ /mysql_fast/i) {
$tot{$key}{'color'} = $lblue;
} else {
$tot{$key}{'color'} = $blue;
......
......@@ -1593,7 +1593,7 @@ double Field_longlong::val_real(void)
else
#endif
longlongget(j,ptr);
return unsigned_flag ? ulonglong2double(j) : (double) j;
return unsigned_flag ? ulonglong2double((ulonglong) j) : (double) j;
}
longlong Field_longlong::val_int(void)
......
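The added cast ensures the stored bit pattern is interpreted as unsigned
before the conversion to double; converting the same bits as a signed value
yields a negative result. A small standalone C illustration (not MySQL code,
value chosen for demonstration):

#include <stdio.h>

int main(void)
{
  /* same bit pattern as the unsigned value 18446744073709551614 */
  long long j = -2;
  printf("%.17g\n", (double) j);                       /* -2 */
  printf("%.17g\n", (double) (unsigned long long) j);  /* ~1.8446744073709552e+19 */
  return 0;
}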
......@@ -809,8 +809,19 @@ bool Item_sum_count_distinct::setup(THD *thd)
List<Item> list;
/* Create a table with an unique key over all parameters */
for (uint i=0; i < arg_count ; i++)
if (list.push_back(args[i]))
return 1;
{
Item *item=args[i];
if (list.push_back(item))
return 1; // End of memory
if (item->const_item())
{
(void) item->val_int();
if (item->null_value)
always_null=1;
}
}
if (always_null)
return 0;
count_field_types(tmp_table_param,list,0);
if (table)
{
......@@ -827,15 +838,20 @@ bool Item_sum_count_distinct::setup(THD *thd)
void Item_sum_count_distinct::reset()
{
table->file->extra(HA_EXTRA_NO_CACHE);
table->file->delete_all_rows();
table->file->extra(HA_EXTRA_WRITE_CACHE);
(void) add();
if (table)
{
table->file->extra(HA_EXTRA_NO_CACHE);
table->file->delete_all_rows();
table->file->extra(HA_EXTRA_WRITE_CACHE);
(void) add();
}
}
bool Item_sum_count_distinct::add()
{
int error;
if (always_null)
return 0;
copy_fields(tmp_table_param);
copy_funcs(tmp_table_param->funcs);
......
......@@ -145,11 +145,12 @@ class Item_sum_count_distinct :public Item_sum_int
table_map used_table_cache;
bool fix_fields(THD *thd,TABLE_LIST *tables);
TMP_TABLE_PARAM *tmp_table_param;
bool always_null;
public:
Item_sum_count_distinct(List<Item> &list)
:Item_sum_int(list),table(0),used_table_cache(~(table_map) 0),
tmp_table_param(0)
tmp_table_param(0),always_null(0)
{ quick_group=0; }
~Item_sum_count_distinct();
table_map used_tables() const { return used_table_cache; }
......
......@@ -108,7 +108,7 @@ static void die(const char* fmt, ...)
static void print_version()
{
printf("%s Ver 1.3 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
printf("%s Ver 1.4 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
}
......@@ -132,7 +132,7 @@ the mysql command line client\n\n");
-s, --short-form Just show the queries, no extra info\n\
-o, --offset=N Skip the first N entries\n\
-h, --host=server Get the binlog from server\n\
-P, --port=port Use port to connect to the remove server\n\
-P, --port=port Use port to connect to the remote server\n\
-u, --user=username Connect to the remote server as username\n\
-p, --password=password Password to connect to remote server\n\
-r, --result-file=file Direct output to a given file\n\
......
......@@ -498,11 +498,12 @@ void close_temporary(TABLE *table,bool delete_table)
void close_temporary_tables(THD *thd)
{
TABLE *table,*next;
uint init_query_buf_size = 11, query_buf_size; // "drop table "
char* query, *p;
char *query, *end;
const uint init_query_buf_size = 11; // "drop table "
uint query_buf_size;
bool found_user_tables = 0;
LINT_INIT(p);
LINT_INIT(end);
query_buf_size = init_query_buf_size;
for (table=thd->temporary_tables ; table ; table=table->next)
......@@ -510,37 +511,37 @@ void close_temporary_tables(THD *thd)
query_buf_size += table->key_length;
}
if(query_buf_size == init_query_buf_size)
if (query_buf_size == init_query_buf_size)
return; // no tables to close
if((query = alloc_root(&thd->mem_root, query_buf_size)))
{
memcpy(query, "drop table ", init_query_buf_size);
p = query + init_query_buf_size;
}
if ((query = alloc_root(&thd->mem_root, query_buf_size)))
{
memcpy(query, "drop table ", init_query_buf_size);
end = query + init_query_buf_size;
}
for (table=thd->temporary_tables ; table ; table=next)
{
if(query) // we might be out of memory, but this is not fatal
if (query) // we might be out of memory, but this is not fatal
{
// skip temporary tables not created directly by the user
if (table->table_name[0] != '#')
{
// skip temporary tables not created directly by the user
if(table->table_name[0] != '#')
{
p = strxmov(p,table->table_cache_key,".",
table->table_name,",", NullS);
// here we assume table_cache_key always starts
// with \0 terminated db name
found_user_tables = 1;
}
end = strxmov(end,table->table_cache_key,".",
table->table_name,",", NullS);
// here we assume table_cache_key always starts
// with \0 terminated db name
found_user_tables = 1;
}
}
next=table->next;
close_temporary(table);
}
if (query && found_user_tables && mysql_bin_log.is_open())
{
uint save_query_len = thd->query_length;
*--p = 0;
thd->query_length = (uint)(p-query);
*--end = 0; // Remove last ','
thd->query_length = (uint)(end-query);
Query_log_event qinfo(thd, query);
mysql_bin_log.write(&qinfo);
thd->query_length = save_query_len;
......
......@@ -400,7 +400,22 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
goto err; /* purecov: inspected */
}
if (join.const_tables && !thd->locked_tables)
{
TABLE **table, **end;
for (table=join.table, end=table + join.const_tables ;
table != end;
table++)
{
/* BDB tables require that we call index_end() before doing an unlock */
if ((*table)->key_read)
{
(*table)->key_read=0;
(*table)->file->extra(HA_EXTRA_NO_KEYREAD);
}
(*table)->file->index_end();
}
mysql_unlock_some_tables(thd, join.table,join.const_tables);
}
if (!conds && join.outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
......@@ -2761,7 +2776,12 @@ return_zero_rows(select_result *result,TABLE_LIST *tables,List<Item> &fields,
if (send_row)
result->send_data(fields);
if (tables) // Not from do_select()
{
/* Close open cursors */
for (TABLE_LIST *table=tables; table ; table=table->next)
table->table->file->index_end();
result->send_eof(); // Should be safe
}
}
DBUG_RETURN(0);
}
......
......@@ -455,8 +455,8 @@ str_to_TIME(const char *str, uint length, TIME *l_time,bool fuzzy_date)
if ((date[i]=tmp_value))
date_used=1; // Found something
if (i == 2 && str != end && *str == 'T')
str++; // ISO8601: CCYYMMDDThhmmss
else
str++; // ISO8601: CCYYMMDDThhmmss
else if ( i != 5 ) // Skip inter-field delimiters
{
while (str != end && (ispunct(*str) || isspace(*str)))
{
......