nexedi / MariaDB

Commit 4ed8a940
Authored Jun 03, 2001 by monty@hundin.mysql.fi

Merge work:/home/bk/mysql into hundin.mysql.fi:/my/bk/mysql

Parents: 30bb7b51 4014d236

8 changed files with 153 additions and 121 deletions (+153 / -121)
mysql-test/t/select.test               +1   -1
mysys/tree.c                           +2   -0
sql-bench/Comments/postgres.benchmark  +25  -22
sql-bench/bench-init.pl.sh             +1   -1
sql-bench/limits/pg.cfg                +40  -39
sql-bench/server-cfg.sh                +53  -57
sql-bench/test-connect.sh              +1   -1
tests/fork_big.pl                      +30  -0
mysql-test/t/select.test

@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou
#
select sum(Period)/count(*) from t1;
select companynr,count(price) as "count",sum(price) as "sum",sum(price)/count(price)-avg(price) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
select companynr,count(price) as "count",sum(price) as "sum",abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
#
mysys/tree.c

@@ -251,6 +251,8 @@ int tree_delete(TREE *tree, void *key)
  }
  if (remove_colour == BLACK)
    rb_delete_fixup(tree, parent);
  if (tree->free)
    (*tree->free)(ELEMENT_KEY(tree, element));
  my_free((gptr) element, MYF(0));
  tree->elements_in_tree--;
  return 0;
sql-bench/Comments/postgres.benchmark

@@ -18,39 +18,43 @@
# corresponding file. If you are using csh, use setenv.
#
export POSTGRES_INCLUDE=/usr/local/pgsql/include
export POSTGRES_LIB=/usr/local/pgsql/lib
export POSTGRES_INCLUDE=/usr/local/pg/include
export POSTGRES_LIB=/usr/local/pg/lib
PATH=$PATH:/usr/local/pgsql/bin
MANPATH=$MANPATH:/usr/local/pgsql/man
PATH=$PATH:/usr/local/pg/bin
MANPATH=$MANPATH:/usr/local/pg/man
#
# Add the following line to /etc/ld.so.conf:
#
/usr/local/pgsql/lib
/usr/local/pg/lib
and run ldconfig.
#
# untar the postgres source distribution and cd to src/
# run the following commands:
#
# untar the postgres source distribution, cd to postgresql-*
# and run the following commands:
./configure
CFLAGS=-O3 ./configure
gmake
gmake install
mkdir /usr/local/pgsql/data
chown postgres /usr/local/pgsql/data
mkdir /usr/local/pg/data
chown postgres /usr/local/pg/data
su - postgres
/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
su postgres -c "/usr/local/pgsql/bin/postmaster -o -F -D /usr/local/pgsql/data" &
su postgres -c "/usr/local/pgsql/bin/createdb test"
/usr/local/pg/bin/initdb -D /usr/local/pg/data
/usr/local/pg/bin/postmaster -o -F -D /usr/local/pg/data &
/usr/local/pg/bin/createdb test
exit
#
# Second, install packages DBD-Pg-0.95.tar.gz and DBI-1.14.tar.gz,
# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
# available from http://www.perl.com/CPAN/
#
export POSTGRES_LIB=/usr/local/pg/lib/
export POSTGRES_INCLUDE=/usr/local/pg/include/postgresql
perl Makefile.PL
make
make install
#
# Now we run the test that can be found in the sql-bench directory in the

@@ -59,17 +63,16 @@ su postgres -c "/usr/local/pgsql/bin/createdb test"
# We did run two tests:
# The standard test
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
# and a test where we do a vacuum() after each update.
# (The time for vacuum() is counted in the book-keeping() column)
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
# If you want to store the results in a output/RUN-xxx file, you should
# repeate the benchmark with the extra option --log --use-old-result
# This will create a the RUN file based of the previous results
#
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512MG, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
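The instructions above end with installing DBI and DBD-Pg and creating the "test" database. Before launching run-all-tests it can help to confirm that setup with a minimal DBI connection script along the following lines; this is only an illustrative sketch, not part of the commit, and the DSN, user and empty password are assumptions about the local installation.

#!/usr/bin/perl
# Illustrative sketch only: verify that DBI/DBD::Pg are installed and that
# the "test" database created above is reachable.  DSN and credentials are
# assumptions about the local setup.
use strict;
use DBI;

my $dbh = DBI->connect("DBI:Pg:dbname=test", "postgres", "",
                       { PrintError => 0 })
  || die "Can't connect to PostgreSQL: $DBI::errstr\n";

my ($version) = $dbh->selectrow_array("select version()");
print "Connected ok: $version\n";
$dbh->disconnect;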
sql-bench/bench-init.pl.sh

@@ -31,7 +31,7 @@
# $server	Object for current server
# $limits	Hash reference to limits for benchmark
$benchmark_version="2.12";
$benchmark_version="2.13";
use Getopt::Long;
require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n";
sql-bench/limits/pg.cfg

(This diff is collapsed and not shown here.)
sql-bench/server-cfg.sh

@@ -121,53 +121,49 @@ sub new
  $self->{'vacuum'} = 1;			# When using with --fast
  $self->{'drop_attr'} = "";
  $limits{'max_conditions'} = 9999;		# (Actually not a limit)
  $limits{'max_columns'} = 2000;		# Max number of columns in table
  # Windows can't handle that many files in one directory
  $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
  $limits{'max_text_size'} = 65000;		# Max size with default buffers.
  $limits{'query_size'} = 1000000;		# Max size with default buffers.
  $limits{'max_index'} = 16;			# Max number of keys
  $limits{'max_index_parts'} = 16;		# Max segments/key
  $limits{'max_column_name'} = 64;		# max table and column name
  $limits{'join_optimizer'} = 1;		# Can optimize FROM tables
  $limits{'load_data_infile'} = 1;		# Has load data infile
  $limits{'lock_tables'} = 1;			# Has lock tables
  $limits{'functions'} = 1;			# Has simple functions (+/-)
  $limits{'group_functions'} = 1;		# Have group functions
  $limits{'group_func_sql_min_str'} = 1;	# Can execute MIN() and MAX() on strings
  $limits{'group_distinct_functions'}= 1;	# Have count(distinct)
  $limits{'select_without_from'}= 1;		# Can do 'select 1';
  $limits{'multi_drop'} = 1;			# Drop table can take many tables
  $limits{'subqueries'} = 0;			# Doesn't support sub-queries.
  $limits{'left_outer_join'} = 1;		# Supports left outer joins
  $limits{'table_wildcard'} = 1;		# Has SELECT table_name.*
  $limits{'having_with_alias'} = 1;		# Can use aliases in HAVING
  $limits{'having_with_group'} = 1;		# Can use group functions in HAVING
  $limits{'like_with_column'} = 1;		# Can use column1 LIKE column2
  $limits{'order_by_position'} = 1;		# Can use 'ORDER BY 1'
  $limits{'group_by_position'} = 1;		# Can use 'GROUP BY 1'
  $limits{'alter_table'} = 1;			# Have ALTER TABLE
  $limits{'NEG'} = 1;				# Supports -id
  $limits{'alter_add_multi_col'}= 1;		#Have ALTER TABLE t add a int,add b int;
  $limits{'alter_table'} = 1;			# Have ALTER TABLE
  $limits{'alter_table_dropcol'}= 1;		# Have ALTER TABLE DROP column
  $limits{'insert_multi_value'} = 1;		# Have INSERT ... values (1,2),(3,4)
  $limits{'group_func_extra_std'} = 1;		# Have group function std().
  $limits{'func_odbc_mod'} = 1;			# Have function mod.
  $limits{'column_alias'} = 1;			# Alias for fields in select statement.
  $limits{'func_extra_%'} = 1;			# Has % as alias for mod()
  $limits{'func_odbc_floor'} = 1;		# Has func_odbc_floor function
  $limits{'func_extra_if'} = 1;			# Have function if.
  $limits{'column_alias'} = 1;			# Alias for fields in select statement.
  $limits{'NEG'} = 1;				# Supports -id
  $limits{'func_extra_in_num'} = 1;		# Has function in
  $limits{'limit'} = 1;				# supports the limit attribute
  $limits{'unique_index'} = 1;			# Unique index works or not
  $limits{'func_odbc_floor'} = 1;		# Has func_odbc_floor function
  $limits{'func_odbc_mod'} = 1;			# Have function mod.
  $limits{'functions'} = 1;			# Has simple functions (+/-)
  $limits{'group_by_position'} = 1;		# Can use 'GROUP BY 1'
  $limits{'group_distinct_functions'}= 1;	# Have count(distinct)
  $limits{'group_func_extra_std'} = 1;		# Have group function std().
  $limits{'group_func_sql_min_str'} = 1;	# Can execute MIN() and MAX() on strings
  $limits{'group_functions'} = 1;		# Have group functions
  $limits{'having_with_alias'} = 1;		# Can use aliases in HAVING
  $limits{'having_with_group'} = 1;		# Can use group functions in HAVING
  $limits{'insert_multi_value'} = 1;		# Have INSERT ... values (1,2),(3,4)
  $limits{'insert_select'} = 1;
  $limits{'working_blobs'} = 1;			# If big varchar/blobs works
  $limits{'join_optimizer'} = 1;		# Can optimize FROM tables
  $limits{'left_outer_join'} = 1;		# Supports left outer joins
  $limits{'like_with_column'} = 1;		# Can use column1 LIKE column2
  $limits{'limit'} = 1;				# supports the limit attribute
  $limits{'load_data_infile'} = 1;		# Has load data infile
  $limits{'lock_tables'} = 1;			# Has lock tables
  $limits{'max_column_name'} = 64;		# max table and column name
  $limits{'max_columns'} = 2000;		# Max number of columns in table
  $limits{'max_conditions'} = 9999;		# (Actually not a limit)
  $limits{'max_index'} = 16;			# Max number of keys
  $limits{'max_index_parts'} = 16;		# Max segments/key
  $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
  $limits{'max_text_size'} = 1000000;		# Good enough for tests
  $limits{'multi_drop'} = 1;			# Drop table can take many tables
  $limits{'order_by_position'} = 1;		# Can use 'ORDER BY 1'
  $limits{'order_by_unused'} = 1;
  $limits{'query_size'} = 1000000;		# Max size with default buffers.
  $limits{'select_without_from'}= 1;		# Can do 'select 1';
  $limits{'subqueries'} = 0;			# Doesn't support sub-queries.
  $limits{'table_wildcard'} = 1;		# Has SELECT table_name.*
  $limits{'unique_index'} = 1;			# Unique index works or not
  $limits{'working_all_fields'} = 1;
  $limits{'working_blobs'} = 1;			# If big varchar/blobs works
  $smds{'time'} = 1;
  $smds{'q1'} = 'b';				# with time not supp by mysql ('')

@@ -568,12 +564,12 @@ sub new
  $self->{'drop_attr'} = "";
  $self->{"vacuum"} = 1;
  $limits{'join_optimizer'} = 1;		# Can optimize FROM tables
  $limits{'load_data_infile'} = 0;		# Is this true ?
  $limits{'load_data_infile'} = 0;
  $limits{'NEG'} = 1;				# Can't handle -id
  $limits{'alter_table'} = 1;			# alter ??
  $limits{'NEG'} = 1;
  $limits{'alter_add_multi_col'}= 0;		# alter_add_multi_col ?
  $limits{'alter_table_dropcol'}= 0;		# alter_drop_col ?
  $limits{'alter_table'} = 1;
  $limits{'alter_table_dropcol'}= 0;
  $limits{'column_alias'} = 1;
  $limits{'func_extra_%'} = 1;
  $limits{'func_extra_if'} = 0;

@@ -582,33 +578,33 @@ sub new
  $limits{'func_odbc_mod'} = 1;			# Has %
  $limits{'functions'} = 1;
  $limits{'group_by_position'} = 1;
  $limits{'group_distinct_functions'}= 1;	# Have count(distinct)
  $limits{'group_func_extra_std'} = 0;
  $limits{'group_func_sql_min_str'}= 1;		# Can execute MIN() and MAX() on strings
  $limits{'group_functions'} = 1;
  $limits{'group_distinct_functions'}= 1;	# Have count(distinct)
  $limits{'having_with_alias'} = 0;
  $limits{'having_with_group'} = 1;
  $limits{'left_outer_join'} = 0;
  $limits{'insert_select'} = 1;
  $limits{'left_outer_join'} = 1;
  $limits{'like_with_column'} = 1;
  $limits{'lock_tables'} = 0;			# in ATIS gives this a problem
  $limits{'max_column_name'} = 128;
  $limits{'max_columns'} = 1000;		# 500 crashes pg 6.3
  $limits{'max_conditions'} = 9999;		# This makes Pg real slow
  $limits{'max_index'} = 64;			# Big enough
  $limits{'max_index_parts'} = 16;
  $limits{'max_tables'} = 5000;			# 10000 crashes pg 7.0.2
  $limits{'max_text_size'} = 65000;		# Good enough for test
  $limits{'multi_drop'} = 1;
  $limits{'order_by_position'} = 1;
  $limits{'order_by_unused'} = 1;
  $limits{'query_size'} = 16777216;
  $limits{'select_without_from'}= 1;
  $limits{'subqueries'} = 1;
  $limits{'table_wildcard'} = 1;
  $limits{'max_column_name'} = 32;		# Is this true
  $limits{'max_columns'} = 1000;		# 500 crashes pg 6.3
  $limits{'max_tables'} = 5000;			# 10000 crashes pg 7.0.2
  $limits{'max_conditions'} = 30;		# This makes Pg real slow
  $limits{'max_index'} = 64;			# Is this true ?
  $limits{'max_index_parts'} = 16;		# Is this true ?
  $limits{'max_text_size'} = 7000;		# 8000 crashes pg 6.3
  $limits{'query_size'} = 16777216;
  $limits{'unique_index'} = 1;			# Unique index works or not
  $limits{'insert_select'} = 1;
  $limits{'working_blobs'} = 1;			# If big varchar/blobs works
  $limits{'order_by_unused'} = 1;
  $limits{'working_all_fields'} = 1;
  $limits{'working_blobs'} = 1;			# If big varchar/blobs works
  # the different cases per query ...
  $smds{'q1'} = 'b';				# with time

@@ -639,7 +635,7 @@ sub new
sub version
{
  my ($version,$dir);
  foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/my/local/pgsql/")
  foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
  {
    if ($dir && -e "$dir/PG_VERSION")
    {
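These $limits entries only describe what the target server can do; the individual sql-bench scripts read the limits hash (handed back from server-cfg.sh as a hash reference) and skip features the server lacks. A minimal sketch of that gating pattern follows; the inlined values stand in for the real get_server()/server-cfg.sh plumbing and are not taken from this commit.

#!/usr/bin/perl
# Illustrative sketch only: gate benchmark tests on the limits hash built in
# server-cfg.sh.  The hard-coded values replace the real get_server() setup.
use strict;

my $limits = { 'group_distinct_functions' => 1, 'subqueries' => 0 };

if ($limits->{'group_distinct_functions'}) {
  print "running count(distinct ...) group-function tests\n";
}
if (!$limits->{'subqueries'}) {
  print "skipping sub-query tests for this server\n";
}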
sql-bench/test-connect.sh

@@ -266,7 +266,7 @@ for ($i=0 ; $i < $opt_loop_count ; $i++)
}
$end_time=new Benchmark;
print "Time to select_big ($opt_loop_count): " .
print "Time to select_big_str ($opt_loop_count): " .
  timestr(timediff($end_time, $loop_time),"all") . "\n\n";
$sth = $dbh->do("drop table bench1" . $server->{'drop_attr'})
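The only change here renames the label printed for this timing line. For reference, the timing idiom used throughout these scripts is Perl's Benchmark module: take a snapshot before and after the work, then format the difference with timestr(timediff(...)). A self-contained sketch, with a placeholder busy-loop instead of the real queries:

#!/usr/bin/perl
# Illustrative sketch only: the Benchmark timing idiom used by the sql-bench
# scripts.  The busy-loop is a placeholder for the real database work.
use strict;
use Benchmark;

my $loop_time = new Benchmark;
my $sum = 0;
for (my $i = 0; $i < 1_000_000; $i++) { $sum += $i; }
my $end_time = new Benchmark;

print "Time to placeholder_work: " .
      timestr(timediff($end_time, $loop_time), "all") . "\n\n";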
tests/fork_big.pl

@@ -88,6 +88,7 @@ for ($i=0 ; $i < $opt_threads ; $i ++)
{
  test_select() if (($pid=fork()) == 0); $work{$pid}="select_key";
}
test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
test_update() if (($pid=fork()) == 0); $work{$pid}="update";
test_flush() if (($pid=fork()) == 0); $work{$pid}="flush";

@@ -213,6 +214,35 @@ sub test_select
  exit(0);
}
#
# Do big select count(distinct..) over the table
#
sub test_select_count
{
  my ($dbh, $i, $j, $count, $loop);
  $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host", $opt_user, $opt_password, { PrintError => 0}) || die $DBI::errstr;
  $count=0;
  $i=0;
  while (!test_if_abort($dbh))
  {
    for ($j=0 ; $j < $numtables ; $j++)
    {
      my ($table)= $testtables[$j]->[0];
      simple_query($dbh, "select count(distinct marker),count(distinct id),count(distinct info) from $table");
      $count++;
    }
    sleep(20);	# This query is quite slow
  }
  $dbh->disconnect; $dbh=0;
  print "Test_select: Executed $count select count(distinct) queries\n";
  exit(0);
}
#
# Delete 1-5 rows from the first 2 tables.
# Test ends when the number of rows for table 3 didn't change during
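The new test_select_count worker follows the same pattern as the other workers above: the parent forks one child per test function, remembers a label per pid in %work, and each child exits when it is done. A minimal, self-contained sketch of that fork-and-reap pattern is shown below; the wait loop is an assumption about how the harness collects its workers, not code taken from fork_big.pl.

#!/usr/bin/perl
# Illustrative sketch only: fork one worker per test, label it by pid in
# %work, then reap the children.  The wait loop is an assumption, not code
# from fork_big.pl.
use strict;

my %work;
for my $label ("select_key", "select_count", "delete")
{
  my $pid = fork();
  die "fork failed: $!" unless defined $pid;
  if ($pid == 0) { print "child $$ doing $label\n"; exit(0); }  # child
  $work{$pid} = $label;                                         # parent
}
while (1)
{
  my $pid = wait();                 # returns -1 once all children are reaped
  last if $pid < 0;
  print "worker $pid ($work{$pid}) finished\n";
}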