Commit c493b5ec authored by unknown

Merge


sql/sql_show.cc:
  merged
parents c0965de3 74bb64d3
......@@ -181,3 +181,5 @@ mysql-test/mysql-test-run
BitKeeper/tmp/gone
mysqld.S
mysqld.sym
.snprj/*
sql-bench/output/*
......@@ -135,6 +135,7 @@ version see the relevant distribution.
* MySQL internals:: @strong{MySQL} internals
* Environment variables:: @strong{MySQL} environment variables
* Users:: Some @strong{MySQL} users
* MySQL customer usage::
* Contrib:: Contributed programs
* Credits:: Contributors to @strong{MySQL}
* News:: @strong{MySQL} change history
......@@ -192,7 +193,7 @@ Example Licensing Situations
* ISP:: ISP @strong{MySQL} services
* Web server:: Running a web server using @strong{MySQL}.
@strong{MySQL} Licensing and Support Costs
MySQL Licensing and Support Costs
* Payment information:: Payment information
* Contact information:: Contact information
......@@ -222,7 +223,7 @@ Installing MySQL
* OS/2:: OS/2 notes
* MySQL binaries:: MySQL binaries
* Post-installation:: Post-installation setup and testing
* Upgrade:: Upgrading/downgrading @strong{MySQL}
* Upgrade:: Upgrading/Downgrading MySQL
Installing a MySQL Binary Distribution
......@@ -828,6 +829,7 @@ Credits
* Developers::
* Contributors::
* Supporters::
MySQL change history
......@@ -876,7 +878,7 @@ Changes in release 3.23.x (Recommended; Gamma)
* News-3.23.1:: Changes in release 3.23.1
* News-3.23.0:: Changes in release 3.23.0
Changes in release 3.22.x
Changes in release 3.22.x (Older; Still supported)
* News-3.22.35:: Changes in release 3.22.35
* News-3.22.34:: Changes in release 3.22.34
......@@ -1814,7 +1816,8 @@ In-memory hash tables which are used as temporary tables.
@item
Handles large databases. We are using @strong{MySQL} with some
databases that contain 50,000,000 records.
databases that contain 50,000,000 records, and we know of users who run
@code{MySQL} with 60,000 tables and about 5,000,000,000 rows.
@item
All columns have default values. You can use @code{INSERT} to insert a
......@@ -9465,16 +9468,13 @@ section in this manual. @xref{SHOW VARIABLES}.
The tuning server parameters section includes information on how to optimize
these. @xref{Server parameters}.
@item -Sg, --skip-grant-tables
This option causes the server not to use the privilege system at all. This
gives everyone @emph{full access} to all databases! (You can tell a running
server to start using the grant tables again by executing @code{mysqladmin
flush-privileges} or @code{mysqladmin reload}.)
@item --safe-mode
Skip some optimization stages.
Implies @code{--skip-delay-key-write}.
@item --safe-show-database
Don't show databases for which the user doesn't have any privileges.
@item --secure
IP numbers returned by the @code{gethostbyname()} system call are
checked to make sure they resolve back to the original hostname. This
......@@ -9494,6 +9494,12 @@ in this feature).
Ignore the @code{delay_key_write} option for all tables.
@xref{Server parameters}.
@item -Sg, --skip-grant-tables
This option causes the server not to use the privilege system at all. This
gives everyone @emph{full access} to all databases! (You can tell a running
server to start using the grant tables again by executing @code{mysqladmin
flush-privileges} or @code{mysqladmin reload}.)
@item --skip-locking
Don't use system locking. To use @code{isamchk} or @code{myisamchk} you must
shut down the server. @xref{Stability}. Note that in @strong{MySQL} Version
......@@ -20242,33 +20248,39 @@ The output resembles that shown below, though the format and numbers may
differ somewhat:
@example
+-------------------------+---------------------------------+
+-------------------------+---------------------------+
| Variable_name | Value |
+-------------------------+---------------------------------+
+-------------------------+---------------------------+
| ansi_mode | OFF |
| back_log | 50 |
| basedir | /usr/local/mysql/ |
| bdb_cache_size | 1048540 |
| bdb_home | /usr/local/mysql/data/ |
| basedir | /my/monty/ |
| bdb_cache_size | 16777216 |
| bdb_home | /my/monty/data/ |
| bdb_max_lock | 10000 |
| bdb_logdir | |
| bdb_shared_data | OFF |
| bdb_tmpdir | /tmp/ |
| binlog_cache_size | 32768 |
| character_set | latin1 |
| character_sets | latin1 |
| connect_timeout | 5 |
| concurrent_insert | ON |
| datadir | /usr/local/mysql/data/ |
| connect_timeout | 5 |
| datadir | /my/monty/data/ |
| delay_key_write | ON |
| delayed_insert_limit | 100 |
| delayed_insert_timeout | 300 |
| delayed_queue_size | 1000 |
| join_buffer_size | 131072 |
| flush | OFF |
| flush_time | 0 |
| have_bdb | YES |
| have_gemeni | NO |
| have_innobase | YES |
| have_raid | YES |
| have_ssl | NO |
| init_file | |
| interactive_timeout | 28800 |
| join_buffer_size | 131072 |
| key_buffer_size | 16776192 |
| language | /usr/local/mysql/share/english/ |
| language | /my/monty/share/english/ |
| large_files_support | ON |
| log | OFF |
| log_update | OFF |
| log_bin | OFF |
......@@ -20276,7 +20288,8 @@ differ somewhat:
| long_query_time | 10 |
| low_priority_updates | OFF |
| lower_case_table_names | 0 |
| max_allowed_packet | 1047552 |
| max_allowed_packet | 1048576 |
| max_binlog_cache_size | 4294967295 |
| max_connections | 100 |
| max_connect_errors | 10 |
| max_delayed_threads | 20 |
......@@ -20285,14 +20298,20 @@ differ somewhat:
| max_sort_length | 1024 |
| max_tmp_tables | 32 |
| max_write_lock_count | 4294967295 |
| myisam_recover_options | DEFAULT |
| myisam_sort_buffer_size | 8388608 |
| net_buffer_length | 16384 |
| net_read_timeout | 30 |
| net_retry_count | 10 |
| net_write_timeout | 60 |
| open_files_limit | 0 |
| pid_file | /usr/local/mysql/data/tik.pid |
| pid_file | /my/monty/data/donna.pid |
| port | 3306 |
| protocol_version | 10 |
| record_buffer | 131072 |
| query_buffer_size | 0 |
| safe_show_database | OFF |
| server_id | 0 |
| skip_locking | ON |
| skip_networking | OFF |
| skip_show_database | OFF |
......@@ -20301,13 +20320,13 @@ differ somewhat:
| sort_buffer | 2097116 |
| table_cache | 64 |
| table_type | MYISAM |
| thread_stack | 131072 |
| thread_cache_size | 3 |
| thread_cache_size | 4 |
| thread_stack | 65536 |
| tmp_table_size | 1048576 |
| tmpdir | /tmp/ |
| version | 3.23.21-beta-debug |
| version | 3.23.29a-gamma-debug |
| wait_timeout | 28800 |
+-------------------------+---------------------------------+
+-------------------------+---------------------------+
@end example
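For example, to check which of the optional table handlers this server was
compiled with, you can restrict the output to the @code{have_} variables (an
illustrative sketch; the values shown are taken from the listing above and
depend on how your server was built):
@example
mysql> SHOW VARIABLES LIKE "have_%";
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| have_bdb      | YES   |
| have_gemeni   | NO    |
| have_innobase | YES   |
| have_raid     | YES   |
| have_ssl      | NO    |
+---------------+-------+
@end example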
Each option is described below. Values for buffer sizes, lengths, and stack
......@@ -20427,6 +20446,21 @@ tables will be closed (to free up resources and sync things to disk). We
only recommend this option on Win95, Win98, or on systems where you have
very limited resources.
@item @code{have_bdb}
@code{YES} if @code{mysqld} supports Berkeley DB tables. @code{DISABLED}
if @code{--skip-bdb} is used.
@item @code{have_gemeni}
@code{YES} if @code{mysqld} supports Gemeni tables. @code{DISABLED}
if @code{--skip-gemeni} is used.
@item @code{have_innobase}
@code{YES} if @code{mysqld} supports Innobase tables. @code{DISABLED}
if @code{--skip-innobase} is used.
@item @code{have_raid}
@code{YES} if @code{mysqld} supports the @code{RAID} option.
@item @code{have_ssl}
@code{YES} if @code{mysqld} supports SSL (encryption) on the client/server
protocol.
@item @code{init_file}
The name of the file specified with the @code{--init-file} option when
you start the server. This is a file of SQL statements you want the
......@@ -20606,6 +20640,12 @@ want to increase this value.
The initial allocation of the query buffer. If most of your queries are
long (like when inserting blobs), you should increase this!
@item @code{safe_show_database}
Don't show databases for which the user doesn't have any database or
table privileges. This can improve security if you're concerned about
people being able to see what databases other users have. See also
@code{skip_show_database}.
@item @code{server_id}
The value of the @code{--server-id} option.
......@@ -20616,10 +20656,10 @@ Is OFF if @code{mysqld} uses external locking.
Is ON if we only allow local (socket) connections.
@item @code{skip_show_database}
This prevents people from doing @code{SHOW DATABASES} if they don't
have the @code{PROCESS_PRIV} privilege. This can improve security if
you're concerned about people being able to see what databases and
tables other users have.
This prevents people from doing @code{SHOW DATABASES} if they don't have
the @code{PROCESS_PRIV} privilege. This can improve security if you're
concerned about people being able to see what databases other users
have. See also @code{safe_show_database}.
@item @code{slow_launch_time}
If creating the thread takes longer than this value (in seconds), the
......@@ -38284,9 +38324,6 @@ you should ftp all the relevant files to
@end itemize
@page
@cindex environment variables, list of
@node Environment variables, Users, MySQL internals, Top
......@@ -38371,7 +38408,7 @@ variables to modify the behavior of @strong{MySQL}. @xref{Option files}.
@page
@cindex users, of MySQL
@cindex news sites
@node Users, Contrib, Environment variables, Top
@node Users, MySQL customer usage, Environment variables, Top
@appendix Some MySQL Users
@appendixsec General News Sites
......@@ -38519,6 +38556,8 @@ support @strong{MySQL}}
@c @item @uref{http://dynodns.net, Free dynamic DNS implementation}
@c EMAIL: A Moore <amoore@mooresystems.com>
@item @uref{http://www.hn.org/, Hammernode; Public DNS Servers}
@item @uref{http://www.fdns.net/, Free 3rd level domains}
@item @uref{http://worldcommunity.com/, Online Database}
......@@ -38773,10 +38812,44 @@ Washington's Eastside residents and businesses}
Send any additions to this list to @email{webmaster@@mysql.com}.
@page
@cindex MySQL usage
@node MySQL customer usage, Contrib, Users, Top
@appendix MySQL customer usage
The section 'Some MySQL Users' contains many links to @strong{MySQL}
users but doesn't provide much information about how they are using
@strong{MySQL}. @xref{Users}. This section is meant to give you an idea
of how other @strong{MySQL} users use @strong{MySQL} to solve their
problems.
This section is very new and we plan to add more stories here shortly.
If you would like to contribute a story about how you use @code{MySQL}
in a unique environment, or a success story about @code{MySQL}, you can
write to @code{docs@@lists.mysql.com} with the subject @code{Success:}.
Note that as we are very busy, it may take some time before you get
feedback on your story.
@itemize @bullet
@item
I think you might be interested in my database size. The whole database
is currently spread over 15 servers and holds about 60,000 tables
containing about 5,000,000,000 rows. My most heavily loaded server
currently holds about 10,000 tables with 1,000,000,000 rows in them.
The largest tables have about 50,000,000 rows each, and this figure will
rise as soon as I move to the 2.4 kernel with large file support.
Currently I have to delete many of the logs for large sites to keep
table sizes under 2GB.
Peter Zaitsev, Spylog.ru.
@item
Texas Instruments is using MySQL to handle tables that contain up
to 2,000 million rows in a validation regression database.
@end itemize
@page
@cindex contributed programs
@cindex programs, contributed
@node Contrib, Credits, Users, Top
@node Contrib, Credits, MySQL customer usage, Top
@appendix Contributed Programs
Many users of @strong{MySQL} have contributed @emph{very} useful support
......@@ -38967,6 +39040,9 @@ applications. By Miguel Angel Sol
@item @uref{http://www.mysql.com/Downloads/Contrib/MySQL-ADA95_API.zip, MySQL-ADA95_API.zip}
An ADA95 interface to the MySQL API. By Francois Fabien.
@item @uref{http://www.mysql.com/Downloads/Contrib/MyTool-DLL_for_VB_and_MySQL.zip, MyTool-DLL_for_VB_and_MySQL.zip}
A DLL with MySQL C API for Visual Basic. By Ken Menzel @email{kenm@@icarz.com}.
@end itemize
@appendixsec Clients
......@@ -39445,12 +39521,14 @@ interested in.
@node Credits, News, Contrib, Top
@appendix Credits
The following persons have helped us make MySQL what it is today
The following persons and companies have helped us make MySQL what it is
today:
@cindex developers, list of
@menu
* Developers::
* Contributors::
* Supporters::
@end menu
@node Developers, Contributors, Credits, Credits
......@@ -39632,7 +39710,7 @@ Allan Larsson (The BOSS for TCX DataKonsult AB).
@end table
@cindex contributors, list of
@node Contributors, , Developers, Credits
@node Contributors, Supporters, Developers, Credits
@appendixsubsec Contributors to MySQL
Contributors to the @strong{MySQL} distribution are listed below, in
......@@ -39702,6 +39780,12 @@ For porting PHP to @strong{MySQL}.
For the first @strong{MySQL} manual. And a lot of spelling/language
fixes for the FAQ (that turned into the @strong{MySQL} manual a long
time ago).
@item Yan Cailin
First translator of the MySQL Reference Manual into simplified Chinese
in early 2000, on which the Big5 and HK coded
(@uref{http://mysql.hitstar.com, mysql.hitstar.com}) versions were
based. @uref{http://linuxdb.yeah.net, Personal home page at
linuxdb.yeah.net}.
@item Giovanni Maruzzelli @email{maruzz@@matrice.it}
For porting iODBC (Unix ODBC).
@item Chris Provenzano
......@@ -39857,6 +39941,24 @@ ODBC and VisualC++ interface questions.
@code{DBD}, Linux, some SQL syntax questions.
@end table
@cindex contributing companies, list of
@node Supporters, , Contributors, Credits
@appendixsubsec Supporters of MySQL
The following companies have helped us finance the development of
@strong{MySQL} by paying us to develop a new feature, by developing a
@strong{MySQL} feature themselves, or by giving us hardware for
@strong{MySQL} development.
@multitable @columnfractions .3 .7
@item Va Linux / Andover.net @tab Replication
@item NuSphere @tab Editing of the @strong{MySQL} manual.
@item Intel @tab Contributed to development on Windows and Linux platforms
@item Compaq @tab Contributed to development on Linux-alpha
@item SWSoft @tab Development on the embedded @code{mysqld} version.
@item FutureQuest @tab @code{--skip-show-variables}
@end multitable
@node News, Bugs, Credits, Top
@appendix MySQL change history
......@@ -39948,6 +40050,12 @@ though, so Version 3.23 is not released as a stable version yet.
@appendixsubsec Changes in release 3.23.30
@itemize @bullet
@item
Added option @code{--safe-show-database}.
@item
Added @code{have_bdb}, @code{have_gemeni}, @code{have_innobase},
@code{have_raid} and @code{have_ssl} to @code{SHOW VARIABLES} to make it
easy to test for supported extensions.
@item
Added option @code{open-files-limit} to @code{mysqld}.
@item
Changed option @code{open-files} to @code{open-files-limit} in
......@@ -436,10 +436,16 @@ AM_CONDITIONAL(ASSEMBLER, test ASSEMBLER_x86 = "" -o ASSEMBLER_x86 = "")
AC_MSG_CHECKING(whether to use RAID)
AC_ARG_WITH(raid,
[ --with-raid Enable RAID Support],
[ AC_DEFINE(USE_RAID)
AC_MSG_RESULT(yes) ],
[ AC_MSG_RESULT(no) ]
[ USE_RAID=$withval ],
[ USE_RAID=no ]
)
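# Only report and define USE_RAID when --with-raid resolved to "yes"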
if test "$USE_RAID" = "yes"
then
AC_MSG_RESULT([yes])
AC_DEFINE([USE_RAID])
else
AC_MSG_RESULT([no])
fi
# Use this to set the place used for unix socket used to local communication.
AC_ARG_WITH(unix-socket-path,
......
id code name
1 1 Tim
2 1 Monty
3 2 David
4 2 Erik
5 3 Sasha
6 3 Jeremy
7 4 Matt
id code name
2 1 Monty
3 2 David
4 2 Erik
5 3 Sasha
6 3 Jeremy
7 4 Matt
8 1 Sinisa
id code name
3 2 David
4 2 Erik
5 3 Sasha
6 3 Jeremy
7 4 Matt
8 1 Sinisa
12 1 Ralph
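# Check that UPDATE IGNORE on a BDB table skips rows that would violate
# the primary key or the unique index instead of aborting the statement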
drop table if exists t1;
create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) type=bdb;
insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt');
select id, code, name from t1 order by id;
update ignore t1 set id = 8, name = 'Sinisa' where id < 3;
select id, code, name from t1 order by id;
update ignore t1 set id = id + 10, name = 'Ralph' where id < 4;
select id, code, name from t1 order by id;
drop table t1;
......@@ -472,7 +472,7 @@ int main(int argc,char **argv)
int error;
MY_INIT(argv[0]);
start_value=5206280L; best_t1=590774L; best_t2=5977654L; best_type=1; /* mode=6229 add=2 func_type: 0 */
start_value=5307411L; best_t1=4597287L; best_t2=3375760L; best_type=1; /* mode=4783 add=5 func_type: 0 */
if (get_options(argc,(char **) argv))
exit(1);
......
......@@ -889,13 +889,15 @@ int ha_berkeley::key_cmp(uint keynr, const byte * old_row,
/*
Update a row from one value to another.
Clobbers key_buff2
*/
int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed,
const byte * old_row,
const byte * new_row, DBT *prim_key)
const byte * old_row, DBT *old_key,
const byte * new_row, DBT *new_key,
ulong thd_options, bool local_using_ignore)
{
DBT row, old_key;
DBT row;
int error;
DBUG_ENTER("update_primary_key");
......@@ -903,37 +905,83 @@ int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed,
{
// Primary key changed or we are updating a key that can have duplicates.
// Delete the old row and add a new one
create_key(&old_key, primary_key, key_buff2, old_row);
if ((error=remove_key(trans, primary_key, old_row, (DBT *) 0, &old_key)))
DBUG_RETURN(error); // This should always succeed
if ((error=pack_row(&row, new_row, 0)))
{
// Out of memory (this shouldn't happen!)
(void) file->put(file, trans, &old_key, &row,
key_type[primary_key]);
DBUG_RETURN(error);
}
// Write new key
if ((error=file->put(file, trans, prim_key, &row, key_type[primary_key])))
if (!(error=remove_key(trans, primary_key, old_row, (DBT *) 0, old_key)))
{
if (!(error=pack_row(&row, new_row, 0)))
{
if ((error=file->put(file, trans, new_key, &row,
key_type[primary_key])))
{
// Probably a duplicated key; Return the error and let the caller
// abort.
// Probably a duplicated key; restore old key and row if needed
last_dup_key=primary_key;
DBUG_RETURN(error);
if (local_using_ignore &&
!(thd_options & OPTION_INTERNAL_SUBTRANSACTIONS))
{
int new_error;
if ((new_error=pack_row(&row, old_row, 0)) ||
(new_error=file->put(file, trans, old_key, &row,
key_type[primary_key])))
error=new_error; // fatal error
}
}
}
}
}
else
{
// Primary key didn't change; just update the row data
if ((error=pack_row(&row, new_row, 0)))
DBUG_RETURN(error);
error=file->put(file, trans, prim_key, &row, 0);
if (error)
DBUG_RETURN(error); // Fatal error
if (!(error=pack_row(&row, new_row, 0)))
error=file->put(file, trans, new_key, &row, 0);
}
DBUG_RETURN(0);
DBUG_RETURN(error);
}
/*
Restore changed keys when a non-fatal error aborts the insert/update
of one row.
Clobbers key_buff2
*/
int ha_berkeley::restore_keys(DB_TXN *trans, key_map changed_keys,
uint primary_key,
const byte *old_row, DBT *old_key,
const byte *new_row, DBT *new_key,
ulong thd_options)
{
int error;
DBT tmp_key;
DBUG_ENTER("restore_keys");
/* Restore the old primary key, and the old row, but don't ignore
duplicate key failure */
if ((error=update_primary_key(trans, TRUE, new_row, new_key,
old_row, old_key, thd_options, FALSE)))
goto err;
/* Remove the new key, and put back the old key
changed_keys is a map of all non-primary keys that need to be
rolled back. The last key set in changed_keys is the one that
triggered the duplicate key error (it wasn't inserted), so for
that one just put back the old value. */
for (uint keynr=0; changed_keys; keynr++, changed_keys >>= 1)
{
if (changed_keys & 1)
{
if (changed_keys != 1 &&
(error = remove_key(trans, keynr, new_row, (DBT*) 0, new_key)))
break;
if ((error = key_file[keynr]->put(key_file[keynr], trans,
create_key(&tmp_key, keynr, key_buff2,
old_row),
old_key, key_type[keynr])))
break;
}
}
err:
dbug_assert(error != DB_KEYEXIST);
DBUG_RETURN(error);
}
int ha_berkeley::update_row(const byte * old_row, byte * new_row)
......@@ -941,6 +989,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
DBT prim_key, key, old_prim_key;
int error;
DB_TXN *sub_trans;
ulong thd_options = table->in_use->options;
bool primary_key_changed;
DBUG_ENTER("update_row");
......@@ -966,15 +1015,22 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
old_prim_key=prim_key;
}
sub_trans = transaction;
LINT_INIT(error);
for (uint retry=0 ; retry < berkeley_trans_retry ; retry++)
{
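/*
  Bitmap of the non-primary keys already modified for this row;
  restore_keys() uses it to undo those changes if a later key update
  fails with a non-fatal (duplicate key) error.
*/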
key_map changed_keys = 0;
if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS))
{
if ((error=txn_begin(db_env, transaction, &sub_trans, 0)))
break;
DBUG_PRINT("trans",("starting subtransaction"));
}
/* Start by updating the primary key */
if (!(error=update_primary_key(sub_trans, primary_key_changed,
old_row, new_row, &prim_key)))
old_row, &old_prim_key,
new_row, &prim_key,
thd_options, using_ignore)))
{
// Update all other keys
for (uint keynr=0 ; keynr < table->keys ; keynr++)
......@@ -984,8 +1040,21 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
if (key_cmp(keynr, old_row, new_row) || primary_key_changed)
{
if ((error=remove_key(sub_trans, keynr, old_row, (DBT*) 0,
&old_prim_key)) ||
(error=key_file[keynr]->put(key_file[keynr], sub_trans,
&old_prim_key)))
{
if (using_ignore &&
(thd_options & OPTION_INTERNAL_SUBTRANSACTIONS))
{
int new_error;
DBUG_PRINT("trans",("aborting subtransaction"));
new_error=txn_abort(sub_trans);
if (new_error)
error = new_error;
}
DBUG_RETURN(error); // Fatal error
}
changed_keys |= (key_map)1 << keynr;
if ((error=key_file[keynr]->put(key_file[keynr], sub_trans,
create_key(&key, keynr, key_buff2,
new_row),
&prim_key, key_type[keynr])))
......@@ -996,22 +1065,50 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
}
}
}
if (!error)
if (error)
{
DBUG_PRINT("trans",("committing subtransaction"));
error=txn_commit(sub_trans, 0);
/* Remove inserted row */
DBUG_PRINT("error",("Got error %d",error));
if (using_ignore)
{
int new_error = 0;
if (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)
{
DBUG_PRINT("trans",("aborting subtransaction"));
new_error=txn_abort(sub_trans);
}
else
else if (changed_keys)
new_error=restore_keys(transaction, changed_keys, primary_key,
old_row, &old_prim_key, new_row, &prim_key,
thd_options);
if (new_error)
{
/* Remove inserted row */
error=new_error; // This shouldn't happen
break;
}
}
#ifdef BROKEN_CODE_HERE
int new_error;
DBUG_PRINT("error",("Got error %d",error));
if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS))
{
DBUG_PRINT("trans",("aborting subtransaction"));
if ((new_error=txn_abort(sub_trans)))
new_error=txn_abort(sub_trans);
}
else if (changed_keys)
new_error=restore_keys(changed_keys, primary_key,
old_row, old_prim_key, new_row, prim_key);
if (new_error)
{
error=new_error; // This shouldn't happen
break;
}
#endif
}
else if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS))
{
DBUG_PRINT("trans",("committing subtransaction"));
error=txn_commit(sub_trans, 0);
}
if (error != DB_LOCK_DEADLOCK)
break;
......@@ -1065,7 +1162,7 @@ int ha_berkeley::remove_key(DB_TXN *sub_trans, uint keynr, const byte *record,
create_key(&key, keynr, key_buff2, record)),
(keynr == primary_key ?
packed_record : prim_key),
DB_GET_BOTH)))
DB_GET_BOTH | DB_RMW)))
{ // This shouldn't happen
error=tmp_cursor->c_del(tmp_cursor,0);
}
......@@ -1401,9 +1498,7 @@ void ha_berkeley::position(const byte *record)
{
DBT key;
if (hidden_primary_key)
{
memcpy_fixed(ref, (char*) current_ident, BDB_HIDDEN_PRIMARY_KEY_LENGTH);
}
else
create_key(&key, primary_key, ref, record);
}
......@@ -1438,6 +1533,7 @@ int ha_berkeley::extra(enum ha_extra_function operation)
case HA_EXTRA_RESET:
case HA_EXTRA_RESET_STATE:
key_read=0;
using_ignore=0;
break;
case HA_EXTRA_KEYREAD:
key_read=1; // Query satisfied with key
......@@ -1445,6 +1541,12 @@ int ha_berkeley::extra(enum ha_extra_function operation)
case HA_EXTRA_NO_KEYREAD:
key_read=0;
break;
case HA_EXTRA_IGNORE_DUP_KEY:
using_ignore=1;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
using_ignore=0;
break;
default:
break;
}
......@@ -1548,6 +1650,8 @@ THR_LOCK_DATA **ha_berkeley::store_lock(THD *thd, THR_LOCK_DATA **to,
!thd->in_lock_tables)
lock_type = TL_WRITE_ALLOW_WRITE;
lock.type=lock_type;
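/*
  If the table will also be written by this statement, read rows with
  DB_RMW so the read takes a write lock at once instead of having to
  upgrade the lock (and possibly deadlock) later.
*/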
lock_on_read= ((table->reginfo.lock_type > TL_WRITE_ALLOW_READ) ? DB_RMW :
0);
}
*to++= &lock;
return to;
......@@ -1658,7 +1762,7 @@ int ha_berkeley::delete_table(const char *name)
double ha_berkeley::scan_time()
{
return records/3;
}
}
ha_rows ha_berkeley::records_in_range(int keynr,
const byte *start_key,uint start_key_len,
......
......@@ -70,10 +70,15 @@ class ha_berkeley: public handler
DBT *packed_record, DBT *prim_key);
int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record,
DBT *prim_key, key_map keys, int result);
int restore_keys(DB_TXN *trans, key_map changed_keys, uint primary_key,
const byte *old_row, DBT *old_key,
const byte *new_row, DBT *new_key,
ulong thd_options);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
int update_primary_key(DB_TXN *trans, bool primary_key_changed,
const byte * old_row, const byte * new_row,
DBT *prim_key);
const byte * old_row, DBT *old_key,
const byte * new_row, DBT *prim_key,
ulong thd_options, bool local_using_ignore);
int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool);
DBT *get_pos(DBT *to, byte *pos);
......
......@@ -520,7 +520,7 @@ extern ulong keybuff_size,sortbuff_size,max_item_sort_length,table_cache_size,
binlog_cache_size, max_binlog_cache_size;
extern ulong specialflag, current_pid;
extern bool low_priority_updates;
extern bool opt_sql_bin_update;
extern bool opt_sql_bin_update, opt_safe_show_db;
extern char language[LIBLEN],reg_ext[FN_EXTLEN],blob_newline;
extern const char **errmesg; /* Error messages */
extern byte last_ref[MAX_REFLENGTH]; /* Index ref of keys */
......
......@@ -146,6 +146,33 @@ static uint handler_count;
static bool opt_console=0;
#endif
#ifdef HAVE_BERKELEY_DB
SHOW_COMP_OPTION have_berkeley_db=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_berkeley_db=SHOW_OPTION_NO;
#endif
#ifdef HAVE_GEMENI_DB
SHOW_COMP_OPTION have_gemeni=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_gemeni=SHOW_OPTION_NO;
#endif
#ifdef HAVE_INNOBASE_DB
SHOW_COMP_OPTION have_innobase=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_innobase=SHOW_OPTION_NO;
#endif
#ifdef USE_RAID
SHOW_COMP_OPTION have_raid=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_raid=SHOW_OPTION_NO;
#endif
#ifdef HAVE_OPENSSL
SHOW_COMP_OPTION have_ssl=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_ssl=SHOW_OPTION_NO;
#endif
static bool opt_skip_slave_start = 0; // if set, slave is not autostarted
static ulong opt_specialflag=SPECIAL_ENGLISH;
static my_socket unix_sock= INVALID_SOCKET,ip_sock= INVALID_SOCKET;
......@@ -156,8 +183,9 @@ static char mysql_home[FN_REFLEN],pidfile_name[FN_REFLEN];
static pthread_t select_thread;
static bool opt_log,opt_update_log,opt_bin_log,opt_slow_log,opt_noacl,
opt_disable_networking=0, opt_bootstrap=0,opt_skip_show_db=0,
opt_ansi_mode=0,opt_myisam_log=0, opt_large_files=sizeof(my_off_t) > 4;
bool opt_sql_bin_update = 0, opt_log_slave_updates = 0;
opt_ansi_mode=0,opt_myisam_log=0,
opt_large_files=sizeof(my_off_t) > 4;
bool opt_sql_bin_update = 0, opt_log_slave_updates = 0, opt_safe_show_db=0;
FILE *bootstrap_file=0;
int segfaulted = 0; // ensure we do not enter SIGSEGV handler twice
extern MASTER_INFO glob_mi;
......@@ -2322,7 +2350,7 @@ enum options {
OPT_INNOBASE_DATA_HOME_DIR,OPT_INNOBASE_DATA_FILE_PATH,
OPT_INNOBASE_LOG_GROUP_HOME_DIR,
OPT_INNOBASE_LOG_ARCH_DIR, OPT_INNOBASE_LOG_ARCHIVE,
OPT_INNOBASE_FLUSH_LOG_AT_TRX_COMMIT
OPT_INNOBASE_FLUSH_LOG_AT_TRX_COMMIT, OPT_SAFE_SHOW_DB
};
static struct option long_options[] = {
......@@ -2424,6 +2452,7 @@ static struct option long_options[] = {
{"replicate-rewrite-db", required_argument, 0,
(int) OPT_REPLICATE_REWRITE_DB},
{"safe-mode", no_argument, 0, (int) OPT_SAFE},
{"safe-show-database", no_argument, 0, (int) OPT_SAFE_SHOW_DB},
{"socket", required_argument, 0, (int) OPT_SOCKET},
{"server-id", required_argument, 0, (int) OPT_SERVER_ID},
{"set-variable", required_argument, 0, 'O'},
......@@ -2593,6 +2622,11 @@ struct show_var_st init_vars[]= {
{"delayed_queue_size", (char*) &delayed_queue_size, SHOW_LONG},
{"flush", (char*) &myisam_flush, SHOW_MY_BOOL},
{"flush_time", (char*) &flush_time, SHOW_LONG},
{"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
{"have_gemeni", (char*) &have_gemeni, SHOW_HAVE},
{"have_innobase", (char*) &have_innobase, SHOW_HAVE},
{"have_raid", (char*) &have_raid, SHOW_HAVE},
{"have_ssl", (char*) &have_ssl, SHOW_HAVE},
{"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR},
{"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG},
{"join_buffer_size", (char*) &join_buff_size, SHOW_LONG},
......@@ -2631,6 +2665,7 @@ struct show_var_st init_vars[]= {
{"protocol_version", (char*) &protocol_version, SHOW_INT},
{"record_buffer", (char*) &my_default_record_cache_size,SHOW_LONG},
{"query_buffer_size", (char*) &query_buff_size, SHOW_LONG},
{"safe_show_database", (char*) &opt_safe_show_db, SHOW_BOOL},
{"server_id", (char*) &server_id, SHOW_LONG},
{"skip_locking", (char*) &my_disable_locking, SHOW_MY_BOOL},
{"skip_networking", (char*) &opt_disable_networking, SHOW_BOOL},
......@@ -3347,11 +3382,13 @@ static void get_options(int argc,char **argv)
break;
case OPT_BDB_SKIP:
berkeley_skip=1;
have_berkeley_db=SHOW_OPTION_DISABLED;
break;
#endif
#ifdef HAVE_INNOBASE_DB
case OPT_INNOBASE_SKIP:
innobase_skip=1;
have_innobase=SHOW_OPTION_DISABLED;
break;
case OPT_INNOBASE_DATA_HOME_DIR:
innobase_data_home_dir=optarg;
......@@ -3410,6 +3447,9 @@ static void get_options(int argc,char **argv)
case OPT_MASTER_CONNECT_RETRY:
master_connect_retry= atoi(optarg);
break;
case (int) OPT_SAFE_SHOW_DB:
opt_safe_show_db=1;
break;
default:
fprintf(stderr,"%s: Unrecognized option: %c\n",my_progname,c);
......
......@@ -74,12 +74,19 @@ mysqld_show_dbs(THD *thd,const char *wild)
DBUG_RETURN(1);
List_iterator<char> it(files);
while ((file_name=it++))
{
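/*
  With --safe-show-database, only list a database if the user has some
  global privilege, a database privilege, or a table grant in it.
*/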
if (!opt_safe_show_db || thd->master_access ||
acl_get(thd->host, thd->ip, (char*) &thd->remote.sin_addr,
thd->priv_user, file_name) ||
(grant_option && !check_grant_db(thd, file_name)))
{
thd->packet.length(0);
net_store_data(&thd->packet,file_name);
if (my_net_write(&thd->net,(char*) thd->packet.ptr(),thd->packet.length()))
if (my_net_write(&thd->net, (char*) thd->packet.ptr(),
thd->packet.length()))
DBUG_RETURN(-1);
}
}
send_eof(&thd->net);
DBUG_RETURN(0);
}
......@@ -1086,6 +1093,14 @@ int mysqld_show(THD *thd, const char *wild, show_var_st *variables)
case SHOW_INT:
net_store_data(&packet2,(uint32) *(int*) variables[i].value);
break;
case SHOW_HAVE:
{
SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) variables[i].value;
net_store_data(&packet2, (tmp == SHOW_OPTION_NO ? "NO" :
tmp == SHOW_OPTION_YES ? "YES" :
"DISABLED"));
break;
}
case SHOW_CHAR:
net_store_data(&packet2,variables[i].value);
break;
......
......@@ -1658,7 +1658,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
};
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
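/*
  For IGNORE and REPLACE, tell the handler to continue past duplicate-key
  errors while copying rows; duplicates are handled below.
*/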
if (handle_duplicates == DUP_IGNORE)
if (handle_duplicates == DUP_IGNORE ||
handle_duplicates == DUP_REPLACE)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
next_field=to->next_number_field;
while (!(error=info.read_record(&info)))
......@@ -1675,7 +1676,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
copy_ptr->do_copy(copy_ptr);
if ((error=to->file->write_row((byte*) to->record[0])))
{
if (handle_duplicates != DUP_IGNORE ||
if ((handle_duplicates != DUP_IGNORE &&
handle_duplicates != DUP_REPLACE) ||
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
{
......
......@@ -125,7 +125,9 @@ typedef struct {
enum SHOW_TYPE { SHOW_LONG,SHOW_CHAR,SHOW_INT,SHOW_CHAR_PTR,SHOW_BOOL,
SHOW_MY_BOOL,SHOW_OPENTABLES,SHOW_STARTTIME,SHOW_QUESTION,
SHOW_LONG_CONST, SHOW_INT_CONST};
SHOW_LONG_CONST, SHOW_INT_CONST, SHOW_HAVE};
enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED};
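/* A SHOW_COMP_OPTION is printed by SHOW VARIABLES as YES, NO or DISABLED */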
struct show_var_st {
const char *name;
......