Commit 7bf58c0f authored by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-4.1

into mysql.com:/home/my/mysql-4.1
parents 71ac38b1 4923ac5c
#!/bin/sh
#
# This script's purpose is to update the automake/autoconf helper scripts and
# to run a plain "configure" without any special compile flags. Only features
# that affect the content of the source distribution are enabled. The resulting
# tree can then be picked up by "make dist" to create the "pristine source
# package" that is used as the basis for all other binary builds.
#
make distclean
aclocal
autoheader
libtoolize --automake --force --copy
automake --force --add-missing --copy
autoconf
(cd bdb/dist && sh s_all)
(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
# Default to gcc for CC and CXX
if test -z "$CXX" ; then
export CXX=gcc
fi
if test -z "$CC" ; then
export CC=gcc
fi
# Use ccache, if available
if ccache -V > /dev/null 2>&1
then
  if ! (echo "$CC" | grep "ccache" > /dev/null)
  then
    export CC="ccache $CC"
  fi
  if ! (echo "$CXX" | grep "ccache" > /dev/null)
  then
    export CXX="ccache $CXX"
  fi
fi
# Make sure to enable all features that affect "make dist"
./configure \
  --with-embedded-server \
  --with-berkeley-db \
  --with-innodb \
  --enable-thread-safe-client \
  --with-ndbcluster
make
@@ -103,6 +103,7 @@ lenz@mysql.com
magnus@neptunus.(none)
magnus@shellback.(none)
marko@hundin.mysql.fi
+marty@linux.site
mats@mysql.com
matt@booty.(none)
matt@mysql.com
@@ -26,7 +26,7 @@ else
}
# Some predefined settings
$build_command= "BUILD/compile-pentium-max";
$build_command= "BUILD/compile-dist";
$PWD= cwd();
$opt_docdir= $PWD . "/mysqldoc";
$opt_archive_log= undef;
@@ -70,7 +70,7 @@ GetOptions(
"test|t",
"verbose|v",
"win-dist|w",
"quiet|q",
"quiet|q",
) || print_help("");
#
@@ -122,18 +122,8 @@ if (($opt_directory ne $PWD) && (!-d $opt_directory && !$opt_dry_run))
#
if ($opt_pull)
{
&logger("Updating BK tree $REPO to latest ChangeSet first");
chdir ($REPO) or &abort("Could not chdir to $REPO!");
&run_command("bk pull", "Could not update $REPO!");
chdir ($PWD) or &abort("Could not chdir to $PWD!");
unless ($opt_skip_manual)
{
&logger("Updating manual tree in $opt_docdir");
chdir ($opt_docdir) or &abort("Could not chdir to $opt_docdir!");
&run_command("bk pull", "Could not update $opt_docdir!");
chdir ($PWD) or &abort("Could not chdir to $PWD!");
}
&bk_pull("$REPO");
&bk_pull("$opt_docdir") unless ($opt_skip_manual);
}
#
@@ -270,7 +260,7 @@ if (defined $opt_changelog)
$command.= " " . $REPO . " > $target_dir/ChangeLog";
&logger($command);
# We cannot use run_command here because of output redirection
-if (!$opt_dry_run)
+unless ($opt_dry_run)
{
system($command) == 0 or &abort("Could not create $target_dir/ChangeLog!");
}
@@ -281,17 +271,17 @@ if (defined $opt_changelog)
#
unless ($opt_skip_manual)
{
$msg= "Updating manual files";
&logger($msg);
&logger("Updating manual files");
foreach $file qw/internals manual reservedwords/
{
system ("bk cat $opt_docdir/Docs/$file.texi > $target_dir/Docs/$file.texi") == 0
or &abort("Could not update $file.texi in $target_dir/Docs/!");
}
system ("rm -f $target_dir/Docs/Images/Makefile*") == 0
or &abort("Could not remove Makefiles in $target_dir/Docs/Images/!");
system ("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images") == 0
or &abort("Could not copy image files in $target_dir/Docs/Images/!");
&run_command("rm -f $target_dir/Docs/Images/Makefile*",
"Could not remove Makefiles in $target_dir/Docs/Images/!");
&run_command("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images",
"Could not copy image files in $target_dir/Docs/Images/!");
}
#
@@ -377,6 +367,18 @@ if ($opt_archive_log)
exit 0;
#
+# Run a BK pull on the given BK tree
+#
+sub bk_pull
+{
+my $bk_tree= $_[0];
+&logger("Updating BK tree $bk_tree to latest ChangeSet first");
+chdir ($bk_tree) or &abort("Could not chdir to $bk_tree!");
+&run_command("bk pull", "Could not update $bk_tree!");
+chdir ($PWD) or &abort("Could not chdir to $PWD!");
+}
#
# Print the help text message (with an optional message on top)
#
@@ -2990,8 +2990,7 @@ recv_reset_log_files_for_backup(
memcpy(name + log_dir_len, logfilename, sizeof logfilename);
buf = ut_malloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
-memset(buf, LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE, '\0');
+memset(buf, '\0', LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
for (i = 0; i < n_log_files; i++) {
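Note on the fix above: memset() takes the fill byte as its second argument and the length as its third, so the old call asked for '\0' == 0 bytes to be written and left the log-file header buffer untouched. A minimal standalone illustration of the swapped-argument bug (plain C++, not InnoDB code):

#include <cstdio>
#include <cstring>

int main() {
    char buf[8];
    memset(buf, 0xAA, sizeof(buf));     /* simulate stale contents */

    /* Buggy form: the length argument is '\0' == 0, so no bytes are
       written and the "cleared" buffer still holds stale data. */
    memset(buf, sizeof(buf), '\0');
    printf("after buggy memset:   %02x\n", (unsigned char) buf[0]); /* aa */

    /* Fixed form: memset(dest, fill_byte, length). */
    memset(buf, '\0', sizeof(buf));
    printf("after correct memset: %02x\n", (unsigned char) buf[0]); /* 00 */
    return 0;
}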
@@ -381,8 +381,14 @@ row_upd_changes_field_size_or_external(
new_len = new_val->len;
if (new_len == UNIV_SQL_NULL) {
+/* A bug fixed on Dec 31st, 2004: we looked at the
+SQL NULL size from the wrong field! We may backport
+this fix also to 4.0. The merge to 5.0 will be made
+manually immediately after we commit this to 4.1. */
new_len = dtype_get_sql_null_size(
-dict_index_get_nth_type(index, i));
+dict_index_get_nth_type(index,
+upd_field->field_no));
}
old_len = rec_get_nth_field_size(rec, upd_field->field_no);
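The new comment documents the root cause: the loop counter i numbers the entries of the update vector, which may touch only a subset of the index columns, while the column's actual position in the index is carried in upd_field->field_no. Looking up the SQL NULL size by i therefore read the type of the wrong column whenever the update was partial. A toy reconstruction of that indexing mistake (illustrative names and sizes, not the real InnoDB types):

#include <cassert>

/* Hypothetical per-column SQL NULL storage sizes for a four-column index. */
static const int sql_null_size[4] = {1, 2, 4, 8};

struct upd_field_t { int field_no; };  /* column position in the index */

int null_size(const upd_field_t *upd, int i, bool fixed) {
    /* Buggy: index by the update-vector counter i.
       Fixed: index by the position stored in the entry itself. */
    return fixed ? sql_null_size[upd[i].field_no]
                 : sql_null_size[i];
}

int main() {
    upd_field_t upd[1] = {{3}};  /* partial update touching only column 3 */
    assert(null_size(upd, 0, false) == 1);  /* bug: returns column 0's size */
    assert(null_size(upd, 0, true)  == 8);  /* fix: returns column 3's size */
    return 0;
}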
@@ -212,6 +212,13 @@ Thd_ndb::~Thd_ndb()
{
if (ndb)
delete ndb;
ndb= 0;
}
+inline
+Ndb *ha_ndbcluster::get_ndb()
+{
+return ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
+}
/*
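The ha_ndbcluster.cc hunks that follow all apply the refactoring introduced by the inline accessor above: the cached Ndb* member m_ndb is dropped, and each call site resolves the Ndb object through the current thread's Thd_ndb instead, so a handler instance can never hand out a pointer belonging to a previous connection. A minimal sketch of the pattern with stand-in types (current_thd, THD and Thd_ndb are simplified here, not the real server definitions):

struct Ndb {};
struct Thd_ndb { Ndb *ndb; };
struct THD { Thd_ndb *thd_ndb; };

thread_local THD *current_thd = nullptr;  /* stand-in for mysqld's handle */

class handler_sketch {
public:
    /* Ndb *m_ndb;  -- removed: a cached pointer can go stale when the
       handler object is reused by another connection/thread. */
    Ndb *get_ndb() {
        /* Resolve per call, so the Ndb always belongs to the thread
           that is actually driving this handler right now. */
        return current_thd->thd_ndb->ndb;
    }
};

int main() {
    Ndb ndb;
    Thd_ndb thd_ndb{&ndb};
    THD thd{&thd_ndb};
    current_thd = &thd;

    handler_sketch h;
    return h.get_ndb() == &ndb ? 0 : 1;  /* 0: accessor found the right Ndb */
}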
@@ -245,8 +252,9 @@ void ha_ndbcluster::records_update()
info->no_uncommitted_rows_count));
// if (info->records == ~(ha_rows)0)
{
+Ndb *ndb= get_ndb();
Uint64 rows;
-if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
+if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){
info->records= rows;
}
}
@@ -331,7 +339,8 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
switch (err.classification) {
case NdbError::SchemaError:
{
-NDBDICT *dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NDBDICT *dict= ndb->getDictionary();
DBUG_PRINT("info", ("invalidateTable %s", m_tabname));
dict->invalidateTable(m_tabname);
table->version=0L; /* Free when thread is ready */
@@ -361,7 +370,7 @@ bool ha_ndbcluster::get_error_message(int error,
DBUG_ENTER("ha_ndbcluster::get_error_message");
DBUG_PRINT("enter", ("error: %d", error));
-Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
+Ndb *ndb= get_ndb();
if (!ndb)
DBUG_RETURN(FALSE);
@@ -690,7 +699,8 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields)
int ha_ndbcluster::get_metadata(const char *path)
{
-NDBDICT *dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NDBDICT *dict= ndb->getDictionary();
const NDBTAB *tab;
int error;
bool invalidating_ndb_table= FALSE;
@@ -765,7 +775,8 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
static const char* unique_suffix= "$unique";
KEY* key_info= tab->key_info;
const char **key_name= tab->keynames.type_names;
-NdbDictionary::Dictionary *dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NdbDictionary::Dictionary *dict= ndb->getDictionary();
DBUG_ENTER("build_index_list");
// Save information about all known indexes
@@ -1735,7 +1746,8 @@ int ha_ndbcluster::write_row(byte *record)
if (table->primary_key == MAX_KEY)
{
// Table has hidden primary key
-Uint64 auto_value= m_ndb->getAutoIncrementValue((const NDBTAB *) m_table);
+Ndb *ndb= get_ndb();
+Uint64 auto_value= ndb->getAutoIncrementValue((const NDBTAB *) m_table);
if (set_hidden_key(op, table->fields, (const byte*)&auto_value))
ERR_RETURN(op->getNdbError());
}
@@ -1812,11 +1824,12 @@ }
}
if ((has_auto_increment) && (m_skip_auto_increment))
{
+Ndb *ndb= get_ndb();
Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
DBUG_PRINT("info",
("Trying to set next auto increment value to %lu",
(ulong) next_val));
-if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE))
+if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE))
DBUG_PRINT("info",
("Setting next auto increment value to %u", next_val));
}
@@ -2657,9 +2670,12 @@ void ha_ndbcluster::info(uint flag)
}
else
{
+if ((my_errno= check_ndb_connection()))
+DBUG_VOID_RETURN;
+Ndb *ndb= get_ndb();
Uint64 rows= 100;
if (current_thd->variables.ndb_use_exact_count)
-ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0);
+ndb_get_table_statistics(ndb, m_tabname, &rows, 0);
records= rows;
}
}
@@ -2986,6 +3002,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_RETURN(1);
Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
+Ndb *ndb= thd_ndb->ndb;
DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d",
thd_ndb->lock_count));
@@ -3003,9 +3020,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(!thd->transaction.stmt.ndb_tid);
DBUG_PRINT("trans",("Starting transaction stmt"));
-trans= m_ndb->startTransaction();
+trans= ndb->startTransaction();
if (trans == NULL)
-ERR_RETURN(m_ndb->getNdbError());
+ERR_RETURN(ndb->getNdbError());
no_uncommitted_rows_reset(thd);
thd->transaction.stmt.ndb_tid= trans;
}
@@ -3017,9 +3034,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
// A "master" transaction ha not been started yet
DBUG_PRINT("trans",("starting transaction, all"));
-trans= m_ndb->startTransaction();
+trans= ndb->startTransaction();
if (trans == NULL)
-ERR_RETURN(m_ndb->getNdbError());
+ERR_RETURN(ndb->getNdbError());
no_uncommitted_rows_reset(thd);
/*
@@ -3069,7 +3086,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
-NDBDICT *dict= m_ndb->getDictionary();
+NDBDICT *dict= ndb->getDictionary();
const NDBTAB *tab;
void *tab_info;
if (!(tab= dict->getTable(m_tabname, &tab_info)))
@@ -3096,7 +3113,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
We must in this case close the transaction to release resources
*/
DBUG_PRINT("trans",("ending non-updating transaction"));
-m_ndb->closeTransaction(m_active_trans);
+ndb->closeTransaction(m_active_trans);
thd->transaction.stmt.ndb_tid= 0;
}
}
@@ -3142,16 +3159,17 @@ int ha_ndbcluster::start_stmt(THD *thd)
NdbConnection *trans= (NdbConnection*)thd->transaction.stmt.ndb_tid;
if (!trans){
+Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb;
DBUG_PRINT("trans",("Starting transaction stmt"));
NdbConnection *tablock_trans=
(NdbConnection*)thd->transaction.all.ndb_tid;
DBUG_PRINT("info", ("tablock_trans: %x", (uint)tablock_trans));
DBUG_ASSERT(tablock_trans);
-// trans= m_ndb->hupp(tablock_trans);
-trans= m_ndb->startTransaction();
+// trans= ndb->hupp(tablock_trans);
+trans= ndb->startTransaction();
if (trans == NULL)
-ERR_RETURN(m_ndb->getNdbError());
+ERR_RETURN(ndb->getNdbError());
no_uncommitted_rows_reset(thd);
thd->transaction.stmt.ndb_tid= trans;
}
@@ -3538,7 +3556,8 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
// Create the table in NDB
-NDBDICT *dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NDBDICT *dict= ndb->getDictionary();
if (dict->createTable(tab) != 0)
{
const NdbError err= dict->getNdbError();
@@ -3583,7 +3602,8 @@ int ha_ndbcluster::create_index(const char *name,
KEY *key_info,
bool unique)
{
-NdbDictionary::Dictionary *dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NdbDictionary::Dictionary *dict= ndb->getDictionary();
KEY_PART_INFO *key_part= key_info->key_part;
KEY_PART_INFO *end= key_part + key_info->key_parts;
@@ -3641,15 +3661,16 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
if (check_ndb_connection())
DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION);
-dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+dict= ndb->getDictionary();
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
m_table= (void *)orig_tab;
// Change current database to that of target table
set_dbname(to);
-m_ndb->setDatabaseName(m_dbname);
+ndb->setDatabaseName(m_dbname);
if (!(result= alter_table_name(new_tabname)))
{
// Rename .ndb file
@@ -3666,7 +3687,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
int ha_ndbcluster::alter_table_name(const char *to)
{
-NDBDICT * dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NDBDICT *dict= ndb->getDictionary();
const NDBTAB *orig_tab= (const NDBTAB *) m_table;
int ret;
DBUG_ENTER("alter_table_name_table");
@@ -3708,8 +3730,9 @@ int ha_ndbcluster::delete_table(const char *name)
int ha_ndbcluster::drop_table()
{
-NdbDictionary::Dictionary *dict= m_ndb->getDictionary();
+Ndb *ndb= get_ndb();
+NdbDictionary::Dictionary *dict= ndb->getDictionary();
DBUG_ENTER("drop_table");
DBUG_PRINT("enter", ("Deleting %s", m_tabname));
@@ -3742,6 +3765,7 @@ longlong ha_ndbcluster::get_auto_increment()
{
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
+Ndb *ndb= get_ndb();
int cache_size=
(m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
m_rows_to_insert - m_rows_inserted
@@ -3750,8 +3774,8 @@
: m_autoincrement_prefetch;
Uint64 auto_value=
(m_skip_auto_increment) ?
-m_ndb->readAutoIncrementValue((const NDBTAB *) m_table)
-: m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size);
+ndb->readAutoIncrementValue((const NDBTAB *) m_table)
+: ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size);
DBUG_RETURN((longlong)auto_value);
}
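For the two get_auto_increment() hunks above: the expression they touch sizes the prefetch cache as the smaller of "rows this statement still expects to insert" and the configured prefetch batch, then either reads the current value (when m_skip_auto_increment is set) or reserves a cached range. The middle of the original ternary is elided in this view, so the following min()-style reading is an assumption, sketched with hypothetical names:

#include <algorithm>
#include <cstdint>

static uint64_t autoinc_cache_size(uint64_t rows_to_insert,
                                   uint64_t rows_inserted,
                                   uint64_t autoincrement_prefetch) {
    /* Assumed intent of the elided expression: never reserve more
       auto-increment values than the statement still needs. */
    const uint64_t remaining = rows_to_insert - rows_inserted;
    return std::min(remaining, autoincrement_prefetch);
}

int main() {
    /* A 1000-row INSERT with 990 rows done and a prefetch window of 32:
       only 10 more values are needed, so cache 10, not 32. */
    return autoinc_cache_size(1000, 990, 32) == 10 ? 0 : 1;
}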
@@ -3764,7 +3788,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
handler(table_arg),
m_active_trans(NULL),
m_active_cursor(NULL),
-m_ndb(NULL),
m_table(NULL),
m_table_info(NULL),
m_table_flags(HA_REC_NOT_IN_SEQ |
@@ -3894,7 +3917,6 @@ int ha_ndbcluster::close(void)
DBUG_ENTER("close");
free_share(m_share); m_share= 0;
release_metadata();
-m_ndb= NULL;
DBUG_RETURN(0);
}
@@ -3956,11 +3978,12 @@ Ndb* check_ndb_in_thd(THD* thd)
int ha_ndbcluster::check_ndb_connection()
{
THD* thd= current_thd;
+Ndb *ndb;
DBUG_ENTER("check_ndb_connection");
-if (!(m_ndb= check_ndb_in_thd(thd)))
+if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
-m_ndb->setDatabaseName(m_dbname);
+ndb->setDatabaseName(m_dbname);
DBUG_RETURN(0);
}
@@ -210,7 +210,6 @@ class ha_ndbcluster: public handler
NdbConnection *m_active_trans;
NdbResultSet *m_active_cursor;
-Ndb *m_ndb;
void *m_table;
void *m_table_info;
char m_dbname[FN_HEADLEN];
@@ -246,6 +245,7 @@ class ha_ndbcluster: public handler
bool m_transaction_on;
bool m_use_local_query_cache;
+Ndb *get_ndb();
void set_rec_per_key();
void records_update();
void no_uncommitted_rows_execute_failure();