Commit af7051c9 authored by unknown

Merge perch.ndb.mysql.com:/home/jonas/src/51-new

into  perch.ndb.mysql.com:/home/jonas/src/51-ndb

parents 4eede6f0 4234038b
......@@ -377,7 +377,8 @@ static struct my_option my_long_options[] =
{"create-schema", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
(gptr*) &create_schema_string, (gptr*) &create_schema_string, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"csv", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
{"csv", OPT_CREATE_SLAP_SCHEMA,
"Generate CSV output to named file or to stdout if no file is named.",
(gptr*) &opt_csv_str, (gptr*) &opt_csv_str, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
{"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
......
......@@ -39,7 +39,10 @@ then
AC_DEFINE([$5])
mysql_se_decls="${mysql_se_decls},$6"
mysql_se_htons="${mysql_se_htons},&$6"
if test "$8" != "no"
then
mysql_se_objs="$mysql_se_objs $8"
fi
mysql_se_dirs="$mysql_se_dirs $7"
mysql_se_libs="$mysql_se_libs $9"
else
......
......@@ -2437,8 +2437,8 @@ MYSQL_STORAGE_ENGINE(archive,,,,,,storage/archive,,
\$(top_builddir)/storage/archive/libarchive.a, [
AC_CONFIG_FILES(storage/archive/Makefile)
])
MYSQL_STORAGE_ENGINE(csv,,,"yes",,tina_hton,storage/csv,
../storage/csv/ha_tina.o,,[
MYSQL_STORAGE_ENGINE(csv,,,"yes",,tina_hton,storage/csv,no,
\$(top_builddir)/storage/csv/libcsv.a,[
AC_CONFIG_FILES(storage/csv/Makefile)
])
MYSQL_STORAGE_ENGINE(blackhole)
......
......@@ -89,7 +89,7 @@ while test $# -gt 0; do
--small)
ndb_no_ord=32
ndb_con_op=5000
ndb_dmem=10M
ndb_dmem=20M
ndb_imem=1M
ndb_pbmem=4M
;;
......
......@@ -139,3 +139,30 @@ a b c
10 NULL Ten
15 NULL Fifteen
drop table t1, t2;
CREATE TABLE t1 (a int);
INSERT INTO t1 VALUES (1);
SET NAMES latin1;
SET character_set_filesystem=filename;
select @@character_set_filesystem;
@@character_set_filesystem
filename
SELECT * INTO OUTFILE 't-1' FROM t1;
DELETE FROM t1;
LOAD DATA INFILE 't-1' INTO TABLE t1;
SELECT * FROM t1;
a
1
DELETE FROM t1;
SET character_set_filesystem=latin1;
select @@character_set_filesystem;
@@character_set_filesystem
latin1
LOAD DATA INFILE 't@002d1' INTO TABLE t1;
SELECT * FROM t1;
a
1
DROP TABLE t1;
SET character_set_filesystem=default;
select @@character_set_filesystem;
@@character_set_filesystem
binary
ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4, mysqld,5, mysqld,6, mysqld,7, mysqld,8, mysqld,9, mysqld,10, mysqld,11,
1,localhost,10485760,1048576 2,localhost,10485760,1048576
1 localhost 10485760 1048576
2 localhost 10485760 1048576
1,localhost,20971520,1048576 2,localhost,20971520,1048576
1 localhost 20971520 1048576
2 localhost 20971520 1048576
1 2
ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4, mysqld,5, mysqld,6, mysqld,7, mysqld,8, mysqld,9, mysqld,10, mysqld,11,
ndbd,1,localhost,52428800,26214400 ndbd,2,localhost,52428800,36700160 ndbd,3,localhost,52428800,52428800 ndbd,4,localhost,52428800,52428800 ndb_mgmd,5,localhost,, mysqld,6,localhost,,
......
use test;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
id type state logging database schema name
2 UserTable Online Yes cluster_replication def apply_status
1 SystemTable Online Yes sys def NDB$EVENTS_0
3 UserTable Online Yes cluster_replication def schema
0 SystemTable Online Yes sys def SYSTAB_0
NDBT_ProgramExit: 0 - OK
CREATE TABLE `t1_c` (
`capgoaledatta` smallint(5) unsigned NOT NULL auto_increment,
`goaledatta` char(2) NOT NULL default '',
......@@ -442,6 +450,7 @@ select * from t9_c) a;
count(*)
3
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
Create table test/def/t2_c failed: Translate frm error
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
520093696,2
......@@ -26,6 +26,5 @@ rpl_ndb_basic : Bug#16228
rpl_sp : Bug #16456
ndb_autodiscover : Needs to be fixed w.r.t binlog
ndb_autodiscover2 : Needs to be fixed w.r.t binlog
ndb_restore : Needs fixing
system_mysql_db : Needs fixing
system_mysql_db_fix : Needs fixing
......@@ -114,3 +114,27 @@ select * from t1;
drop table t1, t2;
# End of 5.0 tests
#
# Bug#12448 LOAD DATA / SELECT INTO OUTFILE
# doesn't work with multibyte path name
#
CREATE TABLE t1 (a int);
INSERT INTO t1 VALUES (1);
SET NAMES latin1;
SET character_set_filesystem=filename;
select @@character_set_filesystem;
SELECT * INTO OUTFILE 't-1' FROM t1;
DELETE FROM t1;
LOAD DATA INFILE 't-1' INTO TABLE t1;
SELECT * FROM t1;
DELETE FROM t1;
SET character_set_filesystem=latin1;
select @@character_set_filesystem;
LOAD DATA INFILE 't@002d1' INTO TABLE t1;
SELECT * FROM t1;
DROP TABLE t1;
--exec rm $MYSQL_TEST_DIR/var/master-data/test/t@002d1
SET character_set_filesystem=default;
select @@character_set_filesystem;
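
The test above exercises the new character_set_filesystem variable: with it set to filename, characters that are not filesystem-safe are written as @NNNN (four hex digits of the code point), which is why 't-1' appears on disk as 't@002d1'. Below is a minimal standalone sketch of that encoding idea only; it is not the server's charset code, and the exact set of pass-through characters is an assumption.

#include <cctype>
#include <cstdio>
#include <string>

/* Toy model of the "filename" encoding: keep [0-9A-Za-z_] as-is and
   write every other byte as @NNNN, the code point in four hex digits. */
static std::string encode_filename(const std::string &in)
{
  std::string out;
  for (unsigned char c : in)
  {
    if (std::isalnum(c) || c == '_')
      out += static_cast<char>(c);
    else
    {
      char buf[8];
      std::snprintf(buf, sizeof(buf), "@%04x", c);
      out += buf;
    }
  }
  return out;
}

int main()
{
  std::printf("%s\n", encode_filename("t-1").c_str());  /* prints t@002d1 */
  return 0;
}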
......@@ -8,6 +8,9 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
--enable_warnings
--exec $NDB_TOOLS_DIR/ndb_show_tables
--exec $NDB_MGM --no-defaults -e "all dump 1000" >> $NDB_TOOLS_OUTPUT
CREATE TABLE `t1_c` (
`capgoaledatta` smallint(5) unsigned NOT NULL auto_increment,
`goaledatta` char(2) NOT NULL default '',
......@@ -348,8 +351,7 @@ select count(*)
select * from t9_c) a;
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
--error 134
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-2 2>&1 | grep Translate || true
#
# Cleanup
......
......@@ -296,7 +296,10 @@ int ha_myisam::dump(THD* thd, int fd)
#endif /* HAVE_REPLICATION */
bool ha_myisam::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
bool ha_myisam::check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread)
{
/*
To be able to open and lock for reading system tables like 'mysql.proc',
......
......@@ -60,7 +60,10 @@ class ha_myisam: public handler
uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
uint checksum() const;
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count);
virtual bool check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
......
......@@ -1425,6 +1425,12 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
** General handler functions
****************************************************************************/
void handler::ha_statistic_increment(ulong SSV::*offset) const
{
statistic_increment(table->in_use->status_var.*offset, &LOCK_status);
}
/*
Open database-handler.
......
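
The new handler::ha_statistic_increment above takes a pointer to a data member (ulong SSV::*offset), so each call site such as ha_statistic_increment(&SSV::ha_write_count) names which counter in the session's status block to bump. A short sketch of that C++ mechanism, with a made-up struct standing in for system_status_var:

/* Sketch of the pointer-to-member counter pattern; StatusVars is only a
   stand-in for the real system_status_var (SSV). */
struct StatusVars
{
  unsigned long ha_write_count;
  unsigned long ha_read_rnd_next_count;
};

static void bump(StatusVars &vars, unsigned long StatusVars::*offset)
{
  ++(vars.*offset);                      /* dereference the member pointer */
}

int main()
{
  StatusVars v = { 0, 0 };
  bump(v, &StatusVars::ha_write_count);  /* analogous to &SSV::ha_write_count */
  bump(v, &StatusVars::ha_read_rnd_next_count);
  return (v.ha_write_count == 1 && v.ha_read_rnd_next_count == 1) ? 0 : 1;
}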
......@@ -1006,6 +1006,8 @@ typedef struct st_handler_buffer
byte *end_of_used_area; /* End of area that was used by handler */
} HANDLER_BUFFER;
typedef struct system_status_var SSV;
class handler :public Sql_alloc
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
......@@ -1027,6 +1029,9 @@ class handler :public Sql_alloc
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
void ha_statistic_increment(ulong SSV::*offset) const;
private:
virtual int reset() { return extra(HA_EXTRA_RESET); }
public:
......@@ -1109,7 +1114,10 @@ class handler :public Sql_alloc
TRUE Locking is allowed
FALSE Locking is not allowed. The error was thrown.
*/
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
virtual bool check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread)
{
return TRUE;
}
......
......@@ -1932,7 +1932,7 @@ class Item_int_with_ref :public Item_int
virtual Item *real_item() { return ref; }
};
#ifdef MYSQL_SERVER
#include "gstream.h"
#include "spatial.h"
#include "item_sum.h"
......@@ -1945,6 +1945,7 @@ class Item_int_with_ref :public Item_int
#include "item_uniq.h"
#include "item_subselect.h"
#include "item_xmlfunc.h"
#endif
class Item_copy_string :public Item
{
......
......@@ -617,8 +617,11 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
Check if we can lock the table. For some tables we cannot do that
because of handler-specific locking issues.
*/
if (!table_ptr[i]->file->check_if_locking_is_allowed(thd, table_ptr[i],
count))
if (!table_ptr[i]-> file->
check_if_locking_is_allowed(thd->lex->sql_command, thd->lex->type,
table_ptr[i], count,
(thd == logger.get_general_log_thd()) ||
(thd == logger.get_slow_log_thd())))
return 0;
}
......
......@@ -477,6 +477,11 @@ inline THD *_current_thd(void)
}
#define current_thd _current_thd()
/* below functions are required for plugins as THD class is opaque */
my_bool thd_in_lock_tables(const THD *thd);
my_bool thd_tablespace_op(const THD *thd);
const char *thd_proc_info(THD *thd, const char *info);
/*
External variables
*/
......@@ -507,7 +512,9 @@ enum enum_var_type
class sys_var;
#include "item.h"
extern my_decimal decimal_zero;
#ifdef MYSQL_SERVER
typedef Comp_creator* (*chooser_compare_func_creator)(bool invert);
#endif
/* sql_parse.cc */
void free_items(Item *item);
void cleanup_items(Item *item);
......@@ -545,6 +552,7 @@ Item *negate_expression(THD *thd, Item *expr);
#include "sql_class.h"
#include "sql_acl.h"
#include "tztime.h"
#ifdef MYSQL_SERVER
#include "opt_range.h"
#ifdef HAVE_QUERY_CACHE
......@@ -841,6 +849,8 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
Field *
find_field_in_table_sef(TABLE *table, const char *name);
#endif /* MYSQL_SERVER */
#ifdef HAVE_OPENSSL
#include <openssl/des.h>
struct st_des_keyblock
......@@ -858,6 +868,7 @@ extern pthread_mutex_t LOCK_des_key_file;
bool load_des_key_file(const char *file_name);
#endif /* HAVE_OPENSSL */
#ifdef MYSQL_SERVER
/* sql_do.cc */
bool mysql_do(THD *thd, List<Item> &values);
......@@ -1169,6 +1180,7 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
int key_rec_cmp(void *key_info, byte *a, byte *b);
bool init_errmessage(void);
#endif /* MYSQL_SERVER */
void sql_perror(const char *message);
int vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
......@@ -1191,6 +1203,7 @@ bool general_log_print(THD *thd, enum enum_server_command command,
bool fn_format_relative_to_data_home(my_string to, const char *name,
const char *dir, const char *extension);
#ifdef MYSQL_SERVER
File open_binlog(IO_CACHE *log, const char *log_file_name,
const char **errmsg);
......@@ -1739,4 +1752,5 @@ inline void kill_delayed_threads(void) {}
#define check_stack_overrun(A, B, C) 0
#endif
#endif /* MYSQL_SERVER */
#endif /* MYSQL_CLIENT */
......@@ -315,6 +315,7 @@ static const char *sql_mode_str= "OFF";
static char *mysqld_user, *mysqld_chroot, *log_error_file_ptr;
static char *opt_init_slave, *language_ptr, *opt_init_connect;
static char *default_character_set_name;
static char *character_set_filesystem_name;
static char *my_bind_addr_str;
static char *default_collation_name;
static char mysql_data_home_buff[2];
......@@ -565,6 +566,7 @@ MY_BITMAP temp_pool;
CHARSET_INFO *system_charset_info, *files_charset_info ;
CHARSET_INFO *national_charset_info, *table_alias_charset;
CHARSET_INFO *character_set_filesystem;
SHOW_COMP_OPTION have_row_based_replication;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
......@@ -2806,6 +2808,12 @@ static int init_common_variables(const char *conf_file_name, int argc,
global_system_variables.character_set_client= default_charset_info;
global_system_variables.collation_connection= default_charset_info;
if (!(character_set_filesystem=
get_charset_by_csname(character_set_filesystem_name,
MY_CS_PRIMARY, MYF(MY_WME))))
return 1;
global_system_variables.character_set_filesystem= character_set_filesystem;
sys_init_connect.value_length= 0;
if ((sys_init_connect.value= opt_init_connect))
sys_init_connect.value_length= strlen(opt_init_connect);
......@@ -4792,6 +4800,7 @@ enum options_mysqld
OPT_GROUP_CONCAT_MAX_LEN,
OPT_DEFAULT_COLLATION,
OPT_CHARACTER_SET_CLIENT_HANDSHAKE,
OPT_CHARACTER_SET_FILESYSTEM,
OPT_INIT_CONNECT,
OPT_INIT_SLAVE,
OPT_SECURE_AUTH,
......@@ -4945,6 +4954,11 @@ Disable with --skip-bdb (will save memory).",
(gptr*) &opt_character_set_client_handshake,
(gptr*) &opt_character_set_client_handshake,
0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{"character-set-filesystem", OPT_CHARACTER_SET_FILESYSTEM,
"Set the filesystem character set.",
(gptr*) &character_set_filesystem_name,
(gptr*) &character_set_filesystem_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{"character-set-server", 'C', "Set the default character set.",
(gptr*) &default_character_set_name, (gptr*) &default_character_set_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
......@@ -6969,6 +6983,7 @@ static void mysql_init_variables(void)
files_charset_info= &my_charset_utf8_general_ci;
national_charset_info= &my_charset_utf8_general_ci;
table_alias_charset= &my_charset_bin;
character_set_filesystem= &my_charset_bin;
opt_date_time_formats[0]= opt_date_time_formats[1]= opt_date_time_formats[2]= 0;
......@@ -7022,6 +7037,7 @@ static void mysql_init_variables(void)
default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME;
default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME;
sys_charset_system.value= (char*) system_charset_info->csname;
character_set_filesystem_name= (char*) "binary";
/* Set default values for some option variables */
......
......@@ -193,6 +193,7 @@ sys_var_character_set_database sys_character_set_database("character_set_databas
sys_var_character_set_client sys_character_set_client("character_set_client");
sys_var_character_set_connection sys_character_set_connection("character_set_connection");
sys_var_character_set_results sys_character_set_results("character_set_results");
sys_var_character_set_filesystem sys_character_set_filesystem("character_set_filesystem");
sys_var_thd_ulong sys_completion_type("completion_type",
&SV::completion_type,
check_completion_type,
......@@ -706,6 +707,7 @@ SHOW_VAR init_vars[]= {
{sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS},
{sys_character_set_connection.name,(char*) &sys_character_set_connection,SHOW_SYS},
{sys_character_set_database.name, (char*) &sys_character_set_database,SHOW_SYS},
{sys_character_set_filesystem.name,(char*) &sys_character_set_filesystem, SHOW_SYS},
{sys_character_set_results.name,(char*) &sys_character_set_results, SHOW_SYS},
{sys_character_set_server.name, (char*) &sys_character_set_server,SHOW_SYS},
{sys_charset_system.name, (char*) &sys_charset_system, SHOW_SYS},
......@@ -2021,6 +2023,32 @@ void sys_var_character_set_client::set_default(THD *thd, enum_var_type type)
}
CHARSET_INFO **
sys_var_character_set_filesystem::ci_ptr(THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
return &global_system_variables.character_set_filesystem;
else
return &thd->variables.character_set_filesystem;
}
extern CHARSET_INFO *character_set_filesystem;
void
sys_var_character_set_filesystem::set_default(THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
global_system_variables.character_set_filesystem= character_set_filesystem;
else
{
thd->variables.character_set_filesystem= (global_system_variables.
character_set_filesystem);
thd->update_charset();
}
}
CHARSET_INFO **
sys_var_character_set_results::ci_ptr(THD *thd, enum_var_type type)
{
......
......@@ -549,6 +549,15 @@ class sys_var_character_set :public sys_var_thd
virtual CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type)= 0;
};
class sys_var_character_set_filesystem :public sys_var_character_set
{
public:
sys_var_character_set_filesystem(const char *name_arg) :
sys_var_character_set(name_arg) {}
void set_default(THD *thd, enum_var_type type);
CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type);
};
class sys_var_character_set_client :public sys_var_character_set
{
public:
......
......@@ -167,6 +167,25 @@ Open_tables_state::Open_tables_state(ulong version_arg)
reset_open_tables_state();
}
my_bool thd_in_lock_tables(const THD *thd)
{
return thd->in_lock_tables;
}
my_bool thd_tablespace_op(const THD *thd)
{
return thd->tablespace_op;
}
const char *thd_proc_info(THD *thd, const char *info)
{
const char *old_info= thd->proc_info;
thd->proc_info= info;
return old_info;
}
/*
Pass nominal parameters to Statement constructor only to ensure that
......@@ -658,6 +677,9 @@ void THD::update_charset()
charset_is_collation_connection=
!String::needs_conversion(0,charset(),variables.collation_connection,
&not_used);
charset_is_character_set_filesystem=
!String::needs_conversion(0, charset(),
variables.character_set_filesystem, &not_used);
}
......
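
The thd_in_lock_tables / thd_tablespace_op / thd_proc_info functions added above let plugins read (or, for proc_info, swap) fields of a THD they only ever see as a forward declaration. A compact sketch of that opaque-handle pattern using hypothetical names, not the server's actual headers:

/* What a plugin compiles against: a forward declaration plus plain accessors. */
class Session;                                   /* opaque to the plugin */
bool        session_in_lock_tables(const Session *s);
const char *session_proc_info(Session *s, const char *info);

/* Server side: the full definition never leaves the server. */
class Session
{
public:
  bool in_lock_tables;
  const char *proc_info;
};

bool session_in_lock_tables(const Session *s)
{
  return s->in_lock_tables;
}

const char *session_proc_info(Session *s, const char *info)
{
  const char *old = s->proc_info;                /* like thd_proc_info(): swap and return old */
  s->proc_info = info;
  return old;
}

int main()
{
  Session s;
  s.in_lock_tables = false;
  s.proc_info = "init";
  const char *prev = session_proc_info(&s, "copying rows");
  return (!session_in_lock_tables(&s) && prev != 0) ? 0 : 1;
}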
......@@ -171,6 +171,7 @@ class LEX_COLUMN : public Sql_alloc
class delayed_insert;
class select_result;
class Time_zone;
#define THD_SENTRY_MAGIC 0xfeedd1ff
#define THD_SENTRY_GONE 0xdeadbeef
......@@ -258,6 +259,7 @@ struct system_variables
my_bool old_passwords;
/* Only charset part of these variables is sensible */
CHARSET_INFO *character_set_filesystem;
CHARSET_INFO *character_set_client;
CHARSET_INFO *character_set_results;
......@@ -343,6 +345,8 @@ typedef struct system_status_var
#define last_system_status_var com_stmt_close
#ifdef MYSQL_SERVER
void free_tmp_table(THD *thd, TABLE *entry);
......@@ -353,7 +357,6 @@ void free_tmp_table(THD *thd, TABLE *entry);
#define INIT_ARENA_DBUG_INFO
#endif
class Query_arena
{
public:
......@@ -1126,6 +1129,7 @@ class THD :public Statement,
bool query_error, bootstrap, cleanup_done;
bool tmp_table_used;
bool charset_is_system_charset, charset_is_collation_connection;
bool charset_is_character_set_filesystem;
bool enable_slow_log; /* enable slow log for current statement */
bool no_trans_update, abort_on_warning;
bool got_warning; /* Set on call to push_warning() */
......@@ -1903,3 +1907,5 @@ class select_dumpvar :public select_result_interceptor {
/* Functions in sql_class.cc */
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
#endif /* MYSQL_SERVER */
......@@ -29,6 +29,7 @@ class st_alter_tablespace;
class partition_info;
class event_timed;
#ifdef MYSQL_SERVER
/*
The following hack is needed because mysql_yacc.cc does not define
YYSTYPE before including this file
......@@ -43,6 +44,7 @@ class event_timed;
#include "sql_yacc.h"
#define LEX_YYSTYPE YYSTYPE *
#endif
#endif
/*
When a command is added here, be sure it's also added in mysqld.cc
......@@ -115,6 +117,8 @@ enum enum_sql_command {
*/
#define DESCRIBE_PARTITIONS 4
#ifdef MYSQL_SERVER
enum enum_sp_suid_behaviour
{
SP_IS_DEFAULT_SUID= 0,
......@@ -1109,3 +1113,5 @@ extern int yylex(void *arg, void *yythd);
extern pthread_key(LEX*,THR_LEX);
#define current_lex (current_thd->lex)
#endif
......@@ -730,7 +730,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text
UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
sp_opt_label BIN_NUM label_ident
sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem
%type <lex_str_ptr>
opt_table_alias opt_fulltext_parser
......@@ -7524,7 +7524,7 @@ select_var_ident:
;
into:
INTO OUTFILE TEXT_STRING_sys
INTO OUTFILE TEXT_STRING_filesystem
{
LEX *lex= Lex;
lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
......@@ -7533,7 +7533,7 @@ into:
YYABORT;
}
opt_field_term opt_line_term
| INTO DUMPFILE TEXT_STRING_sys
| INTO DUMPFILE TEXT_STRING_filesystem
{
LEX *lex=Lex;
if (!lex->describe)
......@@ -8596,7 +8596,7 @@ load: LOAD DATA_SYM
};
load_data:
load_data_lock opt_local INFILE TEXT_STRING_sys
load_data_lock opt_local INFILE TEXT_STRING_filesystem
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_LOAD;
......@@ -9124,6 +9124,18 @@ TEXT_STRING_literal:
;
TEXT_STRING_filesystem:
TEXT_STRING
{
THD *thd= YYTHD;
if (thd->charset_is_character_set_filesystem)
$$= $1;
else
thd->convert_string(&$$, thd->variables.character_set_filesystem,
$1.str, $1.length, thd->charset());
}
;
ident:
IDENT_sys { $$=$1; }
| READ_ONLY_SYM
......
......@@ -24,17 +24,16 @@ INCLUDES = -I$(top_srcdir)/include \
-I$(top_srcdir)/regex \
-I$(top_srcdir)/sql \
-I$(srcdir)
WRAPLIBS=
pkglib_LTLIBRARIES = ha_csv.la
ha_csv_la_LDFLAGS = -module
noinst_HEADERS = ha_tina.h
ha_csv_la_SOURCES = ha_tina.cc
pkglib_LIBRARIES = libcsv.a
LDADD =
DEFS = -DMYSQL_SERVER @DEFS@
DEFS = @DEFS@
libcsv_a_CXXFLAGS = $(AM_CFLAGS)
noinst_HEADERS = ha_tina.h
libcsv_a_SOURCES = ha_tina.cc
# Don't update the files from bitkeeper
%::SCCS/s.%
......@@ -590,7 +590,10 @@ void ha_tina::update_status()
}
bool ha_tina::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
bool ha_tina::check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread)
{
/*
Deny locking of the log tables, which is incompatible with
......@@ -598,11 +601,10 @@ bool ha_tina::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
general_log_thd or slow_log_thd.
*/
if (table->s->log_table &&
thd->lex->sql_command != SQLCOM_TRUNCATE &&
!(thd->lex->sql_command == SQLCOM_FLUSH &&
thd->lex->type & REFRESH_LOG) &&
(thd != logger.get_general_log_thd()) &&
(thd != logger.get_slow_log_thd()) &&
sql_command != SQLCOM_TRUNCATE &&
!(sql_command == SQLCOM_FLUSH &&
type & REFRESH_LOG) &&
!called_by_logger_thread &&
(table->reginfo.lock_type >= TL_READ_NO_INSERT))
{
/*
......@@ -665,7 +667,7 @@ int ha_tina::write_row(byte * buf)
int size;
DBUG_ENTER("ha_tina::write_row");
statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
......@@ -714,9 +716,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int size;
DBUG_ENTER("ha_tina::update_row");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
......@@ -751,8 +751,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int ha_tina::delete_row(const byte * buf)
{
DBUG_ENTER("ha_tina::delete_row");
statistic_increment(table->in_use->status_var.ha_delete_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_delete_count);
if (chain_append())
DBUG_RETURN(-1);
......@@ -903,8 +902,7 @@ int ha_tina::rnd_next(byte *buf)
{
DBUG_ENTER("ha_tina::rnd_next");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= next_position;
if (!share->mapped_file)
......@@ -941,8 +939,7 @@ void ha_tina::position(const byte *record)
int ha_tina::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= my_get_ptr(pos,ref_length);
DBUG_RETURN(find_current_row(buf));
}
......
......@@ -104,7 +104,10 @@ class ha_tina: public handler
*/
ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count);
virtual bool check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
......
......@@ -139,21 +139,11 @@ class StartBackupReq {
friend bool printSTART_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( MaxTableTriggers = 4 );
STATIC_CONST( HeaderLength = 5 );
STATIC_CONST( TableTriggerLength = 4);
STATIC_CONST( SignalLength = 2 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
Uint32 noOfSignals;
Uint32 noOfTableTriggers;
struct TableTriggers {
Uint32 tableId;
Uint32 triggerIds[3];
} tableTriggers[MaxTableTriggers];
};
class StartBackupRef {
......@@ -169,7 +159,7 @@ class StartBackupRef {
friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 5 );
STATIC_CONST( SignalLength = 4 );
enum ErrorCode {
FailedToAllocateTriggerRecord = 1
......@@ -177,7 +167,6 @@ public:
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
Uint32 errorCode;
Uint32 nodeId;
};
......@@ -195,12 +184,11 @@ class StartBackupConf {
friend bool printSTART_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 3 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
};
class BackupFragmentReq {
......
......@@ -48,16 +48,8 @@ printDEFINE_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
bool
printSTART_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
StartBackupReq* sig = (StartBackupReq*)data;
fprintf(out, " backupPtr: %d backupId: %d signalNo: %d of %d\n",
sig->backupPtr, sig->backupId,
sig->signalNo + 1, sig->noOfSignals);
for(Uint32 i = 0; i<sig->noOfTableTriggers; i++)
fprintf(out,
" Table: %d Triggers = [ insert: %d update: %d delete: %d ]\n",
sig->tableTriggers[i].tableId,
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_INSERT],
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_UPDATE],
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_DELETE]);
fprintf(out, " backupPtr: %d backupId: %d\n",
sig->backupPtr, sig->backupId);
return true;
}
......
......@@ -96,8 +96,6 @@ protected:
void execGET_TABINFO_CONF(Signal* signal);
void execCREATE_TRIG_REF(Signal* signal);
void execCREATE_TRIG_CONF(Signal* signal);
void execALTER_TRIG_REF(Signal* signal);
void execALTER_TRIG_CONF(Signal* signal);
void execDROP_TRIG_REF(Signal* signal);
void execDROP_TRIG_CONF(Signal* signal);
......@@ -426,6 +424,7 @@ public:
Uint32 clientRef;
Uint32 clientData;
Uint32 flags;
Uint32 signalNo;
Uint32 backupId;
Uint32 backupKey[2];
Uint32 masterRef;
......@@ -452,6 +451,17 @@ public:
Array<Page32> pages; // Used for (un)packing backup request
SimpleProperties props;// Used for (un)packing backup request
struct SlaveData {
SignalCounter trigSendCounter;
Uint32 gsn;
struct {
Uint32 tableId;
} createTrig;
struct {
Uint32 tableId;
} dropTrig;
} slaveData;
struct MasterData {
MasterData(Backup & b)
{
......@@ -462,15 +472,6 @@ public:
Uint32 gsn;
SignalCounter sendCounter;
Uint32 errorCode;
struct {
Uint32 tableId;
} createTrig;
struct {
Uint32 tableId;
} dropTrig;
struct {
Uint32 tableId;
} alterTrig;
union {
struct {
Uint32 startBackup;
......@@ -563,7 +564,7 @@ public:
void defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
void createTrigReply(Signal* signal, BackupRecordPtr ptr);
void alterTrigReply(Signal* signal, BackupRecordPtr ptr);
void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32, Uint32);
void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32);
void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0);
......
......@@ -25,15 +25,12 @@ BACKUP_REQ
<-------------------------------
BACKUP_CONF
<----------------
CREATE_TRIG
--------------> (If master crashes here -> rogue triggers/memory leak)
<--------------
START_BACKUP
------------------------------>
<------------------------------
ALTER_TRIG
CREATE_TRIG
-------------->
<--------------
<------------------------------
WAIT_GCP
-------------->
<--------------
......@@ -46,11 +43,11 @@ BACKUP_CONF
WAIT_GCP
-------------->
<--------------
STOP_BACKUP
------------------------------>
DROP_TRIG
-------------->
<--------------
STOP_BACKUP
------------------------------>
<------------------------------
BACKUP_COMPLETE_REP
<----------------
......
......@@ -62,9 +62,6 @@ Backup::Backup(const Configuration & conf) :
addRecSignal(GSN_CREATE_TRIG_REF, &Backup::execCREATE_TRIG_REF);
addRecSignal(GSN_CREATE_TRIG_CONF, &Backup::execCREATE_TRIG_CONF);
addRecSignal(GSN_ALTER_TRIG_REF, &Backup::execALTER_TRIG_REF);
addRecSignal(GSN_ALTER_TRIG_CONF, &Backup::execALTER_TRIG_CONF);
addRecSignal(GSN_DROP_TRIG_REF, &Backup::execDROP_TRIG_REF);
addRecSignal(GSN_DROP_TRIG_CONF, &Backup::execDROP_TRIG_CONF);
......
......@@ -777,6 +777,10 @@ struct TupTriggerData {
/**
* Trigger id, used by DICT/TRIX to identify the trigger
*
* trigger Ids are unique per block for SUBSCRIPTION triggers.
* This is so that BACKUP can use TUP triggers directly and delete them
* properly.
*/
Uint32 triggerId;
......@@ -2012,7 +2016,9 @@ private:
bool createTrigger(Tablerec* table, const CreateTrigReq* req);
Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req);
Uint32 dropTrigger(Tablerec* table,
const DropTrigReq* req,
BlockNumber sender);
void
checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,
......
......@@ -186,7 +186,7 @@ Dbtup::execDROP_TRIG_REQ(Signal* signal)
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
// Drop trigger
Uint32 r = dropTrigger(tabPtr.p, req);
Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef));
if (r == 0){
// Send conf
DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
......@@ -318,7 +318,7 @@ Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
/* */
/* ---------------------------------------------------------------- */
Uint32
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
{
if (ERROR_INSERTED(4004)) {
CLEAR_ERROR_INSERT_VALUE;
......@@ -330,7 +330,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
TriggerActionTime::Value ttime = req->getTriggerActionTime();
TriggerEvent::Value tevent = req->getTriggerEvent();
// ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent);
// ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender);
ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
ndbrequire(tlist != NULL);
......@@ -339,6 +339,19 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
ljam();
if (ptr.p->triggerId == triggerId) {
if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock)
{
/**
* You can only drop your own triggers for subscription triggers.
* Trigger IDs are private for each block.
*
* SUMA encodes information in the triggerId
*
* Backup doesn't really care about the Ids though.
*/
ljam();
continue;
}
ljam();
tlist->release(ptr.i);
return 0;
......
......@@ -193,7 +193,7 @@ runDDL(NDBT_Context* ctx, NDBT_Step* step){
}
int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
int runDropTablesRestart(NDBT_Context* ctx, NDBT_Step* step){
NdbRestarter restarter;
Ndb* pNdb = GETNDB(step);
......@@ -201,7 +201,7 @@ int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table *tab = ctx->getTab();
pNdb->getDictionary()->dropTable(tab->getName());
if (restarter.restartAll(true) != 0)
if (restarter.restartAll(false) != 0)
return NDBT_FAILED;
if (restarter.waitClusterStarted() != 0)
......@@ -406,6 +406,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
// TEMPORARY FIX
// To erase all tables from cache(s)
// To be removed, maybe replaced by ndb.invalidate();
runDropTable(ctx,step);
{
Bank bank(ctx->m_cluster_connection);
......@@ -416,8 +417,8 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
}
// END TEMPORARY FIX
ndbout << "Performing initial restart" << endl;
if (restarter.restartAll(true) != 0)
ndbout << "Performing restart" << endl;
if (restarter.restartAll(false) != 0)
return NDBT_FAILED;
if (restarter.waitClusterStarted() != 0)
......@@ -465,12 +466,12 @@ TESTCASE("BackupOne",
"Test that backup and restore works on one table \n"
"1. Load table\n"
"2. Backup\n"
"3. Restart -i\n"
"3. Drop tables and restart \n"
"4. Restore\n"
"5. Verify count and content of table\n"){
INITIALIZER(runLoadTable);
INITIALIZER(runBackupOne);
INITIALIZER(runRestartInitial);
INITIALIZER(runDropTablesRestart);
INITIALIZER(runRestoreOne);
VERIFIER(runVerifyOne);
FINALIZER(runClearTable);
......
......@@ -199,7 +199,6 @@ int
NFDuringBackupM_codes[] = {
10003,
10004,
10005,
10007,
10008,
10009,
......@@ -349,6 +348,7 @@ NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz,
int
FailS_codes[] = {
10025,
10027,
10033
};
......
......@@ -213,7 +213,6 @@ Uint32 BackupRestore::map_ng(Uint32 ng)
if (ng == UNDEF_NODEGROUP ||
ng_map[ng].map_array[0] == UNDEF_NODEGROUP)
{
ndbout << "No mapping done" << endl;
return ng;
}
else
......@@ -226,13 +225,11 @@ Uint32 BackupRestore::map_ng(Uint32 ng)
assert(curr_inx < MAX_MAPS_PER_NODE_GROUP);
assert(new_curr_inx < MAX_MAPS_PER_NODE_GROUP);
ndbout << "curr_inx = " << curr_inx << endl;
if (new_curr_inx >= MAX_MAPS_PER_NODE_GROUP)
new_curr_inx = 0;
else if (ng_map[ng].map_array[new_curr_inx] == UNDEF_NODEGROUP)
new_curr_inx = 0;
new_ng = ng_map[ng].map_array[curr_inx];
ndbout << "new_ng = " << new_ng << endl;
ng_map[ng].curr_index = new_curr_inx;
return new_ng;
}
......@@ -249,7 +246,6 @@ bool BackupRestore::map_nodegroups(Uint16 *ng_array, Uint32 no_parts)
for (i = 0; i < no_parts; i++)
{
Uint32 ng;
ndbout << "map_nodegroups loop " << i << ", " << ng_array[i] << endl;
ng = map_ng((Uint32)ng_array[i]);
if (ng != ng_array[i])
mapped = TRUE;
......@@ -279,7 +275,6 @@ bool BackupRestore::search_replace(char *search_str, char **new_data,
char start_delimiter = 0;
DBUG_ENTER("search_replace");
ndbout << "search_replace" << endl;
do
{
char c = **data;
......@@ -635,7 +630,6 @@ BackupRestore::table(const TableS & table){
const char * name = table.getTableName();
ndbout << "Starting to handle table " << name << endl;
/**
* Ignore blob tables
*/
......@@ -675,7 +669,6 @@ BackupRestore::table(const TableS & table){
if (copy.getDefaultNoPartitionsFlag())
{
ndbout << "Default number of partitions" << endl;
/*
Table was defined with default number of partitions. We can restore
it with whatever is the default in this cluster.
......@@ -688,7 +681,6 @@ BackupRestore::table(const TableS & table){
}
else
{
ndbout << "Not default number of partitions" << endl;
/*
Table was defined with specific number of partitions. It should be
restored with the same number of partitions. It will either be
......@@ -697,11 +689,8 @@ BackupRestore::table(const TableS & table){
*/
Uint16 *ng_array = (Uint16*)copy.getFragmentData();
Uint16 no_parts = copy.getFragmentCount();
ndbout << "Map node groups, no_parts = " << no_parts << endl;
ndbout << "ng_array = " << hex << (Uint32)ng_array << endl;
if (map_nodegroups(ng_array, no_parts))
{
ndbout << "Node groups were mapped" << endl;
if (translate_frm(&copy))
{
err << "Create table " << table.getTableName() << " failed: ";
......@@ -709,7 +698,6 @@ BackupRestore::table(const TableS & table){
return false;
}
}
ndbout << "Set fragment Data " << endl;
copy.setFragmentData((const void *)ng_array, no_parts << 1);
}
......
......@@ -183,7 +183,6 @@ static bool insert_ng_map(NODE_GROUP_MAP *ng_map,
uint ng_index= ng_map[index].no_maps;
opt_nodegroup_map_len++;
printf("New node group map for source %u index %u\n",index,ng_index);
if (ng_index >= MAX_MAPS_PER_NODE_GROUP)
return true;
ng_map[index].no_maps++;
......@@ -258,7 +257,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
printf("Error in --nodeid,-n setting, see --help\n");
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
ndbout << "Nodeid = " << ga_nodeId << endl;
info << "Nodeid = " << ga_nodeId << endl;
break;
case 'b':
if (ga_backupId == 0)
......@@ -266,7 +265,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
printf("Error in --backupid,-b setting, see --help\n");
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
ndbout << "Backup Id = " << ga_backupId << endl;
info << "Backup Id = " << ga_backupId << endl;
break;
case OPT_NDB_NODEGROUP_MAP:
/*
......@@ -274,7 +273,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
to nodegroup in new cluster.
*/
opt_nodegroup_map_len= 0;
ndbout << "Analyse node group map" << endl;
info << "Analyse node group map" << endl;
if (analyse_nodegroup_map(opt_nodegroup_map_str,
&opt_nodegroup_map[0]))
{
......@@ -288,12 +287,12 @@ bool
readArguments(int *pargc, char*** pargv)
{
Uint32 i;
ndbout << "Load defaults" << endl;
debug << "Load defaults" << endl;
const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
init_nodegroup_map();
load_defaults("my",load_default_groups,pargc,pargv);
ndbout << "handle_options" << endl;
debug << "handle_options" << endl;
if (handle_options(pargc, pargv, my_long_options, get_one_option))
{
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
......@@ -399,7 +398,7 @@ o verify nodegroup mapping
{
ga_backupPath = *pargv[0];
}
ndbout << "backup path = " << ga_backupPath << endl;
info << "backup path = " << ga_backupPath << endl;
return true;
}
......@@ -445,7 +444,7 @@ main(int argc, char** argv)
{
NDB_INIT(argv[0]);
ndbout << "Start readArguments" << endl;
debug << "Start readArguments" << endl;
if (!readArguments(&argc, &argv))
{
exitHandler(NDBT_FAILED);
......@@ -456,11 +455,11 @@ main(int argc, char** argv)
/**
* we must always load meta data, even if we will only print it to stdout
*/
ndbout << "Start restoring meta data" << endl;
debug << "Start restoring meta data" << endl;
RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
if (!metaData.readHeader())
{
ndbout << "Failed to read " << metaData.getFilename() << endl << endl;
err << "Failed to read " << metaData.getFilename() << endl << endl;
exitHandler(NDBT_FAILED);
}
......@@ -468,58 +467,58 @@ main(int argc, char** argv)
const Uint32 version = tmp.NdbVersion;
char buf[NDB_VERSION_STRING_BUF_SZ];
ndbout << "Ndb version in backup files: "
info << "Ndb version in backup files: "
<< getVersionString(version, 0, buf, sizeof(buf)) << endl;
/**
* check whether we can restore the backup (right version).
*/
ndbout << "Load content" << endl;
debug << "Load content" << endl;
int res = metaData.loadContent();
if (res == 0)
{
ndbout_c("Restore: Failed to load content");
err << "Restore: Failed to load content" << endl;
exitHandler(NDBT_FAILED);
}
ndbout << "Get no of Tables" << endl;
debug << "Get no of Tables" << endl;
if (metaData.getNoOfTables() == 0)
{
ndbout_c("Restore: The backup contains no tables ");
err << "The backup contains no tables" << endl;
exitHandler(NDBT_FAILED);
}
ndbout << "Validate Footer" << endl;
debug << "Validate Footer" << endl;
if (!metaData.validateFooter())
{
ndbout_c("Restore: Failed to validate footer.");
err << "Restore: Failed to validate footer." << endl;
exitHandler(NDBT_FAILED);
}
ndbout << "Init Backup objects" << endl;
debug << "Init Backup objects" << endl;
Uint32 i;
for(i= 0; i < g_consumers.size(); i++)
{
if (!g_consumers[i]->init())
{
clearConsumers();
err << "Failed to initialize consumers" << endl;
exitHandler(NDBT_FAILED);
}
}
ndbout << "Restore objects (tablespaces, ..)" << endl;
debug << "Restore objects (tablespaces, ..)" << endl;
for(i = 0; i<metaData.getNoOfObjects(); i++)
{
for(Uint32 j= 0; j < g_consumers.size(); j++)
if (!g_consumers[j]->object(metaData.getObjType(i),
metaData.getObjPtr(i)))
{
ndbout_c("Restore: Failed to restore table: %s. "
"Exiting...",
metaData[i]->getTableName());
err << "Restore: Failed to restore table: ";
err << metaData[i]->getTableName() << " ... Exiting " << endl;
exitHandler(NDBT_FAILED);
}
}
ndbout << "Restoring tables" << endl;
debug << "Restoring tables" << endl;
for(i = 0; i<metaData.getNoOfTables(); i++)
{
if (checkSysTable(metaData[i]->getTableName()))
......@@ -527,21 +526,20 @@ main(int argc, char** argv)
for(Uint32 j= 0; j < g_consumers.size(); j++)
if (!g_consumers[j]->table(* metaData[i]))
{
ndbout_c("Restore: Failed to restore table: %s. "
"Exiting...",
metaData[i]->getTableName());
err << "Restore: Failed to restore table: ";
err << metaData[i]->getTableName() << " ... Exiting " << endl;
exitHandler(NDBT_FAILED);
}
}
}
ndbout << "Close tables" << endl;
debug << "Close tables" << endl;
for(i= 0; i < g_consumers.size(); i++)
if (!g_consumers[i]->endOfTables())
{
ndbout_c("Restore: Failed while closing tables");
err << "Restore: Failed while closing tables" << endl;
exitHandler(NDBT_FAILED);
}
ndbout << "Iterate over data" << endl;
debug << "Iterate over data" << endl;
if (ga_restore || ga_print)
{
if(_restore_data || _print_data)
......@@ -551,7 +549,7 @@ main(int argc, char** argv)
// Read data file header
if (!dataIter.readHeader())
{
ndbout << "Failed to read header of data file. Exiting..." ;
err << "Failed to read header of data file. Exiting..." << endl;
exitHandler(NDBT_FAILED);
}
......@@ -568,13 +566,13 @@ main(int argc, char** argv)
if (res < 0)
{
ndbout_c("Restore: An error occured while restoring data. "
"Exiting...");
err <<" Restore: An error occured while restoring data. Exiting...";
err << endl;
exitHandler(NDBT_FAILED);
}
if (!dataIter.validateFragmentFooter()) {
ndbout_c("Restore: Error validating fragment footer. "
"Exiting...");
err << "Restore: Error validating fragment footer. ";
err << "Exiting..." << endl;
exitHandler(NDBT_FAILED);
}
} // while (dataIter.readFragmentHeader(res))
......@@ -582,7 +580,7 @@ main(int argc, char** argv)
if (res < 0)
{
err << "Restore: An error occured while restoring data. Exiting... "
<< "res=" << res << endl;
<< "res= " << res << endl;
exitHandler(NDBT_FAILED);
}
......@@ -632,9 +630,8 @@ main(int argc, char** argv)
for(Uint32 j= 0; j < g_consumers.size(); j++)
if (!g_consumers[j]->finalize_table(* metaData[i]))
{
ndbout_c("Restore: Failed to finalize restore table: %s. "
"Exiting...",
metaData[i]->getTableName());
err << "Restore: Failed to finalize restore table: %s. ";
err << "Exiting... " << metaData[i]->getTableName() << endl;
exitHandler(NDBT_FAILED);
}
}
......@@ -646,7 +643,7 @@ main(int argc, char** argv)
for (i= 0; i < g_consumers.size(); i++)
if (!g_consumers[i]->update_apply_status(metaData))
{
ndbout_c("Restore: Failed to restore epoch");
err << "Restore: Failed to restore epoch" << endl;
return -1;
}
}
......