Commit a3fa962b authored by unknown

Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.1-new-ndb

into  perch.ndb.mysql.com:/home/jonas/src/mysql-5.1-new-ndb

parents 21c2e6e1 5e2a3278
@@ -26,7 +26,7 @@ LDADD_LOC = \
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_util.mk.am
ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@
ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@
# Don't update the files from bitkeeper
%::SCCS/s.%
@@ -837,7 +837,7 @@ InitConfigFileParser::parse_mycnf()
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
opt.name = "api";
opt.name = "ndbapi";
opt.id = 256;
opt.value = (gptr*)malloc(sizeof(char*));
opt.var_type = GET_STR;
@@ -18,20 +18,18 @@ testdir=$(prefix)/mysql-test/ndb
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_util.mk.am
include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
test_PROGRAMS = atrt
test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
conf-daily-basic-ndb08.txt \
conf-daily-devel-ndb08.txt \
conf-daily-sql-ndb08.txt \
conf-ndbmaster.txt \
conf-shark.txt \
conf-dl145a.txt
conf-ndbmaster.cnf \
conf-dl145a.cnf test-tests.txt
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
atrt-clear-result.sh autotest-run.sh
atrt_SOURCES = main.cpp setup.cpp files.cpp
atrt_SOURCES = main.cpp run-test.hpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include
LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
$(top_builddir)/storage/ndb/src/libndbclient.la \
@@ -39,6 +37,14 @@ LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
atrt_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \
-I$(top_srcdir)/ndb/src/mgmsrv \
-I$(top_srcdir)/ndb/include/mgmcommon \
-DMYSQLCLUSTERDIR="\"\"" \
-DDEFAULT_PREFIX="\"$(prefix)\""
atrt_LDFLAGS = -static @ndb_bin_am_ldflags@
wrappersdir=$(prefix)/bin
wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
@@ -8,7 +8,7 @@ rm -rf *
while [ $# -gt 0 ]
do
rsync -a "$1" .
rsync -a --exclude='ndb_*_fs/*' "$1" .
shift
done
@@ -2,7 +2,8 @@
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,68 +17,112 @@
#ifndef atrt_config_hpp
#define atrt_config_hpp
#include <getarg.h>
#include <ndb_global.h>
#include <Vector.hpp>
#include <BaseString.hpp>
#include <Logger.hpp>
#include <mgmapi.h>
#include <CpcClient.hpp>
#include <Properties.hpp>
#undef MYSQL_CLIENT
enum ErrorCodes {
enum ErrorCodes
{
ERR_OK = 0,
ERR_NDB_FAILED = 101,
ERR_SERVERS_FAILED = 102,
ERR_MAX_TIME_ELAPSED = 103
};
struct atrt_host {
struct atrt_host
{
size_t m_index;
BaseString m_user;
BaseString m_base_dir;
BaseString m_basedir;
BaseString m_hostname;
SimpleCpcClient * m_cpcd;
Vector<struct atrt_process*> m_processes;
};
struct atrt_options
{
enum Feature {
AO_REPLICATION = 1,
AO_NDBCLUSTER = 2
};
int m_features;
Properties m_loaded;
Properties m_generated;
};
struct atrt_process {
struct atrt_process
{
size_t m_index;
BaseString m_hostname;
struct atrt_host * m_host;
struct atrt_cluster * m_cluster;
enum Type {
ALL = 255,
NDB_DB = 1,
NDB_API = 2,
NDB_MGM = 4,
NDB_REP = 8,
MYSQL_SERVER = 16,
MYSQL_CLIENT = 32
AP_ALL = 255
,AP_NDBD = 1
,AP_NDB_API = 2
,AP_NDB_MGMD = 4
,AP_MYSQLD = 16
,AP_CLIENT = 32
,AP_CLUSTER = 256 // Used for options parsing for "cluster" options
} m_type;
SimpleCpcClient::Process m_proc;
short m_ndb_mgm_port;
NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm
NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm
atrt_process * m_mysqld; // if type == client
atrt_process * m_rep_src; // if type == mysqld
Vector<atrt_process*> m_rep_dst; // if type == mysqld
atrt_options m_options;
};
struct atrt_cluster
{
BaseString m_name;
BaseString m_dir;
Vector<atrt_process*> m_processes;
atrt_options m_options;
};
struct atrt_config {
struct atrt_config
{
bool m_generated;
BaseString m_key;
Vector<atrt_host> m_hosts;
Vector<atrt_process> m_processes;
BaseString m_replication;
Vector<atrt_host*> m_hosts;
Vector<atrt_cluster*> m_clusters;
Vector<atrt_process*> m_processes;
};
struct atrt_testcase {
struct atrt_testcase
{
bool m_report;
bool m_run_all;
time_t m_max_time;
BaseString m_command;
BaseString m_args;
BaseString m_name;
};
extern Logger g_logger;
bool parse_args(int argc, const char** argv);
void require(bool x);
bool parse_args(int argc, char** argv);
bool setup_config(atrt_config&);
bool configure(atrt_config&, int setup);
bool setup_directories(atrt_config&, int setup);
bool setup_files(atrt_config&, int setup, int sshx);
bool deploy(atrt_config&);
bool sshx(atrt_config&, unsigned procmask);
bool start(atrt_config&, unsigned procmask);
bool remove_dir(const char *, bool incl = true);
bool connect_hosts(atrt_config&);
bool connect_ndb_mgm(atrt_config&);
bool wait_ndb(atrt_config&, int ndb_mgm_node_status);
@@ -92,4 +137,25 @@ bool setup_test_case(atrt_config&, const atrt_testcase&);
bool setup_hosts(atrt_config&);
/**
* Global variables...
*/
extern Logger g_logger;
extern atrt_config g_config;
extern const char * g_cwd;
extern const char * g_my_cnf;
extern const char * g_user;
extern const char * g_basedir;
extern const char * g_prefix;
extern int g_baseport;
extern int g_fqpn;
extern int g_default_ports;
extern const char * g_clusters;
extern const char *save_file;
extern const char *save_group_suffix;
extern char *save_extra_file;
#endif
#!/bin/sh
#############################################################
# This script created by Jonas does the following #
# Cleans up clones and previous builds, pulls new clones, #
# builds, deploys, configures the tests and launches ATRT #
#############################################################
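# Illustrative usage (not part of the original script; the flag names come
# from the option parsing below, the values are made up):
#
#   autotest-boot.sh --clone=5.1-new-ndb --conf=$HOME/autotest.conf daily-basic
#
# Any argument that is not a recognized flag is taken as the test suite to
# run and is passed on to autotest-run.sh via $RUN.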
################
# Script setup #
################
save_args=$*
VERSION="autotest-boot.sh version 1.00"
DATE=`date '+%Y-%m-%d'`
HOST=`hostname -s`
export DATE HOST
set -e
echo "`date` starting: $*"
verbose=0
do_clone=yes
build=yes
conf=
LOCK=$HOME/.autotest-lock
############################
# Read command line entries#
############################
while [ "$1" ]
do
case "$1" in
--no-clone) do_clone="";;
--no-build) build="";;
--verbose) verbose=`expr $verbose + 1`;;
--clone=*) clone=`echo $1 | sed s/--clone=//`;;
--version) echo $VERSION; exit;;
--conf=*) conf=`echo $1 | sed s/--conf=//`;;
*) RUN=$*;;
esac
shift
done
#################################
# Make sure the config file     #
# exists; if it does not, exit. #
# If it does, source (.) it.    #
#################################
if [ -z "$conf" ]
then
conf=`pwd`/autotest.conf
fi
if [ -f $conf ]
then
. $conf
else
echo "Can't find config file: $conf"
exit
fi
###############################
# Validate that all interesting
# variables were set in conf
###############################
vars="src_clone_base install_dir build_dir"
for i in $vars
do
t=`echo echo \\$$i`
if [ -z "`eval $t`" ]
then
echo "Invalid config: $conf, variable $i is not set"
exit
fi
done
##################################
# Print out the environment vars #
##################################
if [ $verbose -gt 0 ]
then
env
fi
####################################
# Setup the lock file name and path#
# Setup the clone source location #
####################################
src_clone=$src_clone_base-$clone
#######################################
# Check to see if the lock file exists#
# If it does, exit.                  #
#######################################
if [ -f $LOCK ]
then
echo "Lock file exists: $LOCK"
exit 1
fi
#######################################
# If the lock file does not exist then#
# create it with date and run info #
#######################################
echo "$DATE $RUN" > $LOCK
##################################
# If any error occurs from here  #
# on, trap it and remove the     #
# lock file before exiting       #
##################################
if [ `uname -s` != "SunOS" ]
then
trap "rm -f $LOCK" ERR
fi
################################
# You can add more to this path#
################################
dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$
#########################################
# Delete source and pull down the latest#
#########################################
if [ "$do_clone" ]
then
rm -rf $dst_place
bk clone $src_clone $dst_place
fi
##########################################
# Build the source, run make install, and #
# create the database to be rsynced #
##########################################
if [ "$build" ]
then
cd $dst_place
rm -rf $install_dir
BUILD/compile-ndb-autotest --prefix=$install_dir
make install
fi
################################
# Start run script #
################################
script=$install_dir/mysql-test/ndb/autotest-run.sh
$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock
if [ "$build" ]
then
rm -rf $dst_place
fi
rm -f $LOCK
#!/bin/sh
#############################################################
# This script created by Jonas does the following #
# Cleans up clones and previous builds, pulls new clones, #
# builds, deploys, configures the tests and launches ATRT #
#############################################################
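# Illustrative usage (not part of the original script; these are the flags
# handled by the option parsing below, with made-up values):
#
#   autotest-run.sh --conf=$HOME/autotest.conf --install-dir=/space/autotest \
#                   --suite=daily-basic --clone=5.1-new-ndb --nolock
#
# autotest-boot.sh invokes this script in the same way after building and
# installing the tree.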
################
# Script setup #
################
save_args=$*
VERSION="autotest-run.sh version 1.00"
DATE=`date '+%Y-%m-%d'`
HOST=`hostname -s`
export DATE HOST
set -e
ulimit -Sc unlimited
echo "`date` starting: $*"
RSYNC_RSH=ssh
export RSYNC_RSH
verbose=0
report=yes
nolock=
RUN="daily-basic"
conf=autotest.conf
LOCK=$HOME/.autotest-lock
############################
# Read command line entries#
############################
while [ "$1" ]
do
case "$1" in
--verbose) verbose=`expr $verbose + 1`;;
--conf=*) conf=`echo $1 | sed s/--conf=//`;;
--version) echo $VERSION; exit;;
--suite=*) RUN=`echo $1 | sed s/--suite=//`;;
--install-dir=*) install_dir=`echo $1 | sed s/--install-dir=//`;;
--clone=*) clone=`echo $1 | sed s/--clone=//`;;
--nolock) nolock=true;;
esac
shift
done
#################################
# Make sure the config file     #
# exists; if it does not, exit. #
# If it does, source (.) it.    #
#################################
install_dir_save=$install_dir
if [ -f $conf ]
then
. $conf
else
echo "Can't find config file: $conf"
exit
fi
install_dir=$install_dir_save
###############################
# Validate that all interesting
# variables were set in conf
###############################
vars="target base_dir install_dir hosts"
if [ "$report" ]
then
vars="$vars result_host result_path"
fi
for i in $vars
do
t=`echo echo \\$$i`
if [ -z "`eval $t`" ]
then
echo "Invalid config: $conf, variable $i is not set"
exit
fi
done
##################################
# Print out the environment vars #
##################################
if [ $verbose -gt 0 ]
then
env
fi
#######################################
# Check to see if the lock file exists#
# If it does, exit.                  #
#######################################
if [ -z "$nolock" ]
then
if [ -f $LOCK ]
then
echo "Lock file exists: $LOCK"
exit 1
fi
echo "$DATE $RUN" > $LOCK
fi
##################################
# If any error occurs from here  #
# on, trap it and remove the     #
# lock file before exiting       #
##################################
if [ `uname -s` != "SunOS" ]
then
trap "rm -f $LOCK" ERR
fi
###############################################
# Check that all interesting files are present#
###############################################
test_dir=$install_dir/mysql-test/ndb
atrt=$test_dir/atrt
test_file=$test_dir/$RUN-tests.txt
if [ ! -f "$test_file" ]
then
echo "Cant find testfile: $test_file"
exit 1
fi
if [ ! -x "$atrt" ]
then
echo "Cant find atrt binary at $atrt"
exit 1
fi
###################################
# Check that ndb_cpcc can contact #
# the cpcd daemon on all hosts    #
###################################
failed=`ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}'`
if [ "$failed" ]
then
echo "Cant contact cpcd on $failed, exiting"
exit 1
fi
##################################
# Function for replacing the     #
# CHOOSE_host placeholders with  #
# real host names. Note $$ = PID #
##################################
choose(){
SRC=$1
TMP1=/tmp/choose.$$
TMP2=/tmp/choose.$$.$$
shift
cp $SRC $TMP1
i=1
while [ $# -gt 0 ]
do
sed -e s,"CHOOSE_host$i",$1,g < $TMP1 > $TMP2
mv $TMP2 $TMP1
shift
i=`expr $i + 1`
done
cat $TMP1
rm -f $TMP1
}
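# Illustrative example (not part of the original script): with a template
# my-template.cnf containing the line "ndbd = CHOOSE_host2,CHOOSE_host3",
#
#   choose my-template.cnf alpha beta gamma
#
# prints the template with CHOOSE_host1 replaced by alpha, CHOOSE_host2 by
# beta and CHOOSE_host3 by gamma (my-template.cnf, alpha, beta and gamma are
# made-up names).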
choose_conf(){
if [ -f $test_dir/conf-$1-$HOST.cnf ]
then
echo "$test_dir/conf-$1-$HOST.cnf"
elif [ -f $test_dir/conf-$1.cnf ]
then
echo "$test_dir/conf-$1.cnf"
elif [ -f $test_dir/conf-$HOST.cnf ]
then
echo "$test_dir/conf-$HOST.cnf"
else
echo "Unable to find conf file looked for" 1>&2
echo "$test_dir/conf-$1-$HOST.cnf and" 1>&2
echo "$test_dir/conf-$HOST.cnf" 1>&2
echo "$test_dir/conf-$1.cnf" 1>&2
exit
fi
}
#########################################
# Count how many computers we have ready#
#########################################
count_hosts(){
cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
echo $cnt
}
conf=`choose_conf $RUN`
count=`count_hosts $conf`
avail=`echo $hosts | wc -w`
if [ $count -gt $avail ]
then
echo "Not enough hosts"
echo "Needs: $count available: $avail ($avail_hosts)"
exit 1
fi
###
# Make directories needed
p=`pwd`
run_dir=$install_dir/run-$RUN-mysql-$clone-$target
res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE
tar_dir=$base_dir/saved-results
mkdir -p $run_dir $res_dir $tar_dir
rm -rf $res_dir/* $run_dir/*
###
#
# Do sed substitutions
#
cd $run_dir
choose $conf $hosts > d.tmp.$$
sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf
# Setup configuration
$atrt Cdq my.cnf
# Start...
$atrt --report-file=report.txt --log-file=log.txt --testcase-file=$test_dir/$RUN-tests.txt my.cnf
# Make tar-ball
[ -f log.txt ] && mv log.txt $res_dir
[ -f report.txt ] && mv report.txt $res_dir
[ "`find . -name 'result*'`" ] && mv result* $res_dir
cd $res_dir
echo "date=$DATE" > info.txt
echo "suite=$RUN" >> info.txt
echo "clone=$clone" >> info.txt
echo "arch=$target" >> info.txt
cd ..
p2=`pwd`
cd ..
tarfile=res.$RUN.$clone.$target.$DATE.$HOST.$$.tgz
tar cfz $tar_dir/$tarfile `basename $p2`/$DATE
if [ "$report" ]
then
scp $tar_dir/$tarfile $result_host:$result_path/
fi
cd $p
rm -rf $res_dir $run_dir
if [ -z "$nolock" ]
then
rm -f $LOCK
fi
baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
baseport: 16000
basedir: CHOOSE_dir
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: CHOOSE_dir/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
mysqld: CHOOSE_host1 CHOOSE_host4
mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
mysqld: CHOOSE_host1 CHOOSE_host4
mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
[atrt]
basedir = CHOOSE_dir
baseport = 14000
clusters = .2node
[ndb_mgmd]
[mysqld]
skip-innodb
skip-bdb
[cluster_config.2node]
ndb_mgmd = CHOOSE_host1
ndbd = CHOOSE_host2,CHOOSE_host3
ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
NoOfReplicas = 2
IndexMemory = 100M
DataMemory = 300M
BackupMemory = 64M
MaxNoOfConcurrentScans = 100
MaxNoOfSavedMessages= 1000
SendBufferMemory = 2M
baseport: 14000
basedir: /home/ndbdev/autotest/run
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /home/ndbdev/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
[atrt]
basedir = CHOOSE_dir
baseport = 14000
clusters = .4node
[ndb_mgmd]
[mysqld]
skip-innodb
skip-bdb
[cluster_config.4node]
ndb_mgmd = CHOOSE_host1
ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3
ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
NoOfReplicas = 2
IndexMemory = 100M
DataMemory = 300M
BackupMemory = 64M
MaxNoOfConcurrentScans = 100
MaxNoOfSavedMessages= 1000
SendBufferMemory = 2M
baseport: 14000
basedir: CHOOSE_dir
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: CHOOSE_dir/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
[atrt]
basedir=CHOOSE_dir
baseport=15000
clusters= .master,.slave
replicate= 1.master:1.slave
[ndb_mgmd]
[mysqld]
skip-innodb
skip-bdb
[cluster_config]
MaxNoOfSavedMessages= 1000
DataMemory = 100M
[cluster_config.master]
NoOfReplicas = 2
ndb_mgmd = CHOOSE_host1
ndbd = CHOOSE_host2,CHOOSE_host3
mysqld = CHOOSE_host1
ndbapi= CHOOSE_host1
[cluster_config.slave]
NoOfReplicas = 1
ndb_mgmd = CHOOSE_host4
ndbd = CHOOSE_host4
mysqld = CHOOSE_host4
baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host1 CHOOSE_host1
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
[atrt]
basedir=/home/jonas/atrt
baseport=10000
clusters = .master
clusters= .master,.slave
replicate = 1.master:1.slave
replicate = 2.master:2.slave
[cluster_config]
NoOfReplicas= 2
IndexMemory= 10M
DataMemory= 50M
MaxNoOfConcurrentScans= 100
Diskless = 1
[cluster_config.master]
ndb_mgmd = local1
ndbd = local1,local1
mysqld = local1,local1
ndbapi= local1
NoOfReplicas= 2
[cluster_config.slave]
ndb_mgmd = local1
ndbd = local1
ndbapi= local1
mysqld = local1,local1
NoOfReplicas= 1
[mysqld]
skip-innodb
skip-bdb
#
# Generated by atrt
# Mon May 29 23:27:49 2006
[mysql_cluster.master]
ndb-connectstring= local1:10000
[cluster_config.ndb_mgmd.1.master]
PortNumber= 10000
[cluster_config.ndbd.1.master]
FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.1
[cluster_config.ndbd.2.master]
FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.2
[mysqld.1.master]
datadir= /home/jonas/atrt/cluster.master/mysqld.1
socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
port= 10001
server-id= 1
log-bin
ndb-connectstring= local1:10000
ndbcluster
[client.1.master]
socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
port= 10001
[mysqld.2.master]
datadir= /home/jonas/atrt/cluster.master/mysqld.2
socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
port= 10002
server-id= 2
log-bin
ndb-connectstring= local1:10000
ndbcluster
[client.2.master]
socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
port= 10002
[mysql_cluster.slave]
ndb-connectstring= local1:10003
[cluster_config.ndb_mgmd.1.slave]
PortNumber= 10003
[cluster_config.ndbd.1.slave]
FileSystemPath= /home/jonas/atrt/cluster.slave/ndbd.1
[mysqld.1.slave]
datadir= /home/jonas/atrt/cluster.slave/mysqld.1
socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
port= 10004
server-id= 3
master-host= local1
master-port= 10001
master-user= root
master-password= ""
ndb-connectstring= local1:10003
ndbcluster
[client.1.slave]
socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
port= 10004
[mysqld.2.slave]
datadir= /home/jonas/atrt/cluster.slave/mysqld.2
socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
port= 10005
server-id= 4
master-host= local1
master-port= 10002
master-user= root
master-password= ""
ndb-connectstring= local1:10003
ndbcluster
[client.2.slave]
socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
port= 10005
#include "atrt.hpp"
#include <sys/types.h>
#include <dirent.h>
static bool create_directory(const char * path);
bool
setup_directories(atrt_config& config, int setup)
{
/**
* 0 = validate
* 1 = setup
* 2 = setup+clean
*/
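// Illustrative note (comment added for clarity, not in the original code):
// setup == 2 removes any existing process working directory before it is
// (re)created below, setup == 1 creates missing directories, and
// setup == 0 only verifies that each directory already exists.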
for (size_t i = 0; i < config.m_clusters.size(); i++)
{
atrt_cluster& cluster = *config.m_clusters[i];
for (size_t j = 0; j<cluster.m_processes.size(); j++)
{
atrt_process& proc = *cluster.m_processes[j];
const char * dir = proc.m_proc.m_cwd.c_str();
struct stat sbuf;
int exists = 0;
if (lstat(dir, &sbuf) == 0)
{
if (S_ISDIR(sbuf.st_mode))
exists = 1;
else
exists = -1;
}
switch(setup){
case 0:
switch(exists){
case 0:
g_logger.error("Could not find directory: %s", dir);
return false;
case -1:
g_logger.error("%s is not a directory!", dir);
return false;
}
break;
case 1:
if (exists == -1)
{
g_logger.error("%s is not a directory!", dir);
return false;
}
break;
case 2:
if (exists == 1)
{
if (!remove_dir(dir))
{
g_logger.error("Failed to remove %s!", dir);
return false;
}
exists = 0;
break;
}
else if (exists == -1)
{
if (unlink(dir) != 0)
{
g_logger.error("Failed to remove %s!", dir);
return false;
}
exists = 0;
}
}
if (exists != 1)
{
if (!create_directory(dir))
{
return false;
}
}
}
}
return true;
}
static
void
printfile(FILE* out, Properties& props, const char * section, ...)
{
Properties::Iterator it (&props);
const char * name = it.first();
if (name)
{
va_list ap;
va_start(ap, section);
/* const int ret = */ vfprintf(out, section, ap);
va_end(ap);
fprintf(out, "\n");
for (; name; name = it.next())
{
const char* val;
props.get(name, &val);
fprintf(out, "%s %s\n", name + 2, val);
}
fprintf(out, "\n");
}
fflush(out);
}
bool
setup_files(atrt_config& config, int setup, int sshx)
{
/**
* 0 = validate
* 1 = setup
* 2 = setup+clean
*/
BaseString mycnf;
mycnf.assfmt("%s/my.cnf", g_basedir);
if (mycnf != g_my_cnf)
{
struct stat sbuf;
int ret = lstat(mycnf.c_str(), &sbuf);
if (ret == 0)
{
if (unlink(mycnf.c_str()) != 0)
{
g_logger.error("Failed to remove %s", mycnf.c_str());
return false;
}
}
BaseString cp = "cp ";
cp.appfmt("%s %s", g_my_cnf, mycnf.c_str());
if (system(cp.c_str()) != 0)
{
g_logger.error("Failed to '%s'", cp.c_str());
return false;
}
}
if (setup == 2 || config.m_generated)
{
/**
* Do mysql_install_db
*/
for (size_t i = 0; i < config.m_clusters.size(); i++)
{
atrt_cluster& cluster = *config.m_clusters[i];
for (size_t j = 0; j<cluster.m_processes.size(); j++)
{
atrt_process& proc = *cluster.m_processes[j];
if (proc.m_type == atrt_process::AP_MYSQLD)
{
const char * val;
require(proc.m_options.m_loaded.get("--datadir=", &val));
BaseString tmp;
tmp.assfmt("%s/bin/mysql_install_db --datadir=%s > /dev/null 2>&1",
g_prefix, val);
if (system(tmp.c_str()) != 0)
{
g_logger.error("Failed to mysql_install_db for %s",
proc.m_proc.m_cwd.c_str());
}
else
{
g_logger.info("mysql_install_db for %s",
proc.m_proc.m_cwd.c_str());
}
}
}
}
}
FILE * out = NULL;
if (config.m_generated == false)
{
g_logger.info("Nothing configured...");
}
else
{
out = fopen(mycnf.c_str(), "a+");
if (out == 0)
{
g_logger.error("Failed to open %s for append", mycnf.c_str());
return false;
}
time_t now = time(0);
fprintf(out, "#\n# Generated by atrt\n");
fprintf(out, "# %s\n", ctime(&now));
}
for (size_t i = 0; i < config.m_clusters.size(); i++)
{
atrt_cluster& cluster = *config.m_clusters[i];
if (out)
{
Properties::Iterator it(&cluster.m_options.m_generated);
printfile(out, cluster.m_options.m_generated,
"[mysql_cluster%s]", cluster.m_name.c_str());
}
for (size_t j = 0; j<cluster.m_processes.size(); j++)
{
atrt_process& proc = *cluster.m_processes[j];
if (out)
{
switch(proc.m_type){
case atrt_process::AP_NDB_MGMD:
printfile(out, proc.m_options.m_generated,
"[cluster_config.ndb_mgmd.%d%s]",
proc.m_index, proc.m_cluster->m_name.c_str());
break;
case atrt_process::AP_NDBD:
printfile(out, proc.m_options.m_generated,
"[cluster_config.ndbd.%d%s]",
proc.m_index, proc.m_cluster->m_name.c_str());
break;
case atrt_process::AP_MYSQLD:
printfile(out, proc.m_options.m_generated,
"[mysqld.%d%s]",
proc.m_index, proc.m_cluster->m_name.c_str());
break;
case atrt_process::AP_NDB_API:
break;
case atrt_process::AP_CLIENT:
printfile(out, proc.m_options.m_generated,
"[client.%d%s]",
proc.m_index, proc.m_cluster->m_name.c_str());
break;
case atrt_process::AP_ALL:
case atrt_process::AP_CLUSTER:
abort();
}
}
/**
* Create env.sh
*/
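// Illustrative note (not in the original code): the generated env.sh ends up
// containing lines such as
//   SOME_VAR="value"            (one per entry in proc.m_proc.m_env)
//   PATH=<prefix>/bin:<prefix>/libexec:$PATH
//   export SOME_VAR
//   export PATH
// where SOME_VAR and value are made-up placeholders.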
BaseString tmp;
tmp.assfmt("%s/env.sh", proc.m_proc.m_cwd.c_str());
char **env = BaseString::argify(0, proc.m_proc.m_env.c_str());
if (env[0])
{
Vector<BaseString> keys;
FILE *fenv = fopen(tmp.c_str(), "w+");
if (fenv == 0)
{
g_logger.error("Failed to open %s for writing", tmp.c_str());
return false;
}
for (size_t k = 0; env[k]; k++)
{
tmp = env[k];
int pos = tmp.indexOf('=');
require(pos > 0);
env[k][pos] = 0;
fprintf(fenv, "%s=\"%s\"\n", env[k], env[k]+pos+1);
keys.push_back(env[k]);
free(env[k]);
}
fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix);
keys.push_back("PATH");
for (size_t k = 0; k<keys.size(); k++)
fprintf(fenv, "export %s\n", keys[k].c_str());
fflush(fenv);
fclose(fenv);
}
free(env);
tmp.assfmt("%s/ssh-login.sh", proc.m_proc.m_cwd.c_str());
FILE* fenv = fopen(tmp.c_str(), "w+");
if (fenv == 0)
{
g_logger.error("Failed to open %s for writing", tmp.c_str());
return false;
}
fprintf(fenv, "#!/bin/sh\n");
fprintf(fenv, "cd %s\n", proc.m_proc.m_cwd.c_str());
fprintf(fenv, "[ -f /etc/profile ] && . /etc/profile\n");
fprintf(fenv, ". env.sh\n");
fprintf(fenv, "ulimit -Sc unlimited\n");
fprintf(fenv, "bash -i");
fflush(fenv);
fclose(fenv);
}
}
if (out)
{
fflush(out);
fclose(out);
}
return true;
}
static
bool
create_directory(const char * path)
{
BaseString tmp(path);
Vector<BaseString> list;
if (tmp.split(list, "/") == 0)
{
g_logger.error("Failed to create directory: %s", tmp.c_str());
return false;
}
BaseString cwd = "/";
for (size_t i = 0; i < list.size(); i++)
{
cwd.append(list[i].c_str());
cwd.append("/");
mkdir(cwd.c_str(), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
}
struct stat sbuf;
if (lstat(path, &sbuf) != 0 ||
!S_ISDIR(sbuf.st_mode))
{
g_logger.error("Failed to create directory: %s (%s)",
tmp.c_str(),
cwd.c_str());
return false;
}
return true;
}
bool
remove_dir(const char * path, bool inclusive)
{
DIR* dirp = opendir(path);
if (dirp == 0)
{
if(errno != ENOENT)
{
g_logger.error("Failed to remove >%s< errno: %d %s",
path, errno, strerror(errno));
return false;
}
return true;
}
struct dirent * dp;
BaseString name = path;
name.append("/");
while ((dp = readdir(dirp)) != NULL)
{
if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0))
{
BaseString tmp = name;
tmp.append(dp->d_name);
if (remove(tmp.c_str()) == 0)
{
continue;
}
if (!remove_dir(tmp.c_str()))
{
closedir(dirp);
return false;
}
}
}
closedir(dirp);
if (inclusive)
{
if (rmdir(path) != 0)
{
g_logger.error("Failed to remove >%s< errno: %d %s",
path, errno, strerror(errno));
return false;
}
}
return true;
}
max-time: 600
cmd: testBasic
args: -n PkRead T1
max-time: 1800
cmd: testMgm
args: -n SingleUserMode T1
#
#
# SYSTEM RESTARTS
#
max-time: 1500
cmd: testSystemRestart
args: -n SR3 T6
max-time: 1500
cmd: testSystemRestart
args: -n SR4 T6
max-time: 600
cmd: testBasic
args: -n PkRead T1
@@ -38,6 +38,7 @@ include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
ndb_cpcc_LDADD = $(LDADD)
ndb_cpcc_LDFLAGS = -static
# Don't update the files from bitkeeper
%::SCCS/s.%