Commit 90024205 authored by unknown

Merge joreland@bk-internal.mysql.com:/home/bk/mysql-4.1 into mysql.com:/home/jonas/src/mysql-4.1


ndb/src/cw/cpcd/Process.cpp:
  Auto merged
ndb/src/kernel/error/ErrorReporter.cpp:
  Auto merged
parents 27da9ccc 645a7131
@@ -122,6 +122,7 @@ ParserRow<CPCDAPISession> commands[] =
   CPCD_ARG("stderr", String, Optional, "Redirection of stderr"),
   CPCD_ARG("stdin", String, Optional, "Redirection of stderr"),
   CPCD_ARG("ulimit", String, Optional, "ulimit"),
+  CPCD_ARG("shutdown", String, Optional, "shutdown options"),
   CPCD_CMD("undefine process", &CPCDAPISession::undefineProcess, ""),
   CPCD_CMD_ALIAS("undef", "undefine process", 0),
...
@@ -243,6 +243,12 @@ public:
    * @desc Format c:unlimited d:0 ...
    */
   BaseString m_ulimit;
+
+  /**
+   * @brief shutdown options
+   */
+  BaseString m_shutdown_options;
 private:
   class CPCD *m_cpcd;
   void do_exec();
...
@@ -44,6 +44,8 @@ CPCD::Process::print(FILE * f){
   fprintf(f, "stdout: %s\n", m_stdout.c_str() ? m_stdout.c_str() : "");
   fprintf(f, "stderr: %s\n", m_stderr.c_str() ? m_stderr.c_str() : "");
   fprintf(f, "ulimit: %s\n", m_ulimit.c_str() ? m_ulimit.c_str() : "");
+  fprintf(f, "shutdown: %s\n", m_shutdown_options.c_str() ?
+          m_shutdown_options.c_str() : "");
 }

 CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
@@ -64,6 +66,7 @@ CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
   props.get("stdout", m_stdout);
   props.get("stderr", m_stderr);
   props.get("ulimit", m_ulimit);
+  props.get("shutdown", m_shutdown_options);
   m_status = STOPPED;
   if(strcasecmp(m_type.c_str(), "temporary") == 0){
@@ -454,7 +457,11 @@ CPCD::Process::stop() {
   m_status = STOPPING;
   errno = 0;
-  int ret = kill(-m_pid, SIGTERM);
+  int signo= SIGTERM;
+  if(m_shutdown_options == "SIGKILL")
+    signo= SIGKILL;
+
+  int ret = kill(-m_pid, signo);
   switch(ret) {
   case 0:
     logger.debug("Sent SIGTERM to pid %d", (int)-m_pid);
...
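Note: the stop() change above is the consumer of the new "shutdown" property. When a process is defined with shutdown: SIGKILL, cpcd skips the graceful SIGTERM and kills the process group outright. A minimal standalone sketch of that option-to-signal mapping (hypothetical helper name, not CPCD code):

// Standalone sketch (assumption: not CPCD code): mapping a textual shutdown
// option to the signal sent to the process group, as stop() above now does.
// "SIGKILL" is the only option value this diff shows being honored.
#include <csignal>
#include <cstdio>
#include <string>

static int shutdown_signal(const std::string & opt) {
  // Default matches the previous behaviour: ask the process to terminate.
  return (opt == "SIGKILL") ? SIGKILL : SIGTERM;
}

int main() {
  // In CPCD the chosen signal is sent with kill(-pid, signo), i.e. to the group.
  printf("default -> %d\n", shutdown_signal(""));        // SIGTERM
  printf("SIGKILL -> %d\n", shutdown_signal("SIGKILL")); // SIGKILL
  return 0;
}

The atrt changes further down set this option to "SIGKILL" for mysqld processes.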
@@ -585,34 +585,8 @@ public:
    */
   ArrayPool<TcIndexOperation> c_theIndexOperationPool;
-  /**
-   * The list of index operations
-   */
-  ArrayList<TcIndexOperation> c_theIndexOperations;
   UintR c_maxNumberOfIndexOperations;
-  struct TcSeizedIndexOperation {
-    /**
-     * Next ptr (used in pool/list)
-     */
-    union {
-      Uint32 nextPool;
-      Uint32 nextList;
-    };
-    /**
-     * Prev pointer (used in list)
-     */
-    Uint32 prevList;
-  };
-  /**
-   * Pool of seized index operations
-   */
-  ArrayPool<TcSeizedIndexOperation> c_theSeizedIndexOperationPool;
-  typedef Ptr<TcSeizedIndexOperation> TcSeizedIndexOperationPtr;
   /************************** API CONNECT RECORD ***********************
    * The API connect record contains the connection record to which the
    * application connects.
@@ -650,7 +624,7 @@ public:
   struct ApiConnectRecord {
     ApiConnectRecord(ArrayPool<TcFiredTriggerData> & firedTriggerPool,
-                     ArrayPool<TcSeizedIndexOperation> & seizedIndexOpPool):
+                     ArrayPool<TcIndexOperation> & seizedIndexOpPool):
       theFiredTriggers(firedTriggerPool),
       isIndexOp(false),
       theSeizedIndexOperations(seizedIndexOpPool)
@@ -763,7 +737,7 @@ public:
     UintR accumulatingIndexOp;
     UintR executingIndexOp;
     UintR tcIndxSendArray[6];
-    ArrayList<TcSeizedIndexOperation> theSeizedIndexOperations;
+    ArrayList<TcIndexOperation> theSeizedIndexOperations;
   };
   typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
...
@@ -65,7 +65,6 @@ void Dbtc::initData()
   c_theFiredTriggerPool.setSize(c_maxNumberOfFiredTriggers);
   c_theIndexPool.setSize(c_maxNumberOfIndexes);
   c_theIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
-  c_theSeizedIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
   c_theAttributeBufferPool.setSize(c_transactionBufferSpace);
   c_firedTriggerHash.setSize((c_maxNumberOfFiredTriggers+10)/10);
 }//Dbtc::initData()
@@ -85,7 +84,7 @@ void Dbtc::initRecords()
   for(unsigned i = 0; i<capiConnectFilesize; i++) {
     p = &apiConnectRecord[i];
     new (p) ApiConnectRecord(c_theFiredTriggerPool,
-                             c_theSeizedIndexOperationPool);
+                             c_theIndexOperationPool);
   }
   // Init all fired triggers
   DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
@@ -177,7 +176,6 @@ Dbtc::Dbtc(const class Configuration & conf):
   c_maxNumberOfFiredTriggers(0),
   c_theIndexes(c_theIndexPool),
   c_maxNumberOfIndexes(0),
-  c_theIndexOperations(c_theIndexOperationPool),
   c_maxNumberOfIndexOperations(0),
   m_commitAckMarkerHash(m_commitAckMarkerPool)
 {
...
@@ -11161,18 +11161,18 @@ void Dbtc::execTCINDXREQ(Signal* signal)
     jam();
     // This is a newly started transaction, clean-up
     releaseAllSeizedIndexOperations(regApiPtr);
+    regApiPtr->transid[0] = tcIndxReq->transId1;
+    regApiPtr->transid[1] = tcIndxReq->transId2;
   }//if
-  if (!seizeIndexOperation(regApiPtr, indexOpPtr)) {
+  if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
     jam();
     // Failed to allocate index operation
-    TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
-    tcIndxRef->connectPtr = tcIndxReq->senderData;
-    tcIndxRef->transId[0] = regApiPtr->transid[0];
-    tcIndxRef->transId[1] = regApiPtr->transid[1];
-    tcIndxRef->errorCode = 4000;
-    sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
-               TcIndxRef::SignalLength, JBB);
+    terrorCode = 288;
+    regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
+    apiConnectptr = transPtr;
+    abortErrorLab(signal);
     return;
   }
   TcIndexOperation* indexOp = indexOpPtr.p;
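Note: the new failure path reports an out-of-index-operations condition as error code 288 via abortErrorLab(), and ERROR_INSERTED(8036) lets a test force that path without actually exhausting the pool. A standalone sketch of the same compile-in error-injection idea (hypothetical names, not the NDB kernel macro):

// Standalone sketch (assumption: simplified stand-ins; the real macro is
// ERROR_INSERTED and is armed at runtime by the test framework).
#include <cstdio>

static int g_injected_error = 0;                       // set by a test harness
static bool error_inserted(int code) { return g_injected_error == code; }

static bool seize_index_operation() { return true; }   // pretend pool never empty

static bool try_seize(int & error_code) {
  // Mirrors: if (ERROR_INSERTED(8036) || !seizeIndexOperation(...)) { terrorCode = 288; ... }
  if (error_inserted(8036) || !seize_index_operation()) {
    error_code = 288;                                  // "Out of index operations ..."
    return false;
  }
  return true;
}

int main() {
  int err = 0;
  g_injected_error = 8036;                             // test forces the failure branch
  if (!try_seize(err))
    printf("aborted with error %d\n", err);
  return 0;
}

The matching text for error 288 is registered in ndberror.c later in this diff.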
@@ -11307,8 +11307,9 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
     TcIndexOperationPtr indexOpPtr;
     TcIndexOperation* indexOp;
-    indexOpPtr.i = regApiPtr->accumulatingIndexOp;
-    indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+    if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
+    {
+      indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
     if (saveINDXKEYINFO(signal,
                         indexOp,
                         src,
@@ -11317,6 +11318,7 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
       // We have received all we need
       readIndexTable(signal, regApiPtr, indexOp);
     }
+    }
   }

 void Dbtc::execINDXATTRINFO(Signal* signal)
@@ -11338,8 +11340,9 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
     TcIndexOperationPtr indexOpPtr;
     TcIndexOperation* indexOp;
-    indexOpPtr.i = regApiPtr->accumulatingIndexOp;
-    indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+    if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
+    {
+      indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
     if (saveINDXATTRINFO(signal,
                         indexOp,
                         src,
@@ -11348,6 +11351,7 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
       // We have received all we need
       readIndexTable(signal, regApiPtr, indexOp);
     }
+    }
   }

 /**
@@ -11371,7 +11375,7 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
     releaseIndexOperation(apiConnectptr.p, indexOp);
     terrorCode = 4000;
     abortErrorLab(signal);
-    return true;
+    return false;
   }
   if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
     jam();
@@ -11404,7 +11408,7 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
     releaseIndexOperation(apiConnectptr.p, indexOp);
     terrorCode = 4000;
     abortErrorLab(signal);
-    return true;
+    return false;
   }
   if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
     jam();
@@ -11464,7 +11468,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
   jamEntry();
   indexOpPtr.i = tcKeyConf->apiConnectPtr;
-  TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+  TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
   Uint32 confInfo = tcKeyConf->confInfo;

   /**
@@ -11553,7 +11557,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
   jamEntry();
   indexOpPtr.i = tcKeyRef->connectPtr;
-  TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+  TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
   indexOpPtr.p = indexOp;
   if (!indexOp) {
     jam();
@@ -11654,7 +11658,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
   jamEntry();
   TcIndexOperationPtr indexOpPtr;
   indexOpPtr.i = transIdAI->connectPtr;
-  TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+  TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
   indexOpPtr.p = indexOp;
   if (!indexOp) {
     jam();
@@ -11762,7 +11766,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
   jamEntry();
   TcIndexOperationPtr indexOpPtr;
   indexOpPtr.i = tcRollbackRep->connectPtr;
-  TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+  TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
   indexOpPtr.p = indexOp;
   tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
   tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
@@ -12090,16 +12094,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
 bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
                                TcIndexOperationPtr& indexOpPtr)
 {
-  bool seizeOk;
-  seizeOk = c_theIndexOperations.seize(indexOpPtr);
-  if (seizeOk) {
-    jam();
-    TcSeizedIndexOperationPtr seizedIndexOpPtr;
-    seizeOk &= regApiPtr->theSeizedIndexOperations.seizeId(seizedIndexOpPtr,
-                                                           indexOpPtr.i);
-  }
-  return seizeOk;
+  return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
 }

 void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
@@ -12113,18 +12108,16 @@ void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
   indexOp->expectedTransIdAI = 0;
   indexOp->transIdAI.release();
   regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
-  c_theIndexOperations.release(indexOp->indexOpId);
 }

 void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
 {
-  TcSeizedIndexOperationPtr seizedIndexOpPtr;
+  TcIndexOperationPtr seizedIndexOpPtr;
   regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
   while(seizedIndexOpPtr.i != RNIL) {
     jam();
-    TcIndexOperation* indexOp =
-      c_theIndexOperations.getPtr(seizedIndexOpPtr.i);
+    TcIndexOperation* indexOp = seizedIndexOpPtr.p;
     indexOp->indexOpState = IOS_NOOP;
     indexOp->expectedKeyInfo = 0;
@@ -12133,7 +12126,6 @@ void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
     indexOp->attrInfo.release();
     indexOp->expectedTransIdAI = 0;
     indexOp->transIdAI.release();
-    c_theIndexOperations.release(seizedIndexOpPtr.i);
     regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
   }
   regApiPtr->theSeizedIndexOperations.release();
...
@@ -130,7 +130,7 @@ ErrorReporter::formatMessage(ErrorCategory type,
           "Date/Time: %s\nType of error: %s\n"
           "Message: %s\nFault ID: %d\nProblem data: %s"
           "\nObject of reference: %s\nProgramName: %s\n"
-          "ProcessID: %d\nTraceFile: %s\n***EOM***\n",
+          "ProcessID: %d\nTraceFile: %s\n%s\n***EOM***\n",
           formatTimeStampString() ,
           errorType[type],
           lookupErrorMessage(faultID),
@@ -139,7 +139,8 @@ ErrorReporter::formatMessage(ErrorCategory type,
           objRef,
           my_progname,
           processId,
-          theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>");
+          theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>",
+          NDB_VERSION_STRING);

   // Add trailing blanks to get a fixed lenght of the message
   while (strlen(messptr) <= MESSAGE_LENGTH-3){
...
@@ -857,7 +857,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
     args.put("initialstart", initial);
     args.put("nostart", nostart);
     const Properties *reply;
+    const int timeout = handle->read_timeout;
+    handle->read_timeout= 5*60*1000; // 5 minutes
     reply = ndb_mgm_call(handle, restart_reply, "restart all", &args);
+    handle->read_timeout= timeout;
     CHECK_REPLY(reply, -1);

     BaseString result;
@@ -890,7 +893,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
   args.put("nostart", nostart);
   const Properties *reply;
+  const int timeout = handle->read_timeout;
+  handle->read_timeout= 5*60*1000; // 5 minutes
   reply = ndb_mgm_call(handle, restart_reply, "restart node", &args);
+  handle->read_timeout= timeout;
   if(reply != NULL) {
     BaseString result;
     reply->get("result", result);
...
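Note: both call sites bump the management connection's read timeout to five minutes around the blocking restart request and then restore the saved value by hand. A standalone C++ sketch of the same save-and-restore idea expressed as a scope guard (hypothetical type names; the MGM API is plain C and does it manually as shown above):

// Standalone sketch (assumption: Handle is a stand-in for NdbMgmHandle):
// temporarily raise a timeout for one slow call, restore it automatically.
#include <cstdio>

struct Handle { int read_timeout; };

class ScopedTimeout {
  Handle & m_h;
  int m_saved;
public:
  ScopedTimeout(Handle & h, int ms) : m_h(h), m_saved(h.read_timeout) {
    m_h.read_timeout = ms;
  }
  ~ScopedTimeout() { m_h.read_timeout = m_saved; }   // restored on any exit path
};

int main() {
  Handle h = { 50 };
  {
    ScopedTimeout guard(h, 5 * 60 * 1000);           // 5 minutes, as in the diff
    printf("during restart call: %d ms\n", h.read_timeout);
  }
  printf("after: %d ms\n", h.read_timeout);          // back to 50
  return 0;
}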
@@ -66,6 +66,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
 {
   ndbSetOwnVersion();
   clusterMgrThreadMutex = NdbMutex_Create();
+  noOfAliveNodes= 0;
   noOfConnectedNodes= 0;
   theClusterMgrThread= 0;
 }
@@ -335,9 +336,9 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
   node.m_state = apiRegConf->nodeState;
   if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
                           node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
-    node.m_alive = true;
+    set_node_alive(node, true);
   } else {
-    node.m_alive = false;
+    set_node_alive(node, false);
   }//if
   node.hbSent = 0;
   node.hbCounter = 0;
@@ -360,7 +361,7 @@ ClusterMgr::execAPI_REGREF(const Uint32 * theData){
   assert(node.defined == true);
   node.compatible = false;
-  node.m_alive = false;
+  set_node_alive(node, false);
   node.m_state = NodeState::SL_NOTHING;
   node.m_info.m_version = ref->version;
@@ -437,7 +438,7 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
   Node & theNode = theNodes[nodeId];
-  theNode.m_alive = false;
+  set_node_alive(theNode, false);
   if(theNode.connected)
     theFacade.doDisconnect(nodeId);
@@ -450,7 +451,7 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
   theNode.nfCompleteRep = false;
-  if(noOfConnectedNodes == 0){
+  if(noOfAliveNodes == 0){
     NFCompleteRep rep;
     for(Uint32 i = 1; i<MAX_NODES; i++){
       if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){
...
@@ -80,6 +80,7 @@ public:
   Uint32 getNoOfConnectedNodes() const;
 private:
+  Uint32 noOfAliveNodes;
   Uint32 noOfConnectedNodes;
   Node theNodes[MAX_NODES];
   NdbThread* theClusterMgrThread;
@@ -100,6 +101,19 @@ private:
   void execAPI_REGREF (const Uint32 * theData);
   void execNODE_FAILREP (const Uint32 * theData);
   void execNF_COMPLETEREP(const Uint32 * theData);
+
+  inline void set_node_alive(Node& node, bool alive){
+    if(node.m_alive && !alive)
+    {
+      assert(noOfAliveNodes);
+      noOfAliveNodes--;
+    }
+    else if(!node.m_alive && alive)
+    {
+      noOfAliveNodes++;
+    }
+    node.m_alive = alive;
+  }
 };

 inline
...
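Note: the new noOfAliveNodes counter is only ever changed through set_node_alive(), which keeps it consistent with the per-node m_alive flags; this is what lets reportNodeFailed() test if(noOfAliveNodes == 0) instead of the connected-node count. A standalone sketch of the same "aggregate counter maintained next to per-element flags" idea (hypothetical names, not the ClusterMgr types):

// Standalone sketch (assumption: simplified stand-ins for Node/ClusterMgr):
// funnel every alive/dead transition through one setter so the cached count
// and the per-node flags can never disagree.
#include <cassert>
#include <cstdio>

struct Node { bool alive = false; };

class Cluster {
  static const int MAX = 4;
  Node m_nodes[MAX];
  unsigned m_alive = 0;                      // mirrors noOfAliveNodes
public:
  void set_alive(int id, bool alive) {
    Node & n = m_nodes[id];
    if (n.alive && !alive) { assert(m_alive); m_alive--; }
    else if (!n.alive && alive) { m_alive++; }
    n.alive = alive;                         // flag and counter change together
  }
  bool none_alive() const { return m_alive == 0; }
};

int main() {
  Cluster c;
  c.set_alive(1, true);
  c.set_alive(2, true);
  c.set_alive(1, false);
  c.set_alive(1, false);                     // repeated call: counter untouched
  c.set_alive(2, false);
  printf("none alive: %s\n", c.none_alive() ? "yes" : "no");
  return 0;
}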
@@ -169,7 +169,7 @@ ErrorBundle ErrorCodes[] = {
   { 4021, TR, "Out of Send Buffer space in NDB API" },
   { 4022, TR, "Out of Send Buffer space in NDB API" },
   { 4032, TR, "Out of Send Buffer space in NDB API" },
+  { 288,  TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },

   /**
    * InsufficientSpace
    */
...
@@ -56,6 +56,7 @@ public:
     BaseString m_stdout;
     BaseString m_stderr;
     BaseString m_ulimit;
+    BaseString m_shutdown_options;
   };

 private:
...
@@ -359,7 +359,7 @@ int runLateCommit(NDBT_Context* ctx, NDBT_Step* step){
     if(hugoOps.startTransaction(pNdb) != 0)
       return NDBT_FAILED;
-    if(hugoOps.pkUpdateRecord(pNdb, 1) != 0)
+    if(hugoOps.pkUpdateRecord(pNdb, 1, 128) != 0)
       return NDBT_FAILED;
     if(hugoOps.execute_NoCommit(pNdb) != 0)
...
@@ -6,7 +6,12 @@ include $(top_srcdir)/ndb/config/type_util.mk.am
 include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am

 test_PROGRAMS = atrt
-test_DATA=daily-basic-tests.txt daily-devel-tests.txt
+test_DATA=daily-basic-tests.txt daily-devel-tests.txt \
+          conf-daily-basic-ndbmaster.txt \
+          conf-daily-basic-shark.txt \
+          conf-daily-devel-ndbmaster.txt \
+          conf-daily-sql-ndbmaster.txt \
+          conf-daily-basic-dl145a.txt
 test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
 atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
...
baseport: 14000
basedir: /home/ndbdev/autotest/run
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /home/ndbdev/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .

baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .

baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host1 CHOOSE_host1
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .

baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .

baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
mysqld: CHOOSE_host1 CHOOSE_host4
mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
@@ -116,10 +116,7 @@ main(int argc, const char ** argv){
    */
   if(restart){
     g_logger.info("(Re)starting ndb processes");
-    if(!stop_processes(g_config, atrt_process::NDB_MGM))
-      goto end;
-    if(!stop_processes(g_config, atrt_process::NDB_DB))
+    if(!stop_processes(g_config, ~0))
       goto end;

     if(!start_processes(g_config, atrt_process::NDB_MGM))
@@ -142,6 +139,9 @@ main(int argc, const char ** argv){
       goto end;

   started:
+    if(!start_processes(g_config, p_servers))
+      goto end;
+
     g_logger.info("Ndb start completed");
   }
@@ -158,9 +158,6 @@ main(int argc, const char ** argv){
     if(!setup_test_case(g_config, test_case))
       goto end;

-    if(!start_processes(g_config, p_servers))
-      goto end;
-
     if(!start_processes(g_config, p_clients))
       goto end;
@@ -201,9 +198,6 @@ main(int argc, const char ** argv){
     if(!stop_processes(g_config, p_clients))
       goto end;

-    if(!stop_processes(g_config, p_servers))
-      goto end;
-
     if(!gather_result(g_config, &result))
       goto end;
@@ -454,6 +448,7 @@ setup_config(atrt_config& config){
       proc.m_proc.m_runas = proc.m_host->m_user;
       proc.m_proc.m_ulimit = "c:unlimited";
       proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str());
+      proc.m_proc.m_shutdown_options = "";
       proc.m_hostname = proc.m_host->m_hostname;
       proc.m_ndb_mgm_port = g_default_base_port;
       if(split1[0] == "mgm"){
@@ -476,21 +471,19 @@ setup_config(atrt_config& config){
         proc.m_proc.m_path.assign(dir).append("/libexec/mysqld");
         proc.m_proc.m_args = "--core-file --ndbcluster";
         proc.m_proc.m_cwd.appfmt("%d.mysqld", index);
-        if(mysql_port_offset > 0 || g_mysqld_use_base){
-          // setup mysql specific stuff
-          const char * basedir = proc.m_proc.m_cwd.c_str();
-          proc.m_proc.m_args.appfmt("--datadir=%s", basedir);
-          proc.m_proc.m_args.appfmt("--pid-file=%s/mysql.pid", basedir);
-          proc.m_proc.m_args.appfmt("--socket=%s/mysql.sock", basedir);
-          proc.m_proc.m_args.appfmt("--port=%d",
-                                    g_default_base_port-(++mysql_port_offset));
-        }
+        proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
       } else if(split1[0] == "api"){
         proc.m_type = atrt_process::NDB_API;
         proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api");
         proc.m_proc.m_path = "";
         proc.m_proc.m_args = "";
         proc.m_proc.m_cwd.appfmt("%d.ndb_api", index);
+      } else if(split1[0] == "mysql"){
+        proc.m_type = atrt_process::MYSQL_CLIENT;
+        proc.m_proc.m_name.assfmt("%d-%s", index, "mysql");
+        proc.m_proc.m_path = "";
+        proc.m_proc.m_args = "";
+        proc.m_proc.m_cwd.appfmt("%d.mysql", index);
       } else {
         g_logger.critical("%s:%d: Unhandled process type: %s",
                           g_process_config_filename, lineno,
@@ -914,6 +907,11 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
   else
     tc.m_report= false;

+  if(p.get("run-all", &mt) && strcmp(mt, "yes") == 0)
+    tc.m_run_all= true;
+  else
+    tc.m_run_all= false;
+
   return true;
 }
@@ -928,16 +926,17 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
   size_t i = 0;
   for(; i<config.m_processes.size(); i++){
     atrt_process & proc = config.m_processes[i];
-    if(proc.m_type == atrt_process::NDB_API){
+    if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
       proc.m_proc.m_path.assfmt("%s/bin/%s", proc.m_host->m_base_dir.c_str(),
                                 tc.m_command.c_str());
       proc.m_proc.m_args.assign(tc.m_args);
+      if(!tc.m_run_all)
         break;
     }
   }
   for(i++; i<config.m_processes.size(); i++){
     atrt_process & proc = config.m_processes[i];
-    if(proc.m_type == atrt_process::NDB_API){
+    if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
       proc.m_proc.m_path.assign("");
       proc.m_proc.m_args.assign("");
     }
...
--- make-config.sh (old version, removed)

#!/bin/sh
# NAME
# make-config.sh - Makes a config file for mgm server
#
# SYNOPSIS
# make-config.sh [ -t <template> ] [-s] [ -m <machine conf> [ -d <directory> ]
#
# DESCRIPTION
#
# OPTIONS
#
# EXAMPLES
#
#
# ENVIRONMENT
# NDB_PROJ_HOME       Home dir for ndb
#
# FILES
# $NDB_PROJ_HOME/lib/funcs.sh  general shell script functions
#
#
# SEE ALSO
#
# DIAGNOSTICTS
#
# VERSION
# 1.0
# 1.1 021112 epesson: Adapted for new mgmt server in NDB 2.00
#
# AUTHOR
# Jonas Oreland
#
# CHANGES
# also generate ndbnet config
#

progname=`basename $0`
synopsis="make-config.sh [ -t template ] [ -m <machine conf> ] [ -d <dst directory> ][-s] [<mgm host>]"

#: ${NDB_PROJ_HOME:?}             # If undefined, exit with error message

#: ${NDB_LOCAL_BUILD_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
                                  # You may have to experiment a bit
                                  # to get quoting right (if you need it).

#. $NDB_PROJ_HOME/lib/funcs.sh    # Load some good stuff

trace() {
    echo $* 1>&2
}

syndie() {
    trace $*
    exit 1
}

# defaults for options related variables
#
mgm_nodes=0
ndb_nodes=0
api_nodes=0
uniq_id=$$.$$
own_host=`hostname`
dst_dir=""
template=/dev/null
machines=/dev/null
verbose=yes

# used if error when parsing the options environment variable
#
env_opterr="options environment variable: <<$options>>"

# Option parsing, for the options variable as well as the command line.
#
# We want to be able to set options in an environment variable,
# as well as on the command line. In order not to have to repeat
# the same getopts information twice, we loop two times over the
# getopts while loop. The first time, we process options from
# the options environment variable, the second time we process
# options from the command line.
#
# The things to change are the actual options and what they do.
#

add_node(){
    no=$1; shift
    type=$1; shift
    echo $* | awk 'BEGIN{FS=":";}{h=$1; if(h=="localhost") h="'$own_host'";
        printf("%s_%d_host=%s\n", "'$type'", "'$no'", h);
        if(NF>1 && $2!="") printf("%s_%d_port=%d\n",
                                  "'$type'", "'$no'", $2);
        if(NF>2 && $3!="") printf("%s_%d_dir=%s\n",
                                  "'$type'", "'$no'", $3);
    }'
}

add_mgm_node(){
    mgm_nodes=`cat /tmp/mgm_nodes.$uniq_id | grep "_host=" | wc -l`
    mgm_nodes=`expr $mgm_nodes + 1`
    while [ $# -gt 0 ]
    do
        add_node ${mgm_nodes} mgm_node $1 >> /tmp/mgm_nodes.$uniq_id
        shift
        mgm_nodes=`expr $mgm_nodes + 1`
    done
}

add_ndb_node(){
    ndb_nodes=`cat /tmp/ndb_nodes.$uniq_id | grep "_host=" | wc -l`
    ndb_nodes=`expr $ndb_nodes + 1`
    while [ $# -gt 0 ]
    do
        add_node ${ndb_nodes} ndb_node $1 >> /tmp/ndb_nodes.$uniq_id
        shift
        ndb_nodes=`expr $ndb_nodes + 1`
    done
}

add_api_node(){
    api_nodes=`cat /tmp/api_nodes.$uniq_id | grep "_host=" |wc -l`
    api_nodes=`expr $api_nodes + 1`
    while [ $# -gt 0 ]
    do
        add_node ${api_nodes} api_node $1 >> /tmp/api_nodes.$uniq_id
        shift
        api_nodes=`expr $api_nodes + 1`
    done
}

rm -rf /tmp/mgm_nodes.$uniq_id ; touch /tmp/mgm_nodes.$uniq_id
rm -rf /tmp/ndb_nodes.$uniq_id ; touch /tmp/ndb_nodes.$uniq_id
rm -rf /tmp/api_nodes.$uniq_id ; touch /tmp/api_nodes.$uniq_id

for optstring in "$options" ""      # 1. options variable  2. cmd line
do
    while getopts d:m:t:n:o:a:b:p:s i $optstring  # optstring empty => no arg => cmd line
    do
        case $i in
        q) verbose="";;             # echo important things
        t) template=$OPTARG;;       # Template
        d) dst_dir=$OPTARG;;        # Destination directory
        m) machines=$OPTARG;;       # Machine configuration
        s) mgm_start=yes;;          # Make mgm start script
        \?) syndie $env_opterr;;    # print synopsis and exit
        esac
    done
    [ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmdline options
    env_opterr=                     # Round 2 should not use the value
done
shift `expr $OPTIND - 1`

if [ -z "$dst_dir" ]
then
    verbose=
fi

skip(){
    no=$1; shift
    shift $no
    echo $*
}

# --- option parsing done ---

grep "^ndb: " $machines | while read node
do
    node=`skip 1 $node`
    add_ndb_node $node
done

grep "^api: " $machines | while read node
do
    node=`skip 1 $node`
    add_api_node $node
done

grep "^mgm: " $machines | while read node
do
    node=`skip 1 $node`
    add_mgm_node $node
done

tmp=`grep "^baseport: " $machines | tail -1 | cut -d ":" -f 2`
if [ "$tmp" ]
then
    baseport=`echo $tmp`
else
    syndie "Unable to find baseport"
fi

trim(){
    echo $*
}

tmp=`grep "^basedir: " $machines | tail -1 | cut -d ":" -f 2`
if [ "$tmp" ]
then
    basedir=`trim $tmp`
fi

# -- Load enviroment --
ndb_nodes=`cat /tmp/ndb_nodes.$uniq_id | grep "_host=" | wc -l`
api_nodes=`cat /tmp/api_nodes.$uniq_id | grep "_host=" | wc -l`
mgm_nodes=`cat /tmp/mgm_nodes.$uniq_id | grep "_host=" | wc -l`
. /tmp/ndb_nodes.$uniq_id
. /tmp/api_nodes.$uniq_id
. /tmp/mgm_nodes.$uniq_id
rm -f /tmp/ndb_nodes.$uniq_id /tmp/api_nodes.$uniq_id /tmp/mgm_nodes.$uniq_id

# -- Verify
trace "Verifying arguments"

if [ ! -r $template ]
then
    syndie "Unable to read template file: $template"
fi

if [ $ndb_nodes -le 0 ]
then
    syndie "No ndb nodes specified"
fi

if [ $api_nodes -le 0 ]
then
    syndie "No api nodes specified"
fi

if [ $mgm_nodes -gt 1 ]
then
    syndie "More than one mgm node specified"
fi

if [ $mgm_nodes -eq 0 ]
then
    trace "No managment server specified using `hostname`"
    mgm_nodes=1
    mgm_node_1=`hostname`
fi

if [ -n "$dst_dir" ]
then
    mkdir -p $dst_dir
    if [ ! -d $dst_dir ]
    then
        syndie "Unable to create dst dir: $dst_dir"
    fi
    DST=/tmp/$uniq_id
fi

# --- option verifying done ---

# Find uniq computers
i=1
while [ $i -le $mgm_nodes ]
do
    echo `eval echo "\$"mgm_node_${i}_host` >> /tmp/hosts.$uniq_id
    i=`expr $i + 1`
done
i=1
while [ $i -le $ndb_nodes ]
do
    echo `eval echo "\$"ndb_node_${i}_host` >> /tmp/hosts.$uniq_id
    i=`expr $i + 1`
done
i=1
while [ $i -le $api_nodes ]
do
    echo `eval echo "\$"api_node_${i}_host` >> /tmp/hosts.$uniq_id
    i=`expr $i + 1`
done
sort -u -o /tmp/hosts.$uniq_id /tmp/hosts.$uniq_id

get_computer_id(){
    grep -w -n $1 /tmp/hosts.$uniq_id | cut -d ":" -f 1
}

get_mgm_computer_id(){
    a=`eval echo "\$"mgm_node_${1}_host`
    get_computer_id $a
}

get_ndb_computer_id(){
    a=`eval echo "\$"ndb_node_${1}_host`
    get_computer_id $a
}

get_api_computer_id(){
    a=`eval echo "\$"api_node_${1}_host`
    get_computer_id $a
}

# -- Write config files --
mgm_port=$baseport
(
    i=1
    #echo "COMPUTERS"
    cat /tmp/hosts.$uniq_id | while read host
    do
        echo "[COMPUTER]"
        echo "Id: $i"
        echo "ByteOrder: Big"
        echo "HostName: $host"
        echo
        i=`expr $i + 1`
    done
    node_id=1
    echo
    # Mgm process
    echo
    echo "[MGM]"
    echo "Id: $node_id"
    echo "ExecuteOnComputer: `get_mgm_computer_id 1`"
    echo "PortNumber: $mgm_port"
    node_id=`expr $node_id + 1`

    # Ndb processes
    i=1
    ndb_nodes=`trim $ndb_nodes`
    while [ $i -le $ndb_nodes ]
    do
        echo
        echo "[DB]"
        echo "Id: $node_id"
        echo "ExecuteOnComputer: `get_ndb_computer_id $i`"
        echo "FileSystemPath: $basedir/run/node-${node_id}-fs"
        i=`expr $i + 1`
        node_id=`expr $node_id + 1`
    done

    # API processes
    i=1
    while [ $i -le $api_nodes ]
    do
        echo
        echo "[API]"
        echo "Id: $node_id"
        echo "ExecuteOnComputer: `get_api_computer_id $i`"
        i=`expr $i + 1`
        node_id=`expr $node_id + 1`
    done

    # Connections
    current_port=`expr $mgm_port + 1`
    echo

    # Connect Mgm with all ndb-nodes
    i=1
    while [ $i -le $ndb_nodes ]
    do
        echo
        echo "[TCP]"
        echo "NodeId1: 1"
        echo "NodeId2: `expr $i + 1`"
        echo "PortNumber: $current_port"
        i=`expr $i + 1`
        current_port=`expr $current_port + 1`
    done

    # Connect All ndb processes with all ndb processes
    i=1
    while [ $i -le $ndb_nodes ]
    do
        j=`expr $i + 1`
        while [ $j -le $ndb_nodes ]
        do
            echo
            echo "[TCP]"
            echo "NodeId1: `expr $i + 1`"
            echo "NodeId2: `expr $j + 1`"
            echo "PortNumber: $current_port"
            j=`expr $j + 1`
            current_port=`expr $current_port + 1`
        done
        i=`expr $i + 1`
    done

    # Connect all ndb-nodes with all api nodes
    i=1
    while [ $i -le $ndb_nodes ]
    do
        j=1
        while [ $j -le $api_nodes ]
        do
            echo
            echo "[TCP]"
            echo "NodeId1: `expr $i + 1`"
            echo "NodeId2: `expr $j + $ndb_nodes + 1`"
            echo "PortNumber: $current_port"
            j=`expr $j + 1`
            current_port=`expr $current_port + 1`
        done
        i=`expr $i + 1`
    done
    echo
) > $DST

trace "Init config file done"

if [ -z "$dst_dir" ]
then
    cat $DST
    rm -f $DST
    rm -f /tmp/hosts.$uniq_id
    exit 0
fi

###
# Create Ndb.cfg files

# nodeid=2;host=localhost:2200

# Mgm node
mkcfg(){
    mkdir -p $dst_dir/${2}.ndb_${1}
    (
        echo "OwnProcessId $2"
        echo "host://${mgm_node_1_host}:${mgm_port}"
    ) > $dst_dir/${2}.ndb_${1}/Ndb.cfg
    if [ $1 = "db" ]
    then
        mkdir $dst_dir/node-${2}-fs
    fi
}

mkcfg mgm 1
cat $DST > $dst_dir/1.ndb_mgm/initconfig.txt

trace "Creating Ndb.cfg for ndb nodes"

current_node=2
i=1
while [ $i -le $ndb_nodes ]
do
    mkcfg db ${current_node}
    i=`expr $i + 1`
    current_node=`expr $current_node + 1`
done

trace "Creating Ndb.cfg for api nodes"

i=1
while [ $i -le $api_nodes ]
do
    mkcfg api ${current_node}
    i=`expr $i + 1`
    current_node=`expr $current_node + 1`
done

rm -f $DST
rm -f /tmp/hosts.$uniq_id

exit 0

# vim: set sw=4:

+++ make-config.sh (new version, added)

#!/bin/sh

baseport=""
basedir=""
proc_no=1
node_id=1

d_file=/tmp/d.$$
dir_file=/tmp/dirs.$$
config_file=/tmp/config.$$
cluster_file=/tmp/cluster.$$

add_procs(){
    type=$1; shift
    while [ $# -ne 0 ]
    do
        add_proc $type $1
        shift
    done
}

add_proc (){
    dir=""
    conf=""
    case $type in
    mgm)
        dir="ndb_mgmd"
        conf="[ndb_mgmd]\nId: $node_id\nHostName: $2\n"
        node_id=`expr $node_id + 1`
        ;;
    api)
        dir="ndb_api"
        conf="[api]\nId: $node_id\nHostName: $2\n"
        node_id=`expr $node_id + 1`
        ;;
    ndb)
        dir="ndbd"
        conf="[ndbd]\nId: $node_id\nHostName: $2\n"
        node_id=`expr $node_id + 1`
        ;;
    mysqld)
        dir="mysqld"
        conf="[mysqld]\nId: $node_id\nHostName: $2\n"
        node_id=`expr $node_id + 1`
        ;;
    mysql)
        dir="mysql"
        ;;
    esac
    dir="$proc_no.$dir"
    proc_no=`expr $proc_no + 1`
    echo -e $dir >> $dir_file
    if [ "$conf" ]
    then
        echo -e $conf >> $config_file
    fi
}

cnf=/dev/null
cat $1 | while read line
do
    case $line in
    baseport:*) baseport=`echo $line | sed 's/baseport[ ]*:[ ]*//g'`;;
    basedir:*) basedir=`echo $line | sed 's/basedir[ ]*:[ ]*//g'`;;
    mgm:*) add_procs mgm `echo $line | sed 's/mgm[ ]*:[ ]*//g'`;;
    api:*) add_procs api `echo $line | sed 's/api[ ]*:[ ]*//g'`;;
    ndb:*) add_procs ndb `echo $line | sed 's/ndb[ ]*:[ ]*//g'`;;
    mysqld:*) add_procs mysqld `echo $line | sed 's/mysqld[ ]*:[ ]*//g'`;;
    mysql:*) add_procs mysql `echo $line | sed 's/mysql[ ]*:[ ]*//g'`;;
    "-- cluster config")
        if [ "$cnf" = "/dev/null" ]
        then
            cnf=$cluster_file
        else
            cnf=/dev/null
        fi
        line="";;
    *) echo $line >> $cnf; line="";;
    esac
    if [ "$line" ]
    then
        echo $line >> $d_file
    fi
done

cat $dir_file | xargs mkdir -p

if [ -f $cluster_file ]
then
    cat $cluster_file $config_file >> /tmp/config2.$$
    mv /tmp/config2.$$ $config_file
fi

for i in `find . -type d -name '*.ndb_mgmd'`
do
    cp $config_file $i/config.ini
done

mv $d_file d.txt
rm -f $config_file $dir_file $cluster_file

 #!/bin/sh
 save_args=$*
-VERSION="ndb-autotest.sh version 1.0"
+VERSION="ndb-autotest.sh version 1.04"
 DATE=`date '+%Y-%m-%d'`
 export DATE
@@ -71,11 +71,18 @@ then
     cd $dst_place
     rm -rf $run_dir/*
     aclocal; autoheader; autoconf; automake
+    if [ -d storage ]
+    then
+        (cd storage/innobase; aclocal; autoheader; autoconf; automake)
+        (cd storage/bdb/dist; sh s_all)
+    else
         (cd innobase; aclocal; autoheader; autoconf; automake)
         (cd bdb/dist; sh s_all)
+    fi
     eval $configure --prefix=$run_dir
     make
     make install
+    (cd $run_dir; ./bin/mysql_install_db)
 fi

 ###
@@ -103,7 +110,9 @@ fi
 test_dir=$run_dir/mysql-test/ndb
 atrt=$test_dir/atrt
 html=$test_dir/make-html-reports.sh
-PATH=$test_dir:$PATH
+mkconfig=$run_dir/mysql-test/ndb/make-config.sh
+
+PATH=$run_dir/bin:$test_dir:$PATH
 export PATH

 filter(){
@@ -125,17 +134,13 @@ hosts=`cat /tmp/hosts.$DATE`
 if [ "$deploy" ]
 then
-    (cd / && tar cfz /tmp/build.$DATE.tgz $run_dir )
     for i in $hosts
     do
-        ok=0
-        scp /tmp/build.$DATE.tgz $i:/tmp/build.$DATE.$$.tgz && \
-            ssh $i "rm -rf /space/autotest/*" && \
-            ssh $i "cd / && tar xfz /tmp/build.$DATE.$$.tgz" && \
-            ssh $i "rm /tmp/build.$DATE.$$.tgz" && ok=1
-        if [ $ok -eq 0 ]
+        rsync -a --delete --force --ignore-errors $run_dir/ $i:$run_dir
+        ok=$?
+        if [ $ok -ne 0 ]
         then
-            echo "$i failed during scp/ssh, excluding"
+            echo "$i failed during rsync, excluding"
            echo $i >> /tmp/failed.$DATE
         fi
     done
@@ -170,6 +175,18 @@ choose(){
     cat $TMP1
     rm -f $TMP1
 }

+choose_conf(){
+    host=`hostname -s`
+    if [ -f $test_dir/conf-$1-$host.txt ]
+    then
+        echo "$test_dir/conf-$1-$host.txt"
+    elif [ -f $test_dir/conf-$1.txt ]
+    then
+        echo "$test_dir/conf-$1.txt"
+    fi
+}
+
 start(){
     rm -rf report.txt result* log.txt
     $atrt -v -v -r -R --log-file=log.txt --testcase-file=$test_dir/$2-tests.txt &
@@ -186,11 +203,17 @@ start(){
     p2=`pwd`
     cd ..
     tar cfz /tmp/res.$$.tgz `basename $p2`/$DATE
-    scp /tmp/res.$$.tgz $result_host:$result_path
-    ssh $result_host "cd $result_path && tar xfz res.$$.tgz && rm -f res.$$.tgz"
+    scp /tmp/res.$$.tgz $result_host:$result_path/res.$DATE.`hostname -s`.$2.$$.tgz
     rm -f /tmp/res.$$.tgz
 }

+count_hosts(){
+    cnt=`grep "CHOOSE_host" $1 |
+        awk '{for(i=1; i<=NF;i++) if(match($i, "CHOOSE_host") > 0) print $i;}' |
+        sort | uniq | wc -l`
+    echo $cnt
+}
+
 p=`pwd`
 for dir in $RUN
 do
@@ -199,10 +222,11 @@ do
     run_dir=$base_dir/run-$dir-mysql-$clone-$target
     res_dir=$base_dir/result-$dir-mysql-$clone-$target/$DATE

-    mkdir -p $res_dir
-    rm -rf $res_dir/*
+    mkdir -p $run_dir $res_dir
+    rm -rf $res_dir/* $run_dir/*

-    count=`grep -c "COMPUTER" $run_dir/1.ndb_mgmd/initconfig.template`
+    conf=`choose_conf $dir`
+    count=`count_hosts $conf`
     avail_hosts=`filter /tmp/filter_hosts.$$ $hosts`
     avail=`echo $avail_hosts | wc -w`
     if [ $count -gt $avail ]
@@ -212,12 +236,12 @@ do
         break;
     fi

-    run_hosts=`echo $avail_hosts| awk '{for(i=1;i<='$count';i++)print $i;}'`
-    choose $run_dir/d.template $run_hosts > $run_dir/d.txt
-    choose $run_dir/1.ndb_mgmd/initconfig.template $run_hosts > $run_dir/1.ndb_mgmd/config.ini
+    run_hosts=`echo $avail_hosts|awk '{for(i=1;i<='$count';i++)print $i;}'`
     echo $run_hosts >> /tmp/filter_hosts.$$

     cd $run_dir
+    choose $conf $run_hosts > d.tmp
+    $mkconfig d.tmp
     start $dir-mysql-$clone-$target $dir $res_dir &
 done
 cd $p
...
@@ -69,6 +69,7 @@ struct atrt_config {
 struct atrt_testcase {
   bool m_report;
+  bool m_run_all;
   time_t m_max_time;
   BaseString m_command;
   BaseString m_args;
...
@@ -282,6 +282,7 @@ convert(const Properties & src, SimpleCpcClient::Process & dst){
   b &= src.get("stdout", dst.m_stdout);
   b &= src.get("stderr", dst.m_stderr);
   b &= src.get("ulimit", dst.m_ulimit);
+  b &= src.get("shutdown", dst.m_shutdown_options);
   return b;
 }
@@ -305,6 +306,7 @@ convert(const SimpleCpcClient::Process & src, Properties & dst ){
   b &= dst.put("stdout", src.m_stdout.c_str());
   b &= dst.put("stderr", src.m_stderr.c_str());
   b &= dst.put("ulimit", src.m_ulimit.c_str());
+  b &= dst.put("shutdown", src.m_shutdown_options.c_str());
   return b;
 }
@@ -372,6 +374,7 @@ SimpleCpcClient::list_processes(Vector<Process> &procs, Properties& reply) {
     CPC_ARG("stdout",String, Mandatory, "Redirect stdout"),
     CPC_ARG("stderr",String, Mandatory, "Redirect stderr"),
     CPC_ARG("ulimit",String, Mandatory, "ulimit"),
+    CPC_ARG("shutdown",String, Mandatory, "shutdown"),
     CPC_END()
   };
...