Commit da5a8043 authored by unknown

Merge perch.ndb.mysql.com:/home/jonas/src/50-work
into  perch.ndb.mysql.com:/home/jonas/src/mysql-5.0-ndb


ndb/src/ndbapi/NdbTransaction.cpp:
  Auto merged
ndb/src/ndbapi/Ndbif.cpp:
  Auto merged
ndb/test/run-test/daily-basic-tests.txt:
  Auto merged
parents bc675bbd 9a79c0dc
@@ -5084,7 +5084,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
     ptrAss(tcConnectptr, tcConnectRecord);
     TcConnectRecord * const regTcPtr = tcConnectptr.p;
     if (regTcPtr->tcConnectstate == OS_OPERATING) {
-      apiConnectptr.i = regTcPtr->apiConnect;
+      Uint32 save = apiConnectptr.i = regTcPtr->apiConnect;
       ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
       ApiConnectRecord * const regApiPtr = apiConnectptr.p;
       compare_transid1 = regApiPtr->transid[0] ^ lqhKeyRef->transId1;
@@ -5195,7 +5195,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
         regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
         tcKeyRef->connectPtr = indexOp;
         EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
-        apiConnectptr.i = regTcPtr->apiConnect;
+        apiConnectptr.i = save;
         apiConnectptr.p = regApiPtr;
       } else {
         jam();
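The @@ -5084 and @@ -5195 hunks above are one pattern: EXECUTE_DIRECT runs the TCKEYREF handling inline, and that nested handler may repoint the block-global apiConnectptr, so the caller saves the record index first and restores it afterwards. Below is a minimal sketch of that pattern with made-up names (Ptr, runNested); it is not the DBTC implementation itself.

// Sketch only, hypothetical types standing in for the DBTC block globals.
struct Ptr { unsigned i; void* p; };

void runNested(Ptr& globalPtr, void (*executeDirect)(Ptr&))
{
  unsigned save = globalPtr.i;   // remember which record we were working on
  executeDirect(globalPtr);      // the nested handler may repoint globalPtr
  globalPtr.i = save;            // restore our record index before continuing
}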
@@ -5220,6 +5220,8 @@ void Dbtc::execLQHKEYREF(Signal* signal)
         jam();
         sendtckeyconf(signal, 1);
         regApiPtr->apiConnectstate = CS_CONNECTED;
+        regApiPtr->m_transaction_nodes.clear();
+        setApiConTimer(apiConnectptr.i, 0,__LINE__);
       }
       return;
     } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
@@ -11878,17 +11880,6 @@ void Dbtc::execTCKEYREF(Signal* signal)
   case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI):
   case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
     jam();
-    // If we fail index access for a non-read operation during commit
-    // we abort transaction
-    if (commitFlg == 1) {
-      jam();
-      releaseIndexOperation(regApiPtr, indexOp);
-      apiConnectptr.i = indexOp->connectionIndex;
-      ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-      terrorCode = tcKeyRef->errorCode;
-      abortErrorLab(signal);
-      break;
-    }
     /**
      * Increase count as it will be decreased below...
      * (and the code is written to handle failing lookup on "real" table
......
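The branch removed above used to abort the whole transaction whenever the index access part of an operation failed while the commit flag was set; after this change a failed unique-index lookup under AO_IgnoreError is handled like a failed lookup on the "real" table, as the surviving comment says. A hedged client-side sketch of that situation follows; the helper, index, table and column names are made up, error checks are omitted as in the test code below, and enum scoping follows the 5.0-era NDB API.

#include <NdbApi.hpp>

// Hedged sketch only (made-up names); not part of the patch.
static int lookupMissViaUniqueIndex(Ndb* pNdb)
{
  NdbTransaction* trans = pNdb->startTransaction();
  NdbIndexOperation* iop = trans->getNdbIndexOperation("uk$idx", "t1");
  iop->readTuple();
  iop->equal("uk_col", 12345);          // key value that does not exist
  iop->getValue("payload");             // made-up attribute
  trans->execute(NdbTransaction::Commit, AO_IgnoreError);
  // The lookup may fail, but with IgnoreError the transaction as a whole
  // is no longer aborted; the error stays on this operation.
  int opErr = iop->getNdbError().code;
  pNdb->closeTransaction(trans);
  return opErr;
}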
@@ -474,6 +474,7 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
        * This timeout situation can occur if NDB crashes.
        */
       ndbout << "This timeout should never occur, execute(..)" << endl;
+      theError.code = 4012;
       setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
       DBUG_RETURN(-1);
     }//if
@@ -1966,6 +1967,14 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf,
     theGlobalCheckpointId = tGCI;
   } else if ((tNoComp >= tNoSent) &&
              (theLastExecOpInList->theCommitIndicator == 1)){
+    if (m_abortOption == AO_IgnoreError && theError.code != 0){
+      /**
+       * There's always a TCKEYCONF when using IgnoreError
+       */
+      return -1;
+    }
     /**********************************************************************/
     // We sent the transaction with Commit flag set and received a CONF with
     // no Commit flag set. This is clearly an anomaly.
......
@@ -952,6 +952,7 @@ Ndb::check_send_timeout()
       //abort();
 #endif
       a_con->theReleaseOnClose = true;
+      a_con->theError.code = 4012;
       a_con->setOperationErrorCodeAbort(4012);
       a_con->theCommitStatus = NdbTransaction::NeedAbort;
       a_con->theCompletionStatus = NdbTransaction::CompletedFailure;
......
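Both API-side changes above record error 4012 on the transaction object itself, not only on the failing operation, so a caller that executes with IgnoreError can detect the failure without walking every operation. A hedged sketch of that caller-side check follows (the helper name is made up; the literal 4012 and the dual check mirror runBug25059 below).

#include <NdbApi.hpp>

// Hedged sketch of the check this change enables on the application side.
static bool commitOrGiveUp(Ndb* pNdb, NdbTransaction*& trans)
{
  if (trans->execute(NdbTransaction::Commit, AO_IgnoreError) != 0 ||
      trans->getNdbError().code == 4012)
  {
    // Timeout / cluster failure: this transaction cannot be continued.
    pNdb->closeTransaction(trans);
    trans = pNdb->startTransaction();   // caller must redo its operations
    return false;
  }
  return true;
}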
@@ -1034,6 +1034,28 @@ runMassiveRollback2(NDBT_Context* ctx, NDBT_Step* step){
   return result;
 }
 
+int
+runBug25090(NDBT_Context* ctx, NDBT_Step* step){
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+
+  HugoOperations ops(*ctx->getTab());
+
+  int loops = ctx->getNumLoops();
+  const int rows = ctx->getNumRecords();
+
+  while (loops--)
+  {
+    ops.startTransaction(pNdb);
+    ops.pkReadRecord(pNdb, 1, 1);
+    ops.execute_Commit(pNdb, AO_IgnoreError);
+    sleep(10);
+    ops.closeTransaction(pNdb);
+  }
+
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert",
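For reference, one iteration of runBug25090 above can also be written against the raw NDB API instead of the HugoOperations helper. This is a hedged sketch only: the helper name and the KOL1/KOL2 column names are made up, and the sleep(10) mirrors the test, leaving the transaction idle long enough for the timeout handling patched above to run.

#include <NdbApi.hpp>
#include <unistd.h>

// Hedged raw-API equivalent of one runBug25090 iteration (made-up names).
static void bug25090Iteration(Ndb* pNdb, const char* tabName)
{
  NdbTransaction* trans = pNdb->startTransaction();
  NdbOperation* op = trans->getNdbOperation(tabName);
  op->readTuple();
  op->equal("KOL1", 1);            // hypothetical primary-key column
  op->getValue("KOL2");            // hypothetical attribute to read
  trans->execute(NdbTransaction::Commit, AO_IgnoreError);
  sleep(10);                       // idle, as in the test; if a timeout occurs,
                                   // the fix above makes it visible as 4012
  pNdb->closeTransaction(trans);
}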
@@ -1277,6 +1299,10 @@ TESTCASE("Fill",
   INITIALIZER(runPkRead);
   FINALIZER(runClearTable2);
 }
+TESTCASE("Bug25090",
+         "Verify what happens when we fill the db" ){
+  STEP(runBug25090);
+}
 NDBT_TESTSUITE_END(testBasic);
 #if 0
......
@@ -1239,7 +1239,64 @@ runBug21384(NDBT_Context* ctx, NDBT_Step* step)
   return NDBT_OK;
 }
 
+int
+runBug25059(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+  const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
+
+  HugoOperations ops(*ctx->getTab(), idx);
+
+  int res = NDBT_OK;
+  int loops = ctx->getNumLoops();
+  const int rows = ctx->getNumRecords();
+
+  while (res == NDBT_OK && loops--)
+  {
+    ops.startTransaction(pNdb);
+    ops.pkReadRecord(pNdb, 10 + rand() % rows, rows);
+    int tmp;
+    if (tmp = ops.execute_Commit(pNdb, AO_IgnoreError))
+    {
+      if (tmp == 4012)
+        res = NDBT_FAILED;
+      else
+        if (ops.getTransaction()->getNdbError().code == 4012)
+          res = NDBT_FAILED;
+    }
+    ops.closeTransaction(pNdb);
+  }
+
+  loops = ctx->getNumLoops();
+  while (res == NDBT_OK && loops--)
+  {
+    ops.startTransaction(pNdb);
+    ops.pkUpdateRecord(pNdb, 10 + rand() % rows, rows);
+    int tmp;
+    int arg;
+    switch(rand() % 2){
+    case 0:
+      arg = AbortOnError;
+      break;
+    case 1:
+      arg = AO_IgnoreError;
+      ndbout_c("ignore error");
+      break;
+    }
+    if (tmp = ops.execute_Commit(pNdb, (AbortOption)arg))
+    {
+      if (tmp == 4012)
+        res = NDBT_FAILED;
+      else
+        if (ops.getTransaction()->getNdbError().code == 4012)
+          res = NDBT_FAILED;
+    }
+    ops.closeTransaction(pNdb);
+  }
+
+  return res;
+}
+
 NDBT_TESTSUITE(testIndex);
 TESTCASE("CreateAll",
@@ -1564,6 +1621,14 @@ TESTCASE("Bug21384",
   FINALIZER(createPkIndex_Drop);
   FINALIZER(runClearTable);
 }
+TESTCASE("Bug25059",
+         "Test that unique indexes and nulls"){
+  TC_PROPERTY("LoggedIndexes", (unsigned)0);
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runBug25059);
+  FINALIZER(createPkIndex_Drop);
+}
 NDBT_TESTSUITE_END(testIndex);
 int main(int argc, const char** argv){
......
@@ -211,6 +211,14 @@ max-time: 500
 cmd: testTimeout
 args: T1
 
+max-time: 500
+cmd: testBasic
+args: -n Bug25090 T1
+
+max-time: 500
+cmd: testIndex
+args: -n Bug25059 -r 3000 T1
+
 # SCAN TESTS
 #
 max-time: 500
......