Commit 84b32099 authored by unknown

New config parameters

malloc -> NdbMem_Allocate


ndb/include/mgmapi/mgmapi_config_parameters.h:
  Added Meta Data config parameter constants
ndb/src/common/mgmcommon/ConfigInfo.cpp:
  Added Meta Data config parameters
ndb/src/common/mgmcommon/LocalConfig.cpp:
  malloc -> NdbMem_Allocate
ndb/src/common/mgmcommon/NdbConfig.c:
  malloc -> NdbMem_Allocate
ndb/src/common/portlib/NdbCondition.c:
  malloc -> NdbMem_Allocate
ndb/src/common/portlib/NdbMutex.c:
  malloc -> NdbMem_Allocate
ndb/src/common/portlib/NdbThread.c:
  malloc -> NdbMem_Allocate
ndb/src/kernel/blocks/dbtc/Dbtc.hpp:
  Removed the heap allocation (new TcIndxReq) done when constructing a TcIndexOperation
ndb/src/kernel/blocks/dbtc/DbtcInit.cpp:
  Minor cleanups + read CFG_DB_NO_UNIQUE_HASH_INDEXES instead of CFG_DB_NO_INDEXES, for consistency
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  Removed the heap allocation (new TcIndxReq) done when constructing a TcIndexOperation
ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp:
  malloc -> NdbMem_Allocate
ndb/src/kernel/vm/Configuration.cpp:
  malloc -> NdbMem_Allocate
  + New config parameters for more consistency
ndb/src/kernel/vm/pc.hpp:
  Fewer fragments allocated; saves 140 kB per table
  (160 kB -> 20 kB per table)
parent 370523ef
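
The recurring change in the hunks below replaces bare malloc() calls with the portlib wrapper NdbMem_Allocate(). A minimal sketch of the pattern, assuming the wrapper keeps malloc's size-in/pointer-out contract (which is how every call site below uses it); the helper name is hypothetical:

  #include <NdbMem.h>
  #include <string.h>

  // Illustrative helper only, not part of the commit.
  static char* alloc_zeroed_buffer(size_t sz)
  {
    // before: char* buf = (char*)malloc(sz);
    char* buf = (char*)NdbMem_Allocate(sz);
    if (buf != NULL)
      memset(buf, 0, sz);   // callers keep their existing NULL checks
    return buf;
  }
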
......@@ -77,6 +77,12 @@
#define CFG_DB_DISCLESS 148
#define CFG_DB_NO_ORDERED_INDEXES 149
#define CFG_DB_NO_UNIQUE_HASH_INDEXES 150
#define CFG_DB_NO_LOCAL_OPS 151
#define CFG_DB_NO_LOCAL_SCANS 152
#define CFG_DB_BATCH_SIZE 153
#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201
......
......@@ -405,12 +405,36 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
8,
MAX_INT_RNIL },
{
CFG_DB_NO_ORDERED_INDEXES,
"MaxNoOfOrderedIndexes",
"DB",
"Total number of ordered indexes that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
128,
0,
MAX_INT_RNIL },
{
CFG_DB_NO_UNIQUE_HASH_INDEXES,
"MaxNoOfUniqueHashIndexes",
"DB",
"Total number of unique hash indexes that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
64,
0,
MAX_INT_RNIL },
{
CFG_DB_NO_INDEXES,
"MaxNoOfIndexes",
"DB",
"Total number of indexes that can be defined in the system",
ConfigInfo::USED,
ConfigInfo::DEPRICATED,
false,
ConfigInfo::INT,
128,
......@@ -530,7 +554,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
CFG_DB_NO_OPS,
"MaxNoOfConcurrentOperations",
"DB",
"Max no of op:s on DB (op:s within a transaction are concurrently executed)",
"Max number of operation records in transaction coordinator",
ConfigInfo::USED,
false,
ConfigInfo::INT,
......@@ -538,6 +562,43 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
32,
MAX_INT_RNIL },
{
CFG_DB_NO_LOCAL_OPS,
"MaxNoOfLocalOperations",
"DB",
"Max number of operation records defined in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
0, //0 means that it is calculated using MaxNoOfConcurrentOperations
32,
MAX_INT_RNIL },
{
CFG_DB_NO_LOCAL_SCANS,
"MaxNoOfLocalScans",
"DB",
"Max number of fragment scans in parallel in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
0, //0 means that it is calculated using MaxNoOfConcurrentScans
32,
MAX_INT_RNIL },
{
CFG_DB_BATCH_SIZE,
"BatchSizePerLocalScan",
"DB",
"Used to calculate the number of lock records for scan with hold lock",
ConfigInfo::USED,
false,
ConfigInfo::INT,
32,
1,
MAX_INT_RNIL },
{
CFG_DB_NO_TRANSACTIONS,
"MaxNoOfConcurrentTransactions",
......
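
Both new "Local" parameters above default to 0, meaning "derive from the corresponding global parameter"; the derivation itself is added to Configuration::calcSizeAlt() further down in this commit. A standalone sketch of that defaulting (formulas copied from the Configuration.cpp hunk below; the function names are hypothetical):

  typedef unsigned int Uint32;

  // 0 => derive from MaxNoOfConcurrentOperations, as calcSizeAlt() does.
  static Uint32 effectiveLocalOperations(Uint32 maxNoOfLocalOperations,
                                         Uint32 maxNoOfConcurrentOperations)
  {
    if (maxNoOfLocalOperations == 0)
      maxNoOfLocalOperations = (11 * maxNoOfConcurrentOperations) / 10;
    return maxNoOfLocalOperations;
  }

  // 0 => derive from MaxNoOfConcurrentScans and the number of DB nodes.
  static Uint32 effectiveLocalScans(Uint32 maxNoOfLocalScans,
                                    Uint32 noOfDBNodes,
                                    Uint32 maxNoOfConcurrentScans)
  {
    if (maxNoOfLocalScans == 0)
      maxNoOfLocalScans = (noOfDBNodes * maxNoOfConcurrentScans) + 1;
    return maxNoOfLocalScans;
  }

  // Example: 32768 concurrent operations -> 36044 local operation records;
  // 4 DB nodes with 256 concurrent scans -> 1025 local scan records.
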
......@@ -18,6 +18,7 @@
#include <NdbEnv.h>
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
#include <NdbMem.h>
LocalConfig::LocalConfig(){
error_line = 0; error_msg[0] = 0;
......@@ -242,7 +243,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
}
int sz = 1024;
char* theString = (char*)malloc(sz);
char* theString = (char*)NdbMem_Allocate(sz);
theString[0] = 0;
fgets(theString, sz, file);
......@@ -250,7 +251,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
line[0] = ';';
while (strlen(theString) + strlen(line) >= sz) {
sz = sz*2;
char *newString = (char*)malloc(sz);
char *newString = (char*)NdbMem_Allocate(sz);
strcpy(newString, theString);
free(theString);
theString = newString;
......
......@@ -17,6 +17,7 @@
#include <ndb_global.h>
#include <NdbConfig.h>
#include <NdbEnv.h>
#include <NdbMem.h>
static char*
NdbConfig_AllocHomePath(int _len)
......@@ -30,7 +31,7 @@ NdbConfig_AllocHomePath(int _len)
path_len= strlen(path);
len+= path_len;
buf= malloc(len);
buf= NdbMem_Allocate(len);
if (path_len > 0)
snprintf(buf, len, "%s%c", path, DIR_SEPARATOR);
else
......@@ -48,7 +49,7 @@ NdbConfig_NdbCfgName(int with_ndb_home){
buf= NdbConfig_AllocHomePath(128);
len= strlen(buf);
} else
buf= malloc(128);
buf= NdbMem_Allocate(128);
snprintf(buf+len, 128, "Ndb.cfg");
return buf;
}
......
......@@ -20,6 +20,7 @@
#include <NdbCondition.h>
#include <NdbThread.h>
#include <NdbMutex.h>
#include <NdbMem.h>
struct NdbCondition
{
......@@ -34,7 +35,7 @@ NdbCondition_Create(void)
struct NdbCondition* tmpCond;
int result;
tmpCond = (struct NdbCondition*)malloc(sizeof(struct NdbCondition));
tmpCond = (struct NdbCondition*)NdbMem_Allocate(sizeof(struct NdbCondition));
if (tmpCond == NULL)
return NULL;
......
......@@ -19,13 +19,14 @@
#include <NdbThread.h>
#include <NdbMutex.h>
#include <NdbMem.h>
NdbMutex* NdbMutex_Create(void)
{
NdbMutex* pNdbMutex;
int result;
pNdbMutex = (NdbMutex*)malloc(sizeof(NdbMutex));
pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex));
if (pNdbMutex == NULL)
return NULL;
......
......@@ -18,6 +18,7 @@
#include <ndb_global.h>
#include <NdbThread.h>
#include <pthread.h>
#include <NdbMem.h>
#define MAX_THREAD_NAME 16
......@@ -44,7 +45,7 @@ struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func,
if (p_thread_func == NULL)
return 0;
tmpThread = (struct NdbThread*)malloc(sizeof(struct NdbThread));
tmpThread = (struct NdbThread*)NdbMem_Allocate(sizeof(struct NdbThread));
if (tmpThread == NULL)
return NULL;
......
......@@ -542,13 +542,11 @@ public:
attrInfo(abp),
expectedTransIdAI(0),
transIdAI(abp),
tcIndxReq(new TcIndxReq()),
indexReadTcConnect(RNIL)
{}
~TcIndexOperation()
{
delete tcIndxReq;
}
// Index data
......@@ -561,7 +559,7 @@ public:
Uint32 expectedTransIdAI;
AttributeBuffer transIdAI; // For accumulating TransId_AI
TcIndxReq* tcIndxReq;
TcIndxReq tcIndxReq;
UintR connectionIndex;
UintR indexReadTcConnect; //
......
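
The Dbtc.hpp hunk above turns tcIndxReq from a heap-allocated pointer into a member stored by value inside TcIndexOperation, so the constructor and destructor no longer call new/delete for it, and every access in DbtcMain.cpp changes from -> to . (see the hunks further down). A minimal sketch of that pointer-to-member conversion, using hypothetical types:

  // Hypothetical types, only to illustrate the conversion.
  struct Request { unsigned senderData; Request() : senderData(0) {} };

  struct OperationBefore {
    OperationBefore() : req(new Request()) {}   // one heap allocation per record
    ~OperationBefore() { delete req; }
    Request* req;
  };

  struct OperationAfter {
    Request req;   // stored inline; no heap traffic when the record is seized or released
  };
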
......@@ -73,6 +73,7 @@ void Dbtc::initData()
void Dbtc::initRecords()
{
void *p;
// Records with dynamic sizes
cacheRecord = (CacheRecord*)allocRecord("CacheRecord",
sizeof(CacheRecord),
......@@ -83,7 +84,7 @@ void Dbtc::initRecords()
capiConnectFilesize);
for(unsigned i = 0; i<capiConnectFilesize; i++) {
void * p = &apiConnectRecord[i];
p = &apiConnectRecord[i];
new (p) ApiConnectRecord(c_theFiredTriggerPool,
c_theSeizedIndexOperationPool);
}
......@@ -91,7 +92,8 @@ void Dbtc::initRecords()
DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
FiredTriggerPtr tptr;
while(triggers.seize(tptr) == true) {
new (tptr.p) TcFiredTriggerData();
p= tptr.p;
new (p) TcFiredTriggerData();
}
triggers.release();
......@@ -109,7 +111,8 @@ void Dbtc::initRecords()
ArrayList<TcIndexOperation> indexOps(c_theIndexOperationPool);
TcIndexOperationPtr ioptr;
while(indexOps.seize(ioptr) == true) {
new (ioptr.p) TcIndexOperation(c_theAttributeBufferPool);
p= ioptr.p;
new (p) TcIndexOperation(c_theAttributeBufferPool);
}
indexOps.release();
......@@ -179,7 +182,6 @@ Dbtc::Dbtc(const class Configuration & conf):
c_maxNumberOfIndexOperations(0),
m_commitAckMarkerHash(m_commitAckMarkerPool)
{
BLOCK_CONSTRUCTOR(Dbtc);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
......@@ -191,7 +193,7 @@ Dbtc::Dbtc(const class Configuration & conf):
ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM,
&transactionBufferMemory);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEXES,
ndb_mgm_get_int_parameter(p, CFG_DB_NO_UNIQUE_HASH_INDEXES,
&maxNoOfIndexes);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS,
&maxNoOfConcurrentIndexOperations);
......
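
initRecords() constructs each pool record in place with placement new; the hunk above just hoists a single void* so the same pointer is reused for every record type. A small self-contained sketch of the technique, with hypothetical names:

  #include <new>       // placement new
  #include <cstdlib>

  struct Record { int state; Record() : state(0) {} };

  int main()
  {
    void* raw = std::malloc(sizeof(Record));   // stands in for pool-owned storage
    Record* rec = new (raw) Record();          // run the constructor in existing memory
    rec->~Record();                            // destroy explicitly; the pool keeps the memory
    std::free(raw);
    return 0;
  }
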
......@@ -11033,7 +11033,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
indexOp->indexOpId = indexOpPtr.i;
// Save original signal
*indexOp->tcIndxReq = *tcIndxReq;
indexOp->tcIndxReq = *tcIndxReq;
indexOp->connectionIndex = TapiIndex;
regApiPtr->accumulatingIndexOp = indexOp->indexOpId;
......@@ -11342,7 +11342,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
......@@ -11361,7 +11361,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Double TCKEYCONF, should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
......@@ -11382,7 +11382,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Copy reply from TcKeyConf
regApiPtr->noIndexOp--; // Decrease count
regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq->senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
tcKeyConf->operations[0].attrInfoLen;
regApiPtr->tcindxrec = Ttcindxrec + 2;
......@@ -11415,7 +11415,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
switch(indexOp->indexOpState) {
......@@ -11445,7 +11445,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
// Send TCINDXREF
jam();
TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
regApiPtr->noIndexOp--; // Decrease count
......@@ -11523,7 +11523,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Failed to allocate space for TransIdAI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
......@@ -11538,7 +11538,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
......@@ -11566,7 +11566,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Too many TRANSID_AI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndexRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
......@@ -11591,7 +11591,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
jam();
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
......@@ -11611,7 +11611,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
tcRollbackRep->connectPtr = indexOp->tcIndxReq->senderData;
tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
sendSignal(apiConnectptr.p->ndbapiBlockref,
GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
}
......@@ -11628,23 +11628,23 @@ void Dbtc::readIndexTable(Signal* signal,
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
AttributeBuffer::DataBufferIterator keyIter;
Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
TcIndexData* indexData;
Uint32 transId1 = indexOp->tcIndxReq->transId1;
Uint32 transId2 = indexOp->tcIndxReq->transId2;
Uint32 transId1 = indexOp->tcIndxReq.transId1;
Uint32 transId2 = indexOp->tcIndxReq.transId2;
const Operation_t opType =
(Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
// Find index table
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq->indexId)) == NULL) {
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.indexId)) == NULL) {
jam();
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
......@@ -11656,7 +11656,7 @@ void Dbtc::readIndexTable(Signal* signal,
tcKeyReq->transId2 = transId2;
tcKeyReq->tableId = indexData->indexId;
tcKeyLength += MIN(keyLength, keyBufSize);
tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq->indexSchemaVersion;
tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.indexSchemaVersion;
TcKeyReq::setOperationType(tcKeyRequestInfo,
opType == ZREAD ? opType : ZREAD_EX);
TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo
......@@ -11705,7 +11705,7 @@ void Dbtc::readIndexTable(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = transId1;
keyInfo->transId[1] = transId2;
dataPtr = (Uint32 *) &keyInfo->keyData;
......@@ -11745,7 +11745,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
Uint32 attrBufSize = 5;
Uint32 dataPos = 0;
TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
......@@ -11761,7 +11761,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
......@@ -11861,7 +11861,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = regApiPtr->transid[0];
keyInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &keyInfo->keyData;
......@@ -11897,7 +11897,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
Uint32 attrInfoPos = 0;
attrInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
attrInfo->transId[0] = regApiPtr->transid[0];
attrInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &attrInfo->attrData;
......
......@@ -510,7 +510,7 @@ AsyncFile::extendfile(Request* request) {
DEBUG(ndbout_c("extendfile: maxOffset=%d, size=%d", maxOffset, maxSize));
// Allocate a buffer and fill it with zeros
void* pbuf = malloc(maxSize);
void* pbuf = NdbMem_Allocate(maxSize);
memset(pbuf, 0, maxSize);
for (int p = 0; p <= maxOffset; p = p + maxSize) {
int return_value;
......
......@@ -285,7 +285,7 @@ Configuration::setupConfiguration(){
if(pFileSystemPath[strlen(pFileSystemPath) - 1] == '/')
_fsPath = strdup(pFileSystemPath);
else {
_fsPath = (char *)malloc(strlen(pFileSystemPath) + 2);
_fsPath = (char *)NdbMem_Allocate(strlen(pFileSystemPath) + 2);
strcpy(_fsPath, pFileSystemPath);
strcat(_fsPath, "/");
}
......@@ -381,7 +381,8 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
char buf[255];
unsigned int noOfTables = 0;
unsigned int noOfIndexes = 0;
unsigned int noOfUniqueHashIndexes = 0;
unsigned int noOfOrderedIndexes = 0;
unsigned int noOfReplicas = 0;
unsigned int noOfDBNodes = 0;
unsigned int noOfAPINodes = 0;
......@@ -389,33 +390,28 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
unsigned int noOfNodes = 0;
unsigned int noOfAttributes = 0;
unsigned int noOfOperations = 0;
unsigned int noOfLocalOperations = 0;
unsigned int noOfTransactions = 0;
unsigned int noOfIndexPages = 0;
unsigned int noOfDataPages = 0;
unsigned int noOfScanRecords = 0;
unsigned int noOfLocalScanRecords = 0;
unsigned int noBatchSize = 0;
m_logLevel = new LogLevel();
/**
* {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
* {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
* {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
* {"TimeToWaitAlive", &cd.ispValues[0][0] },
*/
struct AttribStorage { int paramId; Uint32 * storage; };
AttribStorage tmp[] = {
{ CFG_DB_NO_SCANS, &noOfScanRecords },
{ CFG_DB_NO_LOCAL_SCANS, &noOfLocalScanRecords },
{ CFG_DB_BATCH_SIZE, &noBatchSize },
{ CFG_DB_NO_TABLES, &noOfTables },
{ CFG_DB_NO_INDEXES, &noOfIndexes },
{ CFG_DB_NO_ORDERED_INDEXES, &noOfOrderedIndexes },
{ CFG_DB_NO_UNIQUE_HASH_INDEXES, &noOfUniqueHashIndexes },
{ CFG_DB_NO_REPLICAS, &noOfReplicas },
{ CFG_DB_NO_ATTRIBUTES, &noOfAttributes },
{ CFG_DB_NO_OPS, &noOfOperations },
{ CFG_DB_NO_LOCAL_OPS, &noOfLocalOperations },
{ CFG_DB_NO_TRANSACTIONS, &noOfTransactions }
#if 0
{ "NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
{ "NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
{ "NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
{ "NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
#endif
};
ndb_mgm_configuration_iterator db(*(ndb_mgm_configuration*)ownConfig, 0);
......@@ -514,31 +510,32 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
*/
ConfigValuesFactory cfg(ownConfig);
noOfTables++; // Remove impact of system table
noOfTables += noOfIndexes; // Indexes are tables too
noOfAttributes += 2; // ---"----
noOfTables *= 2; // Remove impact of Dict need 2 ids for each table
noOfTables+= 2; // Add System tables
noOfAttributes += 5; // Add System table attributes
if (noOfDBNodes > 15) {
noOfDBNodes = 15;
}//if
Uint32 noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
if (noOfLocalScanRecords == 0) {
noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
}
if (noOfLocalOperations == 0) {
noOfLocalOperations= (11 * noOfOperations) / 10;
}
Uint32 noOfTCScanRecords = noOfScanRecords;
{
Uint32 noOfAccTables= noOfTables + noOfUniqueHashIndexes;
/**
* Acc Size Alt values
*/
// Can keep 65536 pages (= 0.5 GByte)
cfg.put(CFG_ACC_DIR_RANGE,
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_DIR_ARRAY,
(noOfIndexPages >> 8) +
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
/*-----------------------------------------------------------------------*/
// The extra operation records added are used by the scan and node
......@@ -548,25 +545,27 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
// The remainder are allowed for use by the scan processes.
/*-----------------------------------------------------------------------*/
cfg.put(CFG_ACC_OP_RECS,
((11 * noOfOperations) / 10 + 50) +
(noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
(noOfLocalOperations + 50) +
(noOfLocalScanRecords * noBatchSize) +
NODE_RECOVERY_SCAN_OP_RECORDS);
cfg.put(CFG_ACC_OVERFLOW_RECS,
noOfIndexPages +
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_PAGE8,
noOfIndexPages + 32);
cfg.put(CFG_ACC_ROOT_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_TABLE, noOfTables);
cfg.put(CFG_ACC_TABLE, noOfAccTables);
cfg.put(CFG_ACC_SCAN, noOfLocalScanRecords);
}
Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes +
noOfUniqueHashIndexes;
{
/**
* Dict Size Alt values
......@@ -575,7 +574,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfAttributes);
cfg.put(CFG_DICT_TABLE,
noOfTables);
noOfMetaTables);
}
{
......@@ -589,7 +588,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfOperations + noOfTransactions + 46);
cfg.put(CFG_DIH_FRAG_CONNECT,
NO_OF_FRAG_PER_NODE * noOfTables * noOfDBNodes);
NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfDBNodes);
int temp;
temp = noOfReplicas - 2;
......@@ -599,14 +598,14 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
temp++;
cfg.put(CFG_DIH_MORE_NODES,
temp * NO_OF_FRAG_PER_NODE *
noOfTables * noOfDBNodes);
noOfMetaTables * noOfDBNodes);
cfg.put(CFG_DIH_REPLICAS,
NO_OF_FRAG_PER_NODE * noOfTables *
NO_OF_FRAG_PER_NODE * noOfMetaTables *
noOfDBNodes * noOfReplicas);
cfg.put(CFG_DIH_TABLE,
noOfTables);
noOfMetaTables);
}
{
......@@ -614,13 +613,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Lqh Size Alt values
*/
cfg.put(CFG_LQH_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas);
cfg.put(CFG_LQH_TABLE,
noOfTables);
noOfMetaTables);
cfg.put(CFG_LQH_TC_CONNECT,
(11 * noOfOperations) / 10 + 50);
noOfLocalOperations + 50);
cfg.put(CFG_LQH_SCAN,
noOfLocalScanRecords);
......@@ -637,7 +636,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
(2 * noOfOperations) + 16 + noOfTransactions);
cfg.put(CFG_TC_TABLE,
noOfTables);
noOfMetaTables);
cfg.put(CFG_TC_LOCAL_SCAN,
noOfLocalScanRecords);
......@@ -651,23 +650,23 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tup Size Alt values
*/
cfg.put(CFG_TUP_FRAG,
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);
cfg.put(CFG_TUP_OP_RECS,
(11 * noOfOperations) / 10 + 50);
noOfLocalOperations + 50);
cfg.put(CFG_TUP_PAGE,
noOfDataPages);
cfg.put(CFG_TUP_PAGE_RANGE,
4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
4 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);
cfg.put(CFG_TUP_TABLE,
noOfTables);
noOfMetaTables);
cfg.put(CFG_TUP_TABLE_DESC,
4 * NO_OF_FRAG_PER_NODE * noOfAttributes* noOfReplicas +
12 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas );
12 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas );
cfg.put(CFG_TUP_STORED_PROC,
noOfLocalScanRecords);
......@@ -678,13 +677,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tux Size Alt values
*/
cfg.put(CFG_TUX_INDEX,
noOfTables);
noOfOrderedIndexes);
cfg.put(CFG_TUX_FRAGMENT,
2 * NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
2 * NO_OF_FRAG_PER_NODE * noOfOrderedIndexes * noOfReplicas);
cfg.put(CFG_TUX_ATTRIBUTE,
noOfIndexes * 4);
noOfOrderedIndexes * 4);
cfg.put(CFG_TUX_SCAN_OP, noOfLocalScanRecords);
}
......
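
The net effect of the calcSizeAlt() hunk above is that each kernel block is now sized from the metadata it actually stores: ACC counts base tables plus unique hash indexes, TUX counts only ordered indexes, and DICT/DIH/LQH/TC/TUP count all of them. A worked example with assumed inputs (the numbers are illustrative, not from the commit):

  // Assumed inputs: 100 tables (after the system-table adjustments),
  // 20 unique hash indexes, 30 ordered indexes.
  unsigned noOfTables            = 100;
  unsigned noOfUniqueHashIndexes = 20;
  unsigned noOfOrderedIndexes    = 30;

  unsigned noOfAccTables  = noOfTables + noOfUniqueHashIndexes;   // 120, used for ACC sizing
  unsigned noOfMetaTables = noOfTables + noOfOrderedIndexes
                                       + noOfUniqueHashIndexes;   // 150, used for DICT/DIH/LQH/TC/TUP
  // TUX sizing uses noOfOrderedIndexes (30) alone.
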
......@@ -147,7 +147,7 @@
// in future version since small tables want small value and large tables
// need large value.
/* ------------------------------------------------------------------------- */
#define NO_OF_FRAG_PER_NODE 8
#define NO_OF_FRAG_PER_NODE 1
/* ---------------------------------------------------------------- */
// To avoid synching too big chunks at a time we synch after writing
......
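
The pc.hpp change is where the per-table saving in the commit message comes from: fragment records are allocated as multiples of NO_OF_FRAG_PER_NODE, so dropping it from 8 to 1 shrinks those allocations by a factor of 8. A back-of-the-envelope check, assuming the commit message's 160 kB figure at 8 fragments per node:

  unsigned perFragSlotBytes = (160 * 1024) / 8;         // ~20 kB per fragment slot
  unsigned oldPerTable  = 8 * perFragSlotBytes;         // ~160 kB with NO_OF_FRAG_PER_NODE == 8
  unsigned newPerTable  = 1 * perFragSlotBytes;         // ~20 kB with NO_OF_FRAG_PER_NODE == 1
  unsigned savedPerTable = oldPerTable - newPerTable;   // ~140 kB per table, as stated above
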