Commit 99b3bc7a authored by unknown's avatar unknown

ndb - super pool update (future use)


storage/ndb/src/kernel/vm/SuperPool.cpp:
  super pool update
storage/ndb/src/kernel/vm/SuperPool.hpp:
  super pool update
storage/ndb/src/kernel/vm/testSuperPool.cpp:
  super pool update
parent 867a5b54
...@@ -17,28 +17,33 @@ ...@@ -17,28 +17,33 @@
#include <ndb_global.h> #include <ndb_global.h>
#include "SuperPool.hpp" #include "SuperPool.hpp"
#define SP_ALIGN(sz, al) (((sz) + (al) - 1) & ~((al) - 1))
// This is used for m_freeRecI when there is no record pool page.
#define NNIL 0xffffffff
// SuperPool // SuperPool
SuperPool::SuperPool(Uint32 pageSize, Uint32 pageBits) : SuperPool::SuperPool(Uint32 pageSize, Uint32 pageBits) :
m_pageSize(SP_ALIGN_SIZE(pageSize, SP_ALIGN)), m_pageSize(pageSize),
m_pageBits(pageBits), m_pageBits(pageBits),
m_recBits(32 - m_pageBits),
m_recMask((1 << m_recBits) - 1),
m_memRoot(0), m_memRoot(0),
m_pageEnt(0), m_pageEnt(0),
m_typeCheck(0), m_pageType(0),
m_typeSeq(0), m_freeList(),
m_pageList(), m_initPages(0),
m_totalSize(0), m_incrPages(0),
m_initSize(0), m_maxPages(0),
m_incrSize(0), m_totPages(0),
m_maxSize(0) m_typeCount(0),
{ m_groupMinPct(0),
assert(5 <= pageBits <= 30); m_groupMinPages(0),
} m_groupTotPages(0)
bool
SuperPool::init()
{ {
return true; assert(m_pageSize != 0 && (m_pageSize & (m_pageSize - 1)) == 0);
assert(m_pageBits <= 30);
} }
SuperPool::~SuperPool() SuperPool::~SuperPool()
...@@ -47,13 +52,15 @@ SuperPool::~SuperPool() ...@@ -47,13 +52,15 @@ SuperPool::~SuperPool()
SuperPool::PageEnt::PageEnt() : SuperPool::PageEnt::PageEnt() :
m_pageType(0), m_pageType(0),
m_freeRecI(RNIL),
m_useCount(0), m_useCount(0),
m_freeRecI(NNIL),
m_nextPageI(RNIL), m_nextPageI(RNIL),
m_prevPageI(RNIL) m_prevPageI(RNIL)
{ {
} }
// page list routines
SuperPool::PageList::PageList() : SuperPool::PageList::PageList() :
m_headPageI(RNIL), m_headPageI(RNIL),
m_tailPageI(RNIL), m_tailPageI(RNIL),
...@@ -66,55 +73,29 @@ SuperPool::PageList::PageList(PtrI pageI) : ...@@ -66,55 +73,29 @@ SuperPool::PageList::PageList(PtrI pageI) :
m_tailPageI(pageI), m_tailPageI(pageI),
m_pageCount(1) m_pageCount(1)
{ {
} assert(pageI != RNIL);
SuperPool::RecInfo::RecInfo(Uint32 recType, Uint32 recSize) :
m_recType(recType),
m_recSize(recSize),
m_maxUseCount(0),
m_currPageI(RNIL),
m_currFreeRecI(RNIL),
m_currUseCount(0),
m_totalUseCount(0),
m_totalRecCount(0),
m_freeList(),
m_activeList(),
m_fullList()
{
}
SuperPool::PtrI
SuperPool::getPageI(void* pageP)
{
const Uint32 pageSize = m_pageSize;
const Uint32 pageBits = m_pageBits;
const Uint32 recBits = 32 - pageBits;
void* const memRoot = m_memRoot;
assert(pageP == SP_ALIGN_PTR(pageP, memRoot, pageSize));
my_ptrdiff_t ipL = ((Uint8*)pageP - (Uint8*)memRoot) / pageSize;
Int32 ip = (Int32)ipL;
Int32 lim = 1 << (pageBits - 1);
assert(ip == ipL && -lim <= ip && ip < lim && ip != -1);
PtrI pageI = ip << recBits;
assert(pageP == getPageP(pageI));
return pageI;
} }
void void
SuperPool::movePages(PageList& pl1, PageList& pl2) SuperPool::movePages(PageList& pl1, PageList& pl2)
{ {
const Uint32 recBits = 32 - m_pageBits; PtrI pageI1 = pl1.m_tailPageI;
PtrI pageI2 = pl2.m_headPageI;
if (pl1.m_pageCount != 0) { if (pl1.m_pageCount != 0) {
assert(pageI1 != RNIL);
if (pl2.m_pageCount != 0) { if (pl2.m_pageCount != 0) {
PtrI pageI1 = pl1.m_tailPageI; assert(pageI2 != RNIL);
PtrI pageI2 = pl2.m_headPageI;
PageEnt& pe1 = getPageEnt(pageI1); PageEnt& pe1 = getPageEnt(pageI1);
PageEnt& pe2 = getPageEnt(pageI2); PageEnt& pe2 = getPageEnt(pageI2);
pe1.m_nextPageI = pageI2; pe1.m_nextPageI = pageI2;
pe2.m_prevPageI = pageI1; pe2.m_prevPageI = pageI1;
pl1.m_tailPageI = pl2.m_tailPageI;
pl1.m_pageCount += pl2.m_pageCount; pl1.m_pageCount += pl2.m_pageCount;
} else {
assert(pageI2 == RNIL);
} }
} else { } else {
assert(pageI1 == RNIL);
pl1 = pl2; pl1 = pl2;
} }
pl2.m_headPageI = pl2.m_tailPageI = RNIL; pl2.m_headPageI = pl2.m_tailPageI = RNIL;
...@@ -124,6 +105,9 @@ SuperPool::movePages(PageList& pl1, PageList& pl2) ...@@ -124,6 +105,9 @@ SuperPool::movePages(PageList& pl1, PageList& pl2)
void void
SuperPool::addHeadPage(PageList& pl, PtrI pageI) SuperPool::addHeadPage(PageList& pl, PtrI pageI)
{ {
assert(pageI != RNIL);
PageEnt& pe = getPageEnt(pageI);
assert(pe.m_nextPageI == RNIL & pe.m_prevPageI == RNIL);
PageList pl2(pageI); PageList pl2(pageI);
movePages(pl2, pl); movePages(pl2, pl);
pl = pl2; pl = pl2;
...@@ -132,6 +116,9 @@ SuperPool::addHeadPage(PageList& pl, PtrI pageI) ...@@ -132,6 +116,9 @@ SuperPool::addHeadPage(PageList& pl, PtrI pageI)
void void
SuperPool::addTailPage(PageList& pl, PtrI pageI) SuperPool::addTailPage(PageList& pl, PtrI pageI)
{ {
assert(pageI != RNIL);
PageEnt& pe = getPageEnt(pageI);
assert(pe.m_nextPageI == RNIL & pe.m_prevPageI == RNIL);
PageList pl2(pageI); PageList pl2(pageI);
movePages(pl, pl2); movePages(pl, pl2);
} }
...@@ -139,81 +126,187 @@ SuperPool::addTailPage(PageList& pl, PtrI pageI) ...@@ -139,81 +126,187 @@ SuperPool::addTailPage(PageList& pl, PtrI pageI)
void void
SuperPool::removePage(PageList& pl, PtrI pageI) SuperPool::removePage(PageList& pl, PtrI pageI)
{ {
assert(pageI != RNIL);
PageEnt& pe = getPageEnt(pageI); PageEnt& pe = getPageEnt(pageI);
PtrI pageI1 = pe.m_prevPageI; if (pe.m_nextPageI != RNIL) {
PtrI pageI2 = pe.m_nextPageI; assert(pl.m_tailPageI != pageI);
if (pageI1 != RNIL) { PageEnt& nextPe = getPageEnt(pe.m_nextPageI);
PageEnt& pe1 = getPageEnt(pageI1); nextPe.m_prevPageI = pe.m_prevPageI;
pe1.m_nextPageI = pageI2;
if (pageI2 != RNIL) {
PageEnt& pe2 = getPageEnt(pageI2);
pe2.m_prevPageI = pageI1;
} else {
pl.m_tailPageI = pageI1;
}
} else { } else {
if (pageI2 != RNIL) { assert(pl.m_tailPageI == pageI);
PageEnt& pe2 = getPageEnt(pageI2); pl.m_tailPageI = pe.m_prevPageI;
pe2.m_prevPageI = pageI1;
pl.m_headPageI = pageI2;
} else {
pl.m_headPageI = pl.m_tailPageI = RNIL;
}
} }
pe.m_prevPageI = pe.m_nextPageI = RNIL; if (pe.m_prevPageI != RNIL) {
assert(pl.m_headPageI != pageI);
PageEnt& prevPe = getPageEnt(pe.m_prevPageI);
prevPe.m_nextPageI = pe.m_nextPageI;
} else {
assert(pl.m_headPageI == pageI);
pl.m_headPageI = pe.m_nextPageI;
}
pe.m_nextPageI = RNIL;
pe.m_prevPageI = RNIL;
assert(pl.m_pageCount != 0); assert(pl.m_pageCount != 0);
pl.m_pageCount--; pl.m_pageCount--;
} }
void // reverse mapping
SuperPool::setCurrPage(RecInfo& ri, PtrI newPageI)
{ SuperPool::PtrI
PtrI oldPageI = ri.m_currPageI; SuperPool::getPageI(void* pageP)
if (oldPageI != RNIL) { {
// copy from cache Uint32 pageSize = m_pageSize;
PageEnt& pe = getPageEnt(oldPageI); Uint32 pageBits = m_pageBits;
pe.m_freeRecI = ri.m_currFreeRecI; Uint32 recBits = m_recBits;
pe.m_useCount = ri.m_currUseCount; void* memRoot = m_memRoot;
// add to right list according to "pp2" policy my_ptrdiff_t ipL = (Uint8*)pageP - (Uint8*)memRoot;
if (pe.m_useCount == 0) { assert(ipL % pageSize == 0);
pe.m_pageType = 0; ipL /= (Int32)pageSize;
addHeadPage(m_pageList, oldPageI); Int32 ip = (Int32)ipL;
ri.m_totalRecCount -= ri.m_maxUseCount; Int32 lim = 1 << (pageBits - 1);
} else if (pe.m_useCount < ri.m_maxUseCount) { if (! (ip == ipL && -lim <= ip && ip < lim && ip != -1)) {
addHeadPage(ri.m_activeList, oldPageI); // page was too distant from memory root
} else { return RNIL;
addHeadPage(ri.m_fullList, oldPageI);
}
} }
if (newPageI != RNIL) { PtrI pageI = ip << recBits;
PageEnt& pe = getPageEnt(newPageI); assert(pageP == getPageP(pageI));
// copy to cache return pageI;
ri.m_currPageI = newPageI; }
ri.m_currFreeRecI = pe.m_freeRecI;
ri.m_currUseCount = pe.m_useCount; // record pool
// remove from right list
if (pe.m_useCount == 0) { SuperPool::RecInfo::RecInfo(GroupPool& gp, Uint32 recSize) :
removePage(ri.m_freeList, newPageI); m_groupPool(gp),
} else if (pe.m_useCount < ri.m_maxUseCount) { m_recSize(recSize),
removePage(ri.m_activeList, newPageI); m_recType(0),
} else { m_maxPerPage(0),
removePage(ri.m_fullList, newPageI); m_freeRecI(NNIL),
} m_useCount(0),
} else { m_pageList(),
ri.m_currPageI = RNIL; m_hyX(1),
ri.m_currFreeRecI = RNIL; m_hyY(2)
ri.m_currUseCount = 0; {
SuperPool& sp = gp.m_superPool;
m_recType = (sp.m_typeCount++ << 1) | 1;
assert(m_recSize == SP_ALIGN(m_recSize, sizeof(Uint32)));
{ // compute max records per page
Uint32 n1 = sp.m_pageSize / m_recSize;
Uint32 b2 = (sp.m_recBits < 16 ? sp.m_recBits : 16);
Uint32 n2 = (1 << b2) - 1; // last is reserved
m_maxPerPage = (n1 < n2 ? n1 : n2);
assert(m_maxPerPage != 0);
}
}
// Count records on the free list starting at i-value recI.  The list is
// terminated by an i-value whose "ir" bits are all set (not by RNIL).
Uint32
SuperPool::getFreeCount(RecInfo& ri, PtrI recI)
{
  Uint32 n = 0;
  Uint32 recMask = m_recMask;
  Uint32 loopRecI = recI;
  // walk the singly linked free list until the all-ones terminator
  while ((loopRecI & recMask) != recMask) {
    n++;
    void* loopRecP = getRecP(loopRecI, ri);
    // first word of a free record holds the next free record's i-value
    loopRecI = *(Uint32*)loopRecP;
  }
  // counts must fit in 16 bits (at most 16 recBits are used per page)
  assert(n == (Uint16)n);
  return n;
}
// Number of pages currently owned by the record pool, including the
// detached "current" page when one exists.
Uint32
SuperPool::getRecPageCount(RecInfo& ri)
{
  // sum the free / busy / full lists
  Uint32 count = ri.m_pageList[0].m_pageCount
               + ri.m_pageList[1].m_pageCount
               + ri.m_pageList[2].m_pageCount;
  // the current page is kept off the lists
  return (ri.m_freeRecI != NNIL) ? count + 1 : count;
}
// Total record slots over all pages owned by the record pool.
Uint32
SuperPool::getRecTotCount(RecInfo& ri)
{
  const Uint32 pages = getRecPageCount(ri);
  return pages * ri.m_maxPerPage;
}
// Number of records in use.  ri.m_useCount excludes the current page,
// whose use count is derived here from its cached free list length.
Uint32
SuperPool::getRecUseCount(RecInfo& ri)
{
  Uint32 n = ri.m_useCount;
  // current page does not keep count
  if (ri.m_freeRecI != NNIL) {
    Uint32 maxPerPage = ri.m_maxPerPage;
    Uint32 freeCount = getFreeCount(ri, ri.m_freeRecI);
    assert(maxPerPage >= freeCount);
    n += maxPerPage - freeCount;
  }
  return n;
}
// current page
// Classify a page by its use count: 0 = free list, 1 = busy list,
// 2 = full list.  A use count above the per-page maximum is corruption.
Uint32
SuperPool::getRecPageList(RecInfo& ri, PageEnt& pe)
{
  const Uint32 used = pe.m_useCount;
  const Uint32 maxUsed = ri.m_maxPerPage;
  if (used == 0)
    return 0;
  if (used < maxUsed)
    return 1;
  if (used == maxUsed)
    return 2;
  assert(false);
  return ~(Uint32)0;
}
// Make pageI the current page of the record pool: cache its free list
// head in ri.m_freeRecI and take the page off its list.
void
SuperPool::addCurrPage(RecInfo& ri, PtrI pageI)
{
  PageEnt& pe = getPageEnt(pageI);
  ri.m_freeRecI = pe.m_freeRecI;
  // remove from right list
  Uint32 k = getRecPageList(ri, pe);
  // a full page (k == 2) has no free records so it cannot become current
  assert(k != 2);
  removePage(ri.m_pageList[k], pageI);
  // records on the current page are not counted in ri.m_useCount
  assert(ri.m_useCount >= pe.m_useCount);
  ri.m_useCount -= pe.m_useCount;
}
// Detach the current page: write the cached free list head back to its
// page entry, recompute the page's use count, and put the page on the
// proper list.  If it ended up all-free, try releasing free pages.
void
SuperPool::removeCurrPage(RecInfo& ri)
{
  // page bits of the cached free record identify the current page
  // (removed unused local copy of m_recMask)
  PtrI pageI = ri.m_freeRecI & ~m_recMask;
  // update page entry
  PageEnt& pe = getPageEnt(pageI);
  pe.m_freeRecI = ri.m_freeRecI;
  Uint32 maxPerPage = ri.m_maxPerPage;
  Uint32 freeCount = getFreeCount(ri, pe.m_freeRecI);
  assert(maxPerPage >= freeCount);
  pe.m_useCount = maxPerPage - freeCount;
  // add to right list
  Uint32 k = getRecPageList(ri, pe);
  addHeadPage(ri.m_pageList[k], pageI);
  ri.m_useCount += pe.m_useCount;
  ri.m_freeRecI = NNIL;
  if (k == 0) {
    // page is entirely free - maybe return pages to the group
    freeRecPages(ri);
  }
}
// page allocation
bool bool
SuperPool::getAvailPage(RecInfo& ri) SuperPool::getAvailPage(RecInfo& ri)
{ {
PtrI pageI; PtrI pageI;
if ((pageI = ri.m_activeList.m_headPageI) != RNIL || if ((pageI = ri.m_pageList[1].m_headPageI) != RNIL ||
(pageI = ri.m_freeList.m_headPageI) != RNIL || (pageI = ri.m_pageList[0].m_headPageI) != RNIL ||
(pageI = getFreePage(ri)) != RNIL) { (pageI = getFreePage(ri)) != RNIL) {
setCurrPage(ri, pageI); // the page is in record pool now
if (ri.m_freeRecI != NNIL)
removeCurrPage(ri);
addCurrPage(ri, pageI);
return true; return true;
} }
return false; return false;
...@@ -222,121 +315,310 @@ SuperPool::getAvailPage(RecInfo& ri) ...@@ -222,121 +315,310 @@ SuperPool::getAvailPage(RecInfo& ri)
SuperPool::PtrI SuperPool::PtrI
SuperPool::getFreePage(RecInfo& ri) SuperPool::getFreePage(RecInfo& ri)
{ {
GroupPool& gp = ri.m_groupPool;
PtrI pageI; PtrI pageI;
if (m_pageList.m_pageCount != 0) { if ((pageI = getFreePage(gp)) != RNIL) {
pageI = m_pageList.m_headPageI; initFreePage(ri, pageI);
removePage(m_pageList, pageI); addHeadPage(ri.m_pageList[0], pageI);
} else { return pageI;
pageI = getNewPage(); }
if (pageI == RNIL) return RNIL;
return RNIL; }
// Get a free page for the group: first from the group's own free list,
// then by claiming one from the super pool if the group is still below
// its page limit.  Returns RNIL when neither source can supply a page.
SuperPool::PtrI
SuperPool::getFreePage(GroupPool& gp)
{
  PtrI pageI;
  if ((pageI = gp.m_freeList.m_headPageI) != RNIL) {
    removePage(gp.m_freeList, pageI);
    return pageI;
  }
  // claim from super pool, bounded by this group's maximum
  if (gp.m_totPages < getMaxPages(gp) &&
      (pageI = getFreePage()) != RNIL) {
    gp.m_totPages++;
    return pageI;
  }
  return RNIL;
}
// Get a free page at super pool level: reuse one from the free list,
// else allocate a brand new page.  Returns RNIL when out of memory.
SuperPool::PtrI
SuperPool::getFreePage()
{
  PtrI pageI = m_freeList.m_headPageI;
  if (pageI != RNIL) {
    removePage(m_freeList, pageI);
  } else {
    // getNewPage() yields RNIL when no more memory can be obtained
    pageI = getNewPage();
  }
  return pageI;
}
void
SuperPool::initFreePage(RecInfo& ri, PtrI pageI)
{
void* pageP = getPageP(pageI); void* pageP = getPageP(pageI);
// set up free record list // set up free record list
Uint32 maxUseCount = ri.m_maxUseCount; Uint32 num = ri.m_maxPerPage;
Uint32 recSize = ri.m_recSize; Uint32 recSize = ri.m_recSize;
void* recP = (Uint8*)pageP; void* recP = (Uint8*)pageP;
Uint32 irNext = 1; Uint32 irNext = 1;
while (irNext < maxUseCount) { while (irNext < num) {
*(Uint32*)recP = pageI | irNext; *(Uint32*)recP = pageI | irNext;
recP = (Uint8*)recP + recSize; recP = (Uint8*)recP + recSize;
irNext++; irNext++;
} }
*(Uint32*)recP = RNIL; // terminator has all recBits set
// add to total record count *(Uint32*)recP = pageI | m_recMask;
ri.m_totalRecCount += maxUseCount;
// set up new page entry // set up new page entry
PageEnt& pe = getPageEnt(pageI); PageEnt& pe = getPageEnt(pageI);
new (&pe) PageEnt(); new (&pe) PageEnt();
pe.m_pageType = ri.m_recType; pe.m_pageType = ri.m_recType;
pe.m_freeRecI = pageI | 0; pe.m_freeRecI = pageI | 0;
pe.m_useCount = 0; pe.m_useCount = 0;
// set type check bits // set type check byte
setCheckBits(pageI, ri.m_recType); Uint32 ip = pageI >> m_recBits;
// add to record pool free list m_pageType[ip] = (ri.m_recType & 0xFF);
addHeadPage(ri.m_freeList, pageI);
return pageI;
} }
// release
void void
SuperPool::setSizes(size_t initSize, size_t incrSize, size_t maxSize) SuperPool::releaseNotCurrent(RecInfo& ri, PtrI recI)
{ {
const Uint32 pageSize = m_pageSize; PageEnt& pe = getPageEnt(recI);
m_initSize = SP_ALIGN_SIZE(initSize, pageSize); void* recP = getRecP(recI, ri);
m_incrSize = SP_ALIGN_SIZE(incrSize, pageSize); *(Uint32*)recP = pe.m_freeRecI;
m_maxSize = SP_ALIGN_SIZE(maxSize, pageSize); pe.m_freeRecI = recI;
PtrI pageI = recI & ~ m_recMask;
Uint32 maxPerPage = ri.m_maxPerPage;
// move to right list
Uint32 k1 = getRecPageList(ri, pe);
assert(pe.m_useCount != 0);
pe.m_useCount--;
Uint32 k2 = getRecPageList(ri, pe);
if (k1 != k2) {
removePage(ri.m_pageList[k1], pageI);
addHeadPage(ri.m_pageList[k2], pageI);
if (k2 == 0) {
freeRecPages(ri);
}
}
assert(ri.m_useCount != 0);
ri.m_useCount--;
} }
// Hysteresis-controlled release of all-free pages.  Pages go back to
// the group free list while the group is at or below its reserved
// minimum, and to the super pool above that.  Nothing is freed unless
// free slots outweigh used slots by the ratio m_hyX : m_hyY.
void
SuperPool::freeRecPages(RecInfo& ri)
{
  // ignore current page
  Uint32 useCount = ri.m_useCount;
  Uint32 totCount = 0;
  for (Uint32 k = 0; k <= 2; k++)   // was lowercase "uint32" - file uses Uint32
    totCount += ri.m_pageList[k].m_pageCount;
  totCount *= ri.m_maxPerPage;
  assert(totCount >= useCount);
  if ((totCount - useCount) * ri.m_hyY < useCount * ri.m_hyX)
    return;
  // free all free pages
  GroupPool& gp = ri.m_groupPool;
  Uint32 minPages = getMinPages(gp);
  PageList& pl = ri.m_pageList[0];
  while (pl.m_pageCount != 0) {
    PtrI pageI = pl.m_headPageI;
    removePage(pl, pageI);
    // mark page as unallocated
    PageEnt& pe = getPageEnt(pageI);
    pe.m_pageType = 0;
    pe.m_freeRecI = NNIL;
    Uint32 ip = pageI >> m_recBits;
    m_pageType[ip] = 0;
    if (gp.m_totPages <= minPages) {
      addHeadPage(gp.m_freeList, pageI);
    } else {
      // return excess to super pool
      addHeadPage(m_freeList, pageI);
      assert(gp.m_totPages != 0);
      gp.m_totPages--;
    }
  }
}
// Return every page of the record pool to the group free list.  With
// force == false the pool must hold no used records.
void
SuperPool::freeAllRecPages(RecInfo& ri, bool force)
{
  GroupPool& gp = ri.m_groupPool;
  // flush the detached current page back onto the lists first
  if (ri.m_freeRecI != NNIL)
    removeCurrPage(ri);
  assert(force || ri.m_useCount == 0);
  for (Uint32 k = 0; k <= 2; k++)
    movePages(gp.m_freeList, ri.m_pageList[k]);
}
// size parameters
void
SuperPool::setInitPages(Uint32 initPages)
{
m_initPages = initPages;
}
void
SuperPool::setIncrPages(Uint32 incrPages)
{
m_incrPages = incrPages;
}
void
SuperPool::setMaxPages(Uint32 maxPages)
{
m_maxPages = maxPages;
}
// Aggregate reservation over all groups: the percentage-based amount or
// the absolute page amount, whichever is larger.
Uint32
SuperPool::getGpMinPages()
{
  const Uint32 pctPages = (m_groupMinPct * m_totPages) / 100;
  return (pctPages < m_groupMinPages) ? m_groupMinPages : pctPages;
}
// Reservation of one group: its percentage-based amount or its absolute
// page amount, whichever is larger.
Uint32
SuperPool::getMinPages(GroupPool& gp)
{
  const Uint32 pctPages = (gp.m_minPct * m_totPages) / 100;
  return (pctPages < gp.m_minPages) ? gp.m_minPages : pctPages;
}
// Maximum pages one group may claim: everything except the pages
// reserved by the other groups.
Uint32
SuperPool::getMaxPages(GroupPool& gp)
{
  const Uint32 allGroups = getGpMinPages();
  const Uint32 thisGroup = getMinPages(gp);
  assert(allGroups >= thisGroup);
  // pages reserved by other groups
  const Uint32 reservedByOthers = allGroups - thisGroup;
  // rest can be claimed
  return (m_totPages >= reservedByOthers) ? m_totPages - reservedByOthers : 0;
}
// debug
void void
SuperPool::verify(RecInfo& ri) SuperPool::verify(RecInfo& ri)
{ {
PageList* plList[3] = { &ri.m_freeList, &ri.m_activeList, &ri.m_fullList }; GroupPool& gp = ri.m_groupPool;
for (int i = 0; i < 3; i++) { verifyPageList(m_freeList);
PageList& pl = *plList[i]; verifyPageList(gp.m_freeList);
unsigned count = 0; for (Uint32 k = 0; k <= 2; k++) {
PageList& pl = ri.m_pageList[k];
verifyPageList(pl);
PtrI pageI = pl.m_headPageI; PtrI pageI = pl.m_headPageI;
while (pageI != RNIL) { while (pageI != RNIL) {
PageEnt& pe = getPageEnt(pageI); PageEnt& pe = getPageEnt(pageI);
PtrI pageI1 = pe.m_prevPageI; assert(pe.m_pageType == ri.m_recType);
PtrI pageI2 = pe.m_nextPageI; Uint32 maxPerPage = ri.m_maxPerPage;
if (count == 0) { Uint32 freeCount = getFreeCount(ri, pe.m_freeRecI);
assert(pageI1 == RNIL); assert(maxPerPage >= freeCount);
} else { Uint32 useCount = maxPerPage - freeCount;
assert(pageI1 != RNIL); assert(pe.m_useCount == useCount);
PageEnt& pe1 = getPageEnt(pageI1); assert(k != 0 || useCount == 0);
assert(pe1.m_nextPageI == pageI); assert(k != 1 || (useCount != 0 && freeCount != 0));
if (pageI2 != RNIL) { assert(k != 2 || freeCount == 0);
PageEnt& pe2 = getPageEnt(pageI2); pageI = pe.m_nextPageI;
assert(pe2.m_prevPageI == pageI);
}
}
pageI = pageI2;
count++;
} }
assert(pl.m_pageCount == count);
} }
} }
// Consistency check of a doubly linked page list: neighbour links must
// agree in both directions, head/tail must match the end pages, the
// stored page count must match, and a page allocated to a pool
// (non-zero type) must have its free list head on the same page.
void
SuperPool::verifyPageList(PageList& pl)
{
  Uint32 count = 0;
  PtrI pageI = pl.m_headPageI;
  while (pageI != RNIL) {
    PageEnt& pe = getPageEnt(pageI);
    if (pe.m_prevPageI == RNIL) {
      // only the head page may lack a predecessor
      assert(count == 0);
    } else {
      PageEnt& prevPe = getPageEnt(pe.m_prevPageI);
      assert(prevPe.m_nextPageI == pageI);
    }
    if (pe.m_nextPageI == RNIL) {
      assert(pl.m_tailPageI == pageI);
    } else {
      PageEnt& nextPe = getPageEnt(pe.m_nextPageI);
      assert(nextPe.m_prevPageI == pageI);
    }
    if (pe.m_pageType != 0) {
      assert(pe.m_freeRecI != NNIL);
      // free list head i-value must map back to this very page entry
      PageEnt& pe2 = getPageEnt(pe.m_freeRecI);
      assert(&pe == &pe2);
    } else {
      assert(pe.m_freeRecI == NNIL);
    }
    pageI = pe.m_nextPageI;
    count++;
  }
  assert(pl.m_pageCount == count);
}
// GroupPool
GroupPool::GroupPool(SuperPool& sp) :
m_superPool(sp),
m_minPct(0),
m_minPages(0),
m_totPages(0),
m_freeList()
{
}
GroupPool::~GroupPool()
{
}
// Set the group's percentage reservation, keeping the super pool's
// aggregate (m_groupMinPct) in sync across repeated calls.
void
GroupPool::setMinPct(Uint32 minPct)
{
  SuperPool& sp = m_superPool;
  // subtract any previous value
  assert(sp.m_groupMinPct >= m_minPct);
  sp.m_groupMinPct -= m_minPct;
  // add new value
  sp.m_groupMinPct += minPct;
  m_minPct = minPct;
}
// Set the group's absolute page reservation, keeping the super pool's
// aggregate (m_groupMinPages) in sync across repeated calls.
void
GroupPool::setMinPages(Uint32 minPages)
{
  SuperPool& sp = m_superPool;
  // subtract any previous value
  assert(sp.m_groupMinPages >= m_minPages);
  sp.m_groupMinPages -= m_minPages;
  // add new value
  sp.m_groupMinPages += minPages;
  m_minPages = minPages;
}
// HeapPool // HeapPool
HeapPool::HeapPool(Uint32 pageSize, Uint32 pageBits) : HeapPool::HeapPool(Uint32 pageSize, Uint32 pageBits) :
SuperPool(pageSize, pageBits), SuperPool(pageSize, pageBits),
m_areaHead(), m_areaHead(),
m_currArea(&m_areaHead), m_currArea(&m_areaHead),
m_lastArea(&m_areaHead), m_lastArea(&m_areaHead)
m_mallocPart(4)
{ {
} }
bool
HeapPool::init()
{
const Uint32 pageBits = m_pageBits;
if (! SuperPool::init())
return false;;
// allocate page entry array
Uint32 peBytes = (1 << pageBits) * sizeof(PageEnt);
m_pageEnt = static_cast<PageEnt*>(malloc(peBytes));
if (m_pageEnt == 0)
return false;
memset(m_pageEnt, 0, peBytes);
// allocate type check array
Uint32 tcWords = 1 << (pageBits - (5 - SP_CHECK_LOG2));
m_typeCheck = static_cast<Uint32*>(malloc(tcWords << 2));
if (m_typeCheck == 0)
return false;
memset(m_typeCheck, 0, tcWords << 2);
// allocate initial data
assert(m_totalSize == 0);
if (! allocMoreData(m_initSize))
return false;
return true;
}
HeapPool::~HeapPool() HeapPool::~HeapPool()
{ {
free(m_pageEnt); free(m_pageEnt);
free(m_typeCheck); free(m_pageType);
Area* ap; Area* ap;
while ((ap = m_areaHead.m_nextArea) != 0) { while ((ap = m_areaHead.m_nextArea) != 0) {
m_areaHead.m_nextArea = ap->m_nextArea; m_areaHead.m_nextArea = ap->m_nextArea;
...@@ -349,29 +631,28 @@ HeapPool::Area::Area() : ...@@ -349,29 +631,28 @@ HeapPool::Area::Area() :
m_nextArea(0), m_nextArea(0),
m_firstPageI(RNIL), m_firstPageI(RNIL),
m_currPage(0), m_currPage(0),
m_numPages(0), m_memory(0),
m_memory(0) m_pages(0),
m_numPages(0)
{ {
} }
SuperPool::PtrI SuperPool::PtrI
HeapPool::getNewPage() HeapPool::getNewPage()
{ {
const Uint32 pageSize = m_pageSize;
const Uint32 pageBits = m_pageBits;
const Uint32 recBits= 32 - pageBits;
Area* ap = m_currArea; Area* ap = m_currArea;
if (ap->m_currPage == ap->m_numPages) { if (ap->m_currPage == ap->m_numPages) {
// area is used up // area is used up
if (ap->m_nextArea == 0) { if (ap->m_nextArea == 0) {
// todo dynamic increase if (! allocMemory())
assert(m_incrSize == 0); return RNIL;
return RNIL;
} }
ap = m_currArea = ap->m_nextArea; ap = m_currArea = ap->m_nextArea;
assert(ap != 0);
} }
assert(ap->m_currPage < ap->m_numPages); assert(ap->m_currPage < ap->m_numPages);
PtrI pageI = ap->m_firstPageI; PtrI pageI = ap->m_firstPageI;
Uint32 recBits = m_recBits;
Int32 ip = (Int32)pageI >> recBits; Int32 ip = (Int32)pageI >> recBits;
ip += ap->m_currPage; ip += ap->m_currPage;
pageI = ip << recBits; pageI = ip << recBits;
...@@ -380,63 +661,90 @@ HeapPool::getNewPage() ...@@ -380,63 +661,90 @@ HeapPool::getNewPage()
} }
bool bool
HeapPool::allocMoreData(size_t size) HeapPool::allocInit()
{ {
const Uint32 pageSize = m_pageSize; Uint32 pageCount = (1 << m_pageBits);
const Uint32 pageBits = m_pageBits; if (m_pageEnt == 0) {
const Uint32 recBits = 32 - pageBits; // allocate page entry array
const Uint32 incrSize = m_incrSize; Uint32 bytes = pageCount * sizeof(PageEnt);
const Uint32 incrPages = incrSize / pageSize; m_pageEnt = static_cast<PageEnt*>(malloc(bytes));
const Uint32 mallocPart = m_mallocPart; if (m_pageEnt == 0)
size = SP_ALIGN_SIZE(size, pageSize);
if (incrSize != 0)
size = SP_ALIGN_SIZE(size, incrSize);
Uint32 needPages = size / pageSize;
while (needPages != 0) {
Uint32 wantPages = needPages;
if (incrPages != 0 && wantPages > incrPages)
wantPages = incrPages;
Uint32 tryPages = 0;
void* p1 = 0;
for (Uint32 i = mallocPart; i > 0 && p1 == 0; i--) {
// one page is usually wasted due to alignment to memory root
tryPages = ((wantPages + 1) * i) / mallocPart;
if (tryPages < 2)
break;
p1 = malloc(pageSize * tryPages);
}
if (p1 == 0)
return false; return false;
if (m_memRoot == 0) { for (Uint32 i = 0; i < pageCount; i++)
// set memory root at first "big" alloc new (&m_pageEnt[i]) PageEnt();
// assume malloc header makes later ip = -1 impossible }
m_memRoot = p1; if (m_pageType == 0) {
} // allocate type check array
void* p2 = SP_ALIGN_PTR(p1, m_memRoot, pageSize); Uint32 bytes = pageCount;
Uint32 numPages = tryPages - (p1 != p2); m_pageType = static_cast<Uint8*>(malloc(bytes));
my_ptrdiff_t ipL = ((Uint8*)p2 - (Uint8*)m_memRoot) / pageSize; if (m_pageType == 0)
Int32 ip = (Int32)ipL;
Int32 lim = 1 << (pageBits - 1);
if (! (ip == ipL && -lim <= ip && ip + numPages < lim)) {
free(p1);
return false; return false;
} memset(m_pageType, 0, bytes);
assert(ip != -1); }
PtrI pageI = ip << recBits; return true;
needPages = (needPages >= numPages ? needPages - numPages : 0); }
m_totalSize += numPages * pageSize;
// allocate new area bool
HeapPool::allocArea(Area* ap, Uint32 tryPages)
{
Uint32 pageSize = m_pageSize;
// one page is usually lost due to alignment
Uint8* p1 = (Uint8*)malloc(pageSize * (tryPages + 1));
if (p1 == 0)
return false;
// align
UintPtr n1 = (UintPtr)p1;
UintPtr n2 = SP_ALIGN(n1, (UintPtr)pageSize);
Uint8* p2 = p1 + (n2 - n1);
assert(p2 >= p1 && p2 - p1 < pageSize && (UintPtr)p2 % pageSize == 0);
// set memory root to first allocated page
if (m_memRoot == 0)
m_memRoot = p2;
// convert to i-value
Uint32 pageI = getPageI(p2);
ap->m_firstPageI = pageI;
ap->m_currPage = 0;
ap->m_memory = p1;
ap->m_pages = p2;
ap->m_numPages = tryPages + (p1 == p2);
return true;
}
// Grow the heap pool: first allocation is m_initPages, later ones
// m_incrPages, both capped by m_maxPages.  Memory is obtained in areas
// of at most m_incrPages pages, halving the request on malloc failure.
// Returns false when the limit is reached or allocation fails.
bool
HeapPool::allocMemory()
{
  if (! allocInit())
    return false;
  // compute number of additional pages needed
  if (m_maxPages <= m_totPages)
    return false;
  Uint32 needPages = (m_totPages == 0 ? m_initPages : m_incrPages);
  if (needPages > m_maxPages - m_totPages)
    needPages = m_maxPages - m_totPages;
  while (needPages != 0) {
    // add new area
    Area* ap = static_cast<Area*>(malloc(sizeof(Area)));
    if (ap == 0)
      return false;
    new (ap) Area();
    m_lastArea->m_nextArea = ap;
    m_lastArea = ap;
    // initial malloc is done in m_incrPages pieces
    Uint32 wantPages = needPages;
    if (m_incrPages != 0 && wantPages > m_incrPages)
      wantPages = m_incrPages;
    // back off geometrically on allocation failure
    Uint32 tryPages = wantPages;
    while (tryPages != 0) {
      if (allocArea(ap, tryPages))
        break;
      tryPages /= 2;
    }
    if (tryPages == 0)
      return false;
    // update counts
    Uint32 numPages = ap->m_numPages;
    m_totPages += numPages;
    needPages = (needPages > numPages ? needPages - numPages : 0);
  }
  return true;
}
...@@ -22,20 +22,18 @@ ...@@ -22,20 +22,18 @@
#include <pc.hpp> #include <pc.hpp>
#include <ErrorReporter.hpp> #include <ErrorReporter.hpp>
#define NDB_SP_VERIFY_LEVEL 1
/* /*
* SuperPool - super pool for record pools (abstract class) * SuperPool - super pool for record pools (abstract class)
* *
* Documents SuperPool and RecordPool<T>. * Documents: SuperPool GroupPool RecordPool<T>
* *
* GENERAL * SUPER POOL
* *
* A "super pool" is a shared pool of pages of fixed size. A "record * A "super pool" is a shared pool of pages of fixed size. A "record
* pool" is a pool of records of fixed size. One super pool instance is * pool" is a pool of records of fixed size. One super pool instance is
* used by any number of record pools to allocate their memory. * used by a number of record pools to allocate their memory. A special
* A special case is a "page pool" where a record is a simple page, * case is a "page pool" where a record is a simple page whose size
* possibly smaller than super pool page. * divides super pool page size.
* *
* A record pool allocates memory in pages. Thus each used page is * A record pool allocates memory in pages. Thus each used page is
* associated with one record pool and one record type. The records on * associated with one record pool and one record type. The records on
...@@ -49,27 +47,26 @@ ...@@ -49,27 +47,26 @@
* record is stored as an "i-value" from which the record pointer "p" is * record is stored as an "i-value" from which the record pointer "p" is
* computed. In super pool the i-value is a Uint32 with two parts: * computed. In super pool the i-value is a Uint32 with two parts:
* *
* - "ip" index of page within super pool (high pageBits) * - "ip" index of page within super pool (high "pageBits")
* - "ir" index of record within page (low recBits) * - "ir" index of record within page (low "recBits")
*
* At most 16 recBits are used, the rest are zero.
* *
* The translation between "ip" and page address is described in next * The translation between "ip" and page address is described in next
* section. Once page address is known, the record address is found * section. Once page address is known, the record address is found
* from "ir" in the obvious way. * from "ir" in the obvious way.
* *
* The main advantage with i-value is that it can be verified. The * One advantage of i-value is that it can be verified. The level of
* level of verification depends on compile type (release, debug). * verification can depend on compile options.
* *
* - "v0" minimal sanity check * - "v1" check i-value specifies valid page
* - "v1" check record type matches page type, see below * - "v2" check record type matches page type, see below
* - "v2" check record is in use (not yet implemented) * - "v3" check record is in use
* - "v4" check unused record is unmodified
* *
* Another advantage of a 32-bit i-value is that it extends the space of * Another advantage of a 32-bit i-value is that it extends the space of
* 32-bit addressable records on a 64-bit platform. * 32-bit addressable records on a 64-bit platform.
* *
* RNIL is 0xffffff00 and indicates NULL i-value. To avoid hitting RNIL
* it is required that pageBits <= 30 and that the maximum value of the
* range (2^pageBits-1) is not used.
*
* MEMORY ROOT * MEMORY ROOT
* *
* This super pool requires a "memory root" i.e. a memory address such * This super pool requires a "memory root" i.e. a memory address such
...@@ -77,13 +74,28 @@ ...@@ -77,13 +74,28 @@
* *
* page address = memory root + (signed)ip * page size * page address = memory root + (signed)ip * page size
* *
* This is possible on most platforms, provided that the memory root and * This is possible on all platforms, provided that the memory root and
* all pages are either on the heap or on the stack, in order to keep * all pages are either on the heap or on the stack, in order to keep
* the size of "ip" reasonably small. * the size of "ip" reasonably small.
* *
* The cast (signed)ip is done as integer of pageBits bits. "ip" has * The cast (signed)ip is done as integer of pageBits bits. "ip" has
* same sign bit as i-value "i" so (signed)ip = (Int32)i >> recBits. * same sign bit as i-value "i" so (signed)ip = (Int32)i >> recBits.
* The RNIL restriction can be expressed as (signed)ip != -1. *
* RESERVED I-VALUES
*
* RNIL is 0xffffff00 (signed -256). It is used everywhere in NDB as
* "null pointer" i.e. as an i-value which does not point to a record.
* In addition the signed values -255 to -1 are reserved for use by the
* application.
*
* An i-value with all "ir" bits set is used as terminator in free
* record list. Unlike RNIL, it still has valid page bits "ip".
*
* Following restrictions avoid hitting the reserved values:
*
* - pageBits is <= 30
* - the maximum "ip" value 2^pageBits-1 (signed -1) is not used
* - the maximum "ir" value 2^recBits-1 is not used
* *
* PAGE ENTRIES * PAGE ENTRIES
* *
...@@ -95,37 +107,54 @@ ...@@ -95,37 +107,54 @@
* - pointers (as i-values) to next and previous page in list * - pointers (as i-values) to next and previous page in list
* *
* Page entry cannot be stored on the page itself since this prevents * Page entry cannot be stored on the page itself since this prevents
* aligning pages to OS block size and the use of BATs (don't ask) for * aligning pages to OS block size and the use of BATs for page pools in
* page pools in NDB. For now the implementation provides an array of * NDB. For now the implementation provides an array of page entries
* page entries with place for all (2^pageBits) entries. * with place for all potential (2^pageBits) entries.
* *
* PAGE TYPE * PAGE TYPE
* *
* Page type is (in principle) unique to the record pool using the super * Page type is unique to the record pool using the super pool. It is
* pool. It is assigned in record pool constructor. Page type zero * assigned in record pool constructor. Page type zero means that the
* means that the page is free i.e. not allocated to a record pool. * page is free i.e. not allocated to a record pool.
* *
* Each "i-p" conversion checks ("v1") that the record belongs to same * Each "i-p" conversion checks ("v2") that the record belongs to same
* pool as the page. This check is much more common than page or record * pool as the page. This check is much more common than page or record
* allocation. To make it cache effective, there is a separate array of * allocation. To make it cache effective, there is a separate page
* reduced "type bits" (computed from real type). * type array. It truncates type to one non-zero byte.
*
* GROUP POOL
*
* Each record pool belongs to a group. The group specifies minimum
* size or memory percentage the group must be able to allocate. The
* sum of the minimum sizes of group pools is normally smaller than
* super pool size. This provides unclaimed memory which a group can
* use temporarily to allocate more than its minimum.
* *
* FREE LISTS * The record pools within a group compete freely for the available
* memory within the group.
* *
* A record is either used or on the free list of the record pool. * Typical example is group of all metadata pools. The group allows
* A page has a use count i.e. number of used records. When use count * specifying the memory to reserve for metadata, without having to
* drops to zero the page can be returned to the super pool. This is * specify number of tables, attributes, indexes, triggers, etc.
* not necessarily done at once, or ever.
* *
* To make freeing pages feasible, the record pool free list has two * PAGE LISTS
* levels. There are available pages (some free) and a singly linked
* free list within the page. A page allocated to record pool is on one
* of 4 lists:
* *
* - free page list (all free, available) * Super pool has free page list. Each group pool uses it to allocate
* - active page list (some free, some used, available) * its own free page list. And each record pool within the group uses
* - full page list (none free) * the group's free list to allocate its pages.
* - current page (list of 1), see below *
* A page allocated to a record pool has a use count i.e. number of used
* records. When use count drops to zero the page can be returned to
* the group. This is not necessarily done at once.
*
* The list of free records in a record pool has two levels. There are
* available pages (some free) and a singly linked free list within the
* page. A page allocated to record pool is on one of 4 lists:
*
* - free page (all free, available, could be returned to group)
* - busy page (some free, some used, available)
* - full page (none free)
* - current page (list of one), see below
* *
* Some usage types (temporary pools) may never free records. They pay * Some usage types (temporary pools) may never free records. They pay
* a small penalty for the extra overhead. * a small penalty for the extra overhead.
...@@ -133,7 +162,7 @@ ...@@ -133,7 +162,7 @@
* RECORD POOL * RECORD POOL
* *
* A pool of records which allocates its memory from a super pool * A pool of records which allocates its memory from a super pool
* instance specified in the constructor. There are 3 basic operations: * instance via a group pool. There are 3 basic operations:
* *
* - getPtr - translate i-value to pointer-to-record p * - getPtr - translate i-value to pointer-to-record p
* - seize - allocate record * - seize - allocate record
...@@ -141,76 +170,64 @@ ...@@ -141,76 +170,64 @@
* *
* CURRENT PAGE * CURRENT PAGE
* *
* getPtr is a fast computation which does not touch the page. For * getPtr is a fast computation which does not touch the page entry.
* seize and release there is an optimization: * For seize (and release) there is a small optimization.
* *
* Define "current page" as page of latest seize or release. Its page * The "current page" is the page of latest seize. It is unlinked from
* entry is cached under record pool instance. The page is removed from * its normal list and the free record pointer is stored under record
* its normal list. Seize and release on current page are fast and * pool instance.
* avoid touching the page. The current page is used until
* *
* - seize and current page is full * The page remains current until there is a seize and the page is full.
* - release and the page is not current page * Then the real page entry and its list membership are updated, and
* a new page is made current.
* *
* Then the real page entry is updated and the page is added to the * This implies that each (active) record pool allocates at least one
* appropriate list, and a new page is made current. * page which is never returned to the group.
* *
* PAGE POLICY * PAGE POLICY
* *
* Allocating new page to record pool is expensive. Therefore record * A group pool returns its "excess" (above minimum) free pages to the
* pool should not always return empty pages to super pool. There are * super pool immediately.
* two trivial policies, each with problems: *
* Allocating a new page to a record pool is expensive due to free list
* setup. Therefore a record pool should not always return empty pages
* to the group. Policies:
*
* - "pp1" never return empty page to the group
* - "pp2" always return empty (non-current) page to the group
* - "pp3" simple hysteresis
* *
* - "pp1" never return empty page to super pool * Last one "pp3" is used. It works as follows:
* - "pp2" always return empty page to super pool
* *
* This implementation uses "pp2" for now. A real policy is implemented * When a page becomes free, check if number of free records exceeds
* in next version. * some fixed fraction of all records. If it does, move all free pages
* to the group. Current page is ignored in the check.
* *
* OPEN ISSUES AND LIMITATIONS * TODO
* *
* - smarter (virtual) placement of check bits & page entries * Define abstract class SuperAlloc. Make SuperPool a concrete class
* - should getPtr etc be inlined? (too much code) * with SuperAlloc instance in ctor. Replace HeapPool by HeapAlloc.
* - real page policy
* - other implementations (only HeapPool is done)
* - super pool list of all record pools, for statistics etc
* - access by multiple threads is not supported
*/ */
// align size // Types forward.
#define SP_ALIGN_SIZE(sz, al) \ class GroupPool;
(((sz) + (al) - 1) & ~((al) - 1))
// align pointer relative to base
#define SP_ALIGN_PTR(p, base, al) \
(void*)((Uint8*)(base) + SP_ALIGN_SIZE((Uint8*)(p) - (Uint8*)(base), (al)))
class SuperPool { class SuperPool {
public: public:
// Type of i-value, used to reference both pages and records. Page // Type of i-value, used to reference both pages and records.
// index "ip" occupies the high bits. The i-value of a page is same
// as i-value of record 0 on the page.
typedef Uint32 PtrI; typedef Uint32 PtrI;
// Size and address alignment given as number of bytes (power of 2). // Page entry.
STATIC_CONST( SP_ALIGN = 8 );
// Page entry. Currently allocated as array of (2^pageBits). struct PageEnt { struct PageEnt {
struct PageEnt { struct PageEnt {
PageEnt(); PageEnt();
Uint32 m_pageType; Uint16 m_pageType; // zero if not in record pool
Uint32 m_freeRecI; Uint16 m_useCount; // used records on the page
Uint32 m_useCount; PtrI m_freeRecI; // first free record on the page
PtrI m_nextPageI; PtrI m_nextPageI;
PtrI m_prevPageI; PtrI m_prevPageI;
}; };
// Number of bits for cache effective type check given as log of 2. // Doubly-linked list of page entries.
// Example: 2 means 4 bits and uses 32k for 2g of 32k pages.
STATIC_CONST( SP_CHECK_LOG2 = 2 );
// Doubly-linked list of pages. There is one free list in super pool
// and free, active, full list in each record pool.
struct PageList { struct PageList {
PageList(); PageList();
PageList(PtrI pageI); PageList(PtrI pageI);
...@@ -219,234 +236,209 @@ public: ...@@ -219,234 +236,209 @@ public:
Uint32 m_pageCount; Uint32 m_pageCount;
}; };
// Record pool information. Each record pool instance contains one. // Constructor. Gives page size in bytes (must be power of 2) and
struct RecInfo {
RecInfo(Uint32 recType, Uint32 recSize);
const Uint32 m_recType;
const Uint32 m_recSize;
Uint32 m_maxUseCount; // could be computed
Uint32 m_currPageI; // current page
Uint32 m_currFreeRecI;
Uint32 m_currUseCount;
Uint32 m_totalUseCount; // total per pool
Uint32 m_totalRecCount;
PageList m_freeList;
PageList m_activeList;
PageList m_fullList;
};
// Constructor. Gives page size in bytes (excluding page header) and
// number of bits to use for page index "ip" in i-value. // number of bits to use for page index "ip" in i-value.
SuperPool(Uint32 pageSize, Uint32 pageBits); SuperPool(Uint32 pageSize, Uint32 pageBits);
// Initialize. Must be called after setting sizes or other parameters
// and before the pool is used.
virtual bool init();
// Destructor. // Destructor.
virtual ~SuperPool() = 0; virtual ~SuperPool() = 0;
// Translate i-value to page entry. // Move all pages from second list to end of first list.
void movePages(PageList& pl1, PageList& pl2);
// Add page to beginning of page list.
void addHeadPage(PageList& pl, PtrI pageI);
// Add page to end of page list.
void addTailPage(PageList& pl, PtrI pageI);
// Remove any page from page list.
void removePage(PageList& pl, PtrI pageI);
// Translate i-value ("ri" ignored) to page entry.
PageEnt& getPageEnt(PtrI pageI); PageEnt& getPageEnt(PtrI pageI);
// Translate i-value to page address. // Translate i-value ("ri" ignored) to page address.
void* getPageP(PtrI pageI); void* getPageP(PtrI pageI);
// Translate page address to i-value (unused). // Translate page address to i-value. Address must be page-aligned to
// memory root. Returns RNIL if "ip" range exceeded.
PtrI getPageI(void* pageP); PtrI getPageI(void* pageP);
// Given type, return non-zero reduced type check bits. // Record pool info.
Uint32 makeCheckBits(Uint32 type); struct RecInfo {
RecInfo(GroupPool& gp, Uint32 recSize);
// Get type check bits from type check array. GroupPool& m_groupPool;
Uint32 getCheckBits(PtrI pageI); Uint32 m_recSize;
Uint16 m_recType;
// Set type check bits in type check array. Uint16 m_maxPerPage;
void setCheckBits(PtrI pageI, Uint32 type); PtrI m_freeRecI; // first free record on current page
Uint32 m_useCount; // used records excluding current page
PageList m_pageList[3]; // 0-free 1-busy 2-full
Uint16 m_hyX; // hysteresis fraction x/y in "pp3"
Uint16 m_hyY;
};
// Translate i-value to record address. // Translate i-value to record address.
void* getRecP(PtrI recI, RecInfo& ri); void* getRecP(PtrI recI, RecInfo& ri);
// Move all pages from second list to end of first list. // Count records on page free list.
void movePages(PageList& pl1, PageList& pl2); Uint32 getFreeCount(RecInfo& ri, PtrI freeRecPtrI);
// Add page to beginning of page list. // Compute total number of pages in pool.
void addHeadPage(PageList& pl, PtrI pageI); Uint32 getRecPageCount(RecInfo& ri);
// Add page to end of page list. // Compute total number of records (used or not) in pool.
void addTailPage(PageList& pl, PtrI pageI); Uint32 getRecTotCount(RecInfo& ri);
// Remove any page from page list. // Compute total number of used records in pool.
void removePage(PageList& pl, PtrI pageI); Uint32 getRecUseCount(RecInfo& ri);
// Compute record pool page list index (0,1,2).
Uint32 getRecPageList(RecInfo& ri, PageEnt& pe);
// Add current page.
void addCurrPage(RecInfo& ri, PtrI pageI);
// Set current page. Previous current page is updated and added to // Remove current page.
// appropriate list. void removeCurrPage(RecInfo& ri);
void setCurrPage(RecInfo& ri, PtrI pageI);
// Get page with some free records and make it current. Takes head of // Get page with some free records and make it current. Takes head of
// active or free list, or else gets free page from super pool. // used or free list, or else gets free page from group pool.
bool getAvailPage(RecInfo& ri); bool getAvailPage(RecInfo& ri);
// Get free page from super pool and add it to record pool free list. // Get free page from group pool and add it to record pool free list.
// This is an expensive subroutine of getAvailPage(). // This is an expensive subroutine of getAvailPage(RecInfo&):
PtrI getFreePage(RecInfo& ri); PtrI getFreePage(RecInfo& ri);
// Get new free page from the implementation. // Get free detached (not on list) page from group pool.
PtrI getFreePage(GroupPool& gp);
// Get free detached page from super pool.
PtrI getFreePage();
// Get new free detached page from the implementation.
virtual PtrI getNewPage() = 0; virtual PtrI getNewPage() = 0;
// Set 3 size parameters, rounded to page size. If called before // Initialize free list etc. Subroutine of getFreePage(RecInfo&).
// init() then init() allocates the initial size. void initFreePage(RecInfo& ri, PtrI pageI);
void setSizes(size_t initSize = 0, size_t incrSize = 0, size_t maxSize = 0);
const Uint32 m_pageSize; // Release record which is not on current page.
const Uint32 m_pageBits; void releaseNotCurrent(RecInfo& ri, PtrI recI);
// implementation must set up these pointers
void* m_memRoot; // Free pages from record pool according to page policy.
PageEnt* m_pageEnt; void freeRecPages(RecInfo& ri);
Uint32* m_typeCheck;
Uint32 m_typeSeq; // Free all pages in record pool.
PageList m_pageList; void freeAllRecPages(RecInfo& ri, bool force);
size_t m_totalSize;
size_t m_initSize; // Set pool size parameters in pages. Call allocMemory() for changes
size_t m_incrSize; // (such as extra mallocs) to take effect.
size_t m_maxSize; void setInitPages(Uint32 initPages);
void setIncrPages(Uint32 incrPages);
void setMaxPages(Uint32 maxPages);
// Get number of pages reserved by all groups.
Uint32 getGpMinPages();
// Get number of pages reserved to a group.
Uint32 getMinPages(GroupPool& gp);
// Get max number of pages a group can try to allocate.
Uint32 getMaxPages(GroupPool& gp);
// Allocate more memory according to current parameters. Returns
// false if no new memory was allocated. Otherwise returns true,
// even if the amount allocated was less than requested.
virtual bool allocMemory() = 0;
// Debugging. // Debugging.
void verify(RecInfo& ri); void verify(RecInfo& ri);
void verifyPageList(PageList& pl);
// Super pool parameters.
const Uint32 m_pageSize;
const Uint16 m_pageBits;
const Uint16 m_recBits;
const Uint32 m_recMask;
// Implementation must set up these 3 pointers.
void* m_memRoot;
PageEnt* m_pageEnt;
Uint8* m_pageType;
// Free page list.
PageList m_freeList;
// Free pages and sizes.
Uint32 m_initPages;
Uint32 m_incrPages;
Uint32 m_maxPages;
Uint32 m_totPages;
Uint32 m_typeCount;
// Reserved and allocated by group pools.
Uint32 m_groupMinPct;
Uint32 m_groupMinPages;
Uint32 m_groupTotPages;
}; };
inline SuperPool::PageEnt& inline SuperPool::PageEnt&
SuperPool::getPageEnt(PtrI pageI) SuperPool::getPageEnt(PtrI pageI)
{ {
Uint32 ip = pageI >> (32 - m_pageBits); Uint32 ip = pageI >> m_recBits;
return m_pageEnt[ip]; return m_pageEnt[ip];
} }
inline void* inline void*
SuperPool::getPageP(PtrI ptrI) SuperPool::getPageP(PtrI ptrI)
{ {
Int32 ip = (Int32)ptrI >> (32 - m_pageBits); Int32 ip = (Int32)ptrI >> m_recBits;
my_ptrdiff_t sz = m_pageSize; return (Uint8*)m_memRoot + ip * (my_ptrdiff_t)m_pageSize;
void* pageP = (Uint8*)m_memRoot + ip * sz;
return pageP;
}
inline Uint32
SuperPool::makeCheckBits(Uint32 type)
{
Uint32 shift = 1 << SP_CHECK_LOG2;
Uint32 mask = (1 << shift) - 1;
return 1 + type % mask;
}
inline Uint32
SuperPool::getCheckBits(PtrI pageI)
{
Uint32 ip = pageI >> (32 - m_pageBits);
Uint32 xp = ip >> (5 - SP_CHECK_LOG2);
Uint32 yp = ip & (1 << (5 - SP_CHECK_LOG2)) - 1;
Uint32& w = m_typeCheck[xp];
Uint32 shift = 1 << SP_CHECK_LOG2;
Uint32 mask = (1 << shift) - 1;
// get
Uint32 bits = (w >> yp * shift) & mask;
return bits;
}
inline void
SuperPool::setCheckBits(PtrI pageI, Uint32 type)
{
Uint32 ip = pageI >> (32 - m_pageBits);
Uint32 xp = ip >> (5 - SP_CHECK_LOG2);
Uint32 yp = ip & (1 << (5 - SP_CHECK_LOG2)) - 1;
Uint32& w = m_typeCheck[xp];
Uint32 shift = 1 << SP_CHECK_LOG2;
Uint32 mask = (1 << shift) - 1;
// set
Uint32 bits = makeCheckBits(type);
w &= ~(mask << yp * shift);
w |= (bits << yp * shift);
} }
inline void* inline void*
SuperPool::getRecP(PtrI ptrI, RecInfo& ri) SuperPool::getRecP(PtrI ptrI, RecInfo& ri)
{ {
const Uint32 recMask = (1 << (32 - m_pageBits)) - 1; Uint32 ip = ptrI >> m_recBits;
PtrI pageI = ptrI & ~recMask; assert(m_pageType[ip] == (ri.m_recType & 0xFF));
#if NDB_SP_VERIFY_LEVEL >= 1 Uint32 ir = ptrI & m_recMask;
Uint32 bits1 = getCheckBits(pageI); return (Uint8*)getPageP(ptrI) + ir * ri.m_recSize;
Uint32 bits2 = makeCheckBits(ri.m_recType);
assert(bits1 == bits2);
#endif
void* pageP = getPageP(pageI);
Uint32 ir = ptrI & recMask;
void* recP = (Uint8*)pageP + ir * ri.m_recSize;
return recP;
} }
/* /*
* HeapPool - SuperPool on heap (concrete class) * GroupPool - subset of a super pool pages (concrete class)
*
* A super pool based on malloc with memory root on the heap. This
* pool type has 2 realistic uses:
*
* - a small pool with only initial malloc and pageBits set to match
* - the big pool from which all heap allocations are done
*
* A "smart" malloc may break "ip" limit by using different VM areas for
* different sized requests. For this reason malloc is done in units of
* increment size if possible. Memory root is set to start of first
* malloc.
*/ */
class HeapPool : public SuperPool { class GroupPool {
public: public:
// Describes malloc area. The areas are kept in singly linked list. // Types.
// There is a list head and pointers to current and last area. typedef SuperPool::PageList PageList;
struct Area {
Area();
Area* m_nextArea;
PtrI m_firstPageI;
Uint32 m_currPage;
Uint32 m_numPages;
void* m_memory;
};
// Constructor. // Constructor.
HeapPool(Uint32 pageSize, Uint32 pageBits); GroupPool(SuperPool& sp);
// Initialize.
virtual bool init();
// Destructor. // Destructor.
virtual ~HeapPool(); ~GroupPool();
// Use malloc to allocate more. // Set minimum pct reserved in super pool.
bool allocMoreData(size_t size); void setMinPct(Uint32 resPct);
// Get new page from current area. // Set minimum pages reserved in super pool.
virtual PtrI getNewPage(); void setMinPages(Uint32 resPages);
// List of malloc areas.
Area m_areaHead;
Area* m_currArea;
Area* m_lastArea;
// Fraction of malloc size to try if cannot get all in one. SuperPool& m_superPool;
Uint32 m_mallocPart; Uint32 m_minPct;
Uint32 m_minPages;
Uint32 m_totPages;
PageList m_freeList;
}; };
/* /*
* RecordPool - record pool using one super pool instance (template) * RecordPool - record pool using one super pool instance (template)
*
* Documented under SuperPool. Satisfies ArrayPool interface.
*/ */
template <class T> template <class T>
class RecordPool { class RecordPool {
public: public:
// Constructor. // Constructor.
RecordPool(SuperPool& superPool); RecordPool(GroupPool& gp);
// Destructor. // Destructor.
~RecordPool(); ~RecordPool();
...@@ -462,9 +454,9 @@ public: ...@@ -462,9 +454,9 @@ public:
// todo variants of basic methods // todo variants of basic methods
// Return all pages to super pool. The force flag is required if // Return all pages to group pool. The force flag is required if
// there are any used records. // there are any used records.
void free(bool force); void freeAllRecPages(bool force);
SuperPool& m_superPool; SuperPool& m_superPool;
SuperPool::RecInfo m_recInfo; SuperPool::RecInfo m_recInfo;
...@@ -472,24 +464,17 @@ public: ...@@ -472,24 +464,17 @@ public:
template <class T> template <class T>
inline inline
RecordPool<T>::RecordPool(SuperPool& superPool) : RecordPool<T>::RecordPool(GroupPool& gp) :
m_superPool(superPool), m_superPool(gp.m_superPool),
m_recInfo(1 + superPool.m_typeSeq++, sizeof(T)) m_recInfo(gp, sizeof(T))
{ {
SuperPool::RecInfo& ri = m_recInfo;
assert(sizeof(T) == SP_ALIGN_SIZE(sizeof(T), sizeof(Uint32)));
Uint32 maxUseCount = superPool.m_pageSize / sizeof(T);
Uint32 sizeLimit = 1 << (32 - superPool.m_pageBits);
if (maxUseCount >= sizeLimit)
maxUseCount = sizeLimit;
ri.m_maxUseCount = maxUseCount;
} }
template <class T> template <class T>
inline inline
RecordPool<T>::~RecordPool() RecordPool<T>::~RecordPool()
{ {
free(true); freeAllRecPages(true);
} }
template <class T> template <class T>
...@@ -506,18 +491,19 @@ RecordPool<T>::seize(Ptr<T>& ptr) ...@@ -506,18 +491,19 @@ RecordPool<T>::seize(Ptr<T>& ptr)
{ {
SuperPool& sp = m_superPool; SuperPool& sp = m_superPool;
SuperPool::RecInfo& ri = m_recInfo; SuperPool::RecInfo& ri = m_recInfo;
if (ri.m_currFreeRecI != RNIL || sp.getAvailPage(ri)) { Uint32 recMask = sp.m_recMask;
SuperPool::PtrI recI = ri.m_currFreeRecI; // get current page
if ((ri.m_freeRecI & recMask) != recMask ||
sp.getAvailPage(ri)) {
SuperPool::PtrI recI = ri.m_freeRecI;
void* recP = sp.getRecP(recI, ri); void* recP = sp.getRecP(recI, ri);
ri.m_currFreeRecI = *(Uint32*)recP; ri.m_freeRecI = *(Uint32*)recP;
Uint32 useCount = ri.m_currUseCount;
assert(useCount < ri.m_maxUseCount);
ri.m_currUseCount = useCount + 1;
ri.m_totalUseCount++;
ptr.i = recI; ptr.i = recI;
ptr.p = static_cast<T*>(recP); ptr.p = static_cast<T*>(recP);
return true; return true;
} }
ptr.i = RNIL;
ptr.p = 0;
return false; return false;
} }
...@@ -527,35 +513,79 @@ RecordPool<T>::release(Ptr<T>& ptr) ...@@ -527,35 +513,79 @@ RecordPool<T>::release(Ptr<T>& ptr)
{ {
SuperPool& sp = m_superPool; SuperPool& sp = m_superPool;
SuperPool::RecInfo& ri = m_recInfo; SuperPool::RecInfo& ri = m_recInfo;
const Uint32 recMask = (1 << (32 - sp.m_pageBits)) - 1;
SuperPool::PtrI recI = ptr.i; SuperPool::PtrI recI = ptr.i;
SuperPool::PtrI pageI = recI & ~recMask; Uint32 recMask = sp.m_recMask;
if (pageI != ri.m_currPageI) { // check if current page
sp.setCurrPage(ri, pageI); if ((recI & ~recMask) == (ri.m_freeRecI & ~recMask)) {
void* recP = sp.getRecP(recI, ri);
*(Uint32*)recP = ri.m_freeRecI;
ri.m_freeRecI = recI;
} else {
sp.releaseNotCurrent(ri, recI);
} }
void* recP = sp.getRecP(recI, ri);
*(Uint32*)recP = ri.m_currFreeRecI;
ri.m_currFreeRecI = recI;
Uint32 useCount = ri.m_currUseCount;
assert(useCount != 0);
ri.m_currUseCount = useCount - 1;
ri.m_totalUseCount--;
ptr.i = RNIL; ptr.i = RNIL;
ptr.p = 0; ptr.p = 0;
} }
template <class T> template <class T>
inline void inline void
RecordPool<T>::free(bool force) RecordPool<T>::freeAllRecPages(bool force)
{ {
SuperPool& sp = m_superPool; SuperPool& sp = m_superPool;
SuperPool::RecInfo& ri = m_recInfo; sp.freeAllRecPages(m_recInfo, force);
sp.setCurrPage(ri, RNIL);
assert(force || ri.m_totalUseCount == 0);
sp.movePages(sp.m_pageList, ri.m_freeList);
sp.movePages(sp.m_pageList, ri.m_activeList);
sp.movePages(sp.m_pageList, ri.m_fullList);
ri.m_totalRecCount = 0;
} }
/*
* HeapPool - SuperPool on heap (concrete class)
*
* A super pool based on malloc with memory root on the heap. This
* pool type has 2 realistic uses:
*
* - a small pool with only initial malloc and pageBits set to match
* - the big pool from which all heap allocations are done
*
* A smart malloc may break "ip" limit by using different VM areas for
* different sized requests. For this reason malloc is done in units of
* increment size if possible. Memory root is set to the page-aligned
* address from first page malloc.
*/
class HeapPool : public SuperPool {
public:
// Describes malloc area. The areas are kept in singly linked list.
// There is a list head and pointers to current and last area.
struct Area {
Area();
Area* m_nextArea;
PtrI m_firstPageI;
Uint32 m_currPage;
void* m_memory; // from malloc
void* m_pages; // page-aligned pages
Uint32 m_numPages; // number of pages
};
// Constructor.
HeapPool(Uint32 pageSize, Uint32 pageBits);
// Destructor.
virtual ~HeapPool();
// Get new page from current area.
virtual PtrI getNewPage();
// Allocate fixed arrays.
bool allocInit();
// Allocate array of aligned pages.
bool allocArea(Area* ap, Uint32 tryPages);
// Allocate memory.
virtual bool allocMemory();
// List of malloc areas.
Area m_areaHead;
Area* m_currArea;
Area* m_lastArea;
};
#endif #endif
...@@ -81,19 +81,22 @@ cmpPtrP(const void* a, const void* b) ...@@ -81,19 +81,22 @@ cmpPtrP(const void* a, const void* b)
static Uint32 loopcount = 3; static Uint32 loopcount = 3;
template <Uint32 sz> template <class T>
void static void
sp_test(SuperPool& sp) sp_test(GroupPool& gp)
{ {
typedef A<sz> T; SuperPool& sp = gp.m_superPool;
RecordPool<T> rp(sp); RecordPool<T> rp(gp);
assert(gp.m_totPages == gp.m_freeList.m_pageCount);
SuperPool::RecInfo& ri = rp.m_recInfo; SuperPool::RecInfo& ri = rp.m_recInfo;
Uint32 pageCount = sp.m_totalSize / sp.m_pageSize; Uint32 pageCount = sp.m_totPages;
Uint32 perPage = rp.m_recInfo.m_maxUseCount; Uint32 perPage = rp.m_recInfo.m_maxPerPage;
Uint32 perPool = perPage * pageCount; Uint32 perPool = perPage * pageCount;
ndbout << "pages=" << pageCount << " perpage=" << perPage << " perpool=" << perPool << endl; ndbout << "pages=" << pageCount << " perpage=" << perPage << " perpool=" << perPool << endl;
Ptr<T>* ptrList = new Ptr<T> [perPool]; Ptr<T>* ptrList = new Ptr<T> [perPool];
memset(ptrList, 0x1f, perPool * sizeof(Ptr<T>)); memset(ptrList, 0x1f, perPool * sizeof(Ptr<T>));
Uint32 verify = 1000;
Uint32 useCount;
Uint32 loop; Uint32 loop;
for (loop = 0; loop < loopcount; loop++) { for (loop = 0; loop < loopcount; loop++) {
ndbout << "loop " << loop << endl; ndbout << "loop " << loop << endl;
...@@ -101,25 +104,26 @@ sp_test(SuperPool& sp) ...@@ -101,25 +104,26 @@ sp_test(SuperPool& sp)
// seize all // seize all
ndbout << "seize all" << endl; ndbout << "seize all" << endl;
for (i = 0; i < perPool + 1; i++) { for (i = 0; i < perPool + 1; i++) {
if (verify == 0 || urandom(perPool) < verify)
sp.verify(ri);
j = i; j = i;
sp.verify(ri);
Ptr<T> ptr1 = { 0, RNIL }; Ptr<T> ptr1 = { 0, RNIL };
if (! rp.seize(ptr1)) if (! rp.seize(ptr1))
break; break;
// write value
ptr1.p->fill(); ptr1.p->fill();
ptr1.p->check(); ptr1.p->check();
// verify getPtr
Ptr<T> ptr2 = { 0, ptr1.i }; Ptr<T> ptr2 = { 0, ptr1.i };
rp.getPtr(ptr2); rp.getPtr(ptr2);
assert(ptr1.i == ptr2.i && ptr1.p == ptr2.p); assert(ptr1.i == ptr2.i && ptr1.p == ptr2.p);
// save
ptrList[j] = ptr1; ptrList[j] = ptr1;
} }
assert(i == perPool);
assert(ri.m_totalUseCount == perPool && ri.m_totalRecCount == perPool);
sp.verify(ri); sp.verify(ri);
ndbout << "seized " << i << endl;
assert(i == perPool);
useCount = sp.getRecUseCount(ri);
assert(useCount == perPool);
// check duplicates // check duplicates
ndbout << "check dups" << endl;
{ {
Ptr<T>* ptrList2 = new Ptr<T> [perPool]; Ptr<T>* ptrList2 = new Ptr<T> [perPool];
memcpy(ptrList2, ptrList, perPool * sizeof(Ptr<T>)); memcpy(ptrList2, ptrList, perPool * sizeof(Ptr<T>));
...@@ -135,7 +139,8 @@ sp_test(SuperPool& sp) ...@@ -135,7 +139,8 @@ sp_test(SuperPool& sp)
ndbout << "release all" << endl; ndbout << "release all" << endl;
Uint32 coprime = random_coprime(perPool); Uint32 coprime = random_coprime(perPool);
for (i = 0; i < perPool; i++) { for (i = 0; i < perPool; i++) {
sp.verify(ri); if (verify == 0 || urandom(perPool) < verify)
sp.verify(ri);
switch (loop % 3) { switch (loop % 3) {
case 0: // ascending case 0: // ascending
j = i; j = i;
...@@ -153,27 +158,31 @@ sp_test(SuperPool& sp) ...@@ -153,27 +158,31 @@ sp_test(SuperPool& sp)
rp.release(ptr); rp.release(ptr);
assert(ptr.i == RNIL && ptr.p == 0); assert(ptr.i == RNIL && ptr.p == 0);
} }
sp.setCurrPage(ri, RNIL);
assert(ri.m_totalUseCount == 0 && ri.m_totalRecCount == 0);
sp.verify(ri); sp.verify(ri);
useCount = sp.getRecUseCount(ri);
assert(useCount == 0);
// seize/release at random // seize/release at random
ndbout << "seize/release at random" << endl; ndbout << "seize/release at random" << endl;
for (i = 0; i < loopcount * perPool; i++) { for (i = 0; i < loopcount * perPool; i++) {
if (verify == 0 || urandom(perPool) < verify)
sp.verify(ri);
j = urandom(perPool); j = urandom(perPool);
Ptr<T>& ptr = ptrList[j]; Ptr<T>& ptr = ptrList[j];
if (ptr.i == RNIL) { if (ptr.i == RNIL) {
rp.seize(ptr); if (rp.seize(ptr))
ptr.p->fill(); ptr.p->fill();
} else { } else {
ptr.p->check(); ptr.p->check();
rp.release(ptr); rp.release(ptr);
} }
} }
ndbout << "used " << ri.m_totalUseCount << endl; ndbout << "used " << ri.m_useCount << endl;
sp.verify(ri); sp.verify(ri);
// release all // release all
ndbout << "release all" << endl; ndbout << "release all" << endl;
for (i = 0; i < perPool; i++) { for (i = 0; i < perPool; i++) {
if (verify == 0 || urandom(perPool) < verify)
sp.verify(ri);
j = i; j = i;
Ptr<T>& ptr = ptrList[j]; Ptr<T>& ptr = ptrList[j];
if (ptr.i != RNIL) { if (ptr.i != RNIL) {
...@@ -181,40 +190,54 @@ sp_test(SuperPool& sp) ...@@ -181,40 +190,54 @@ sp_test(SuperPool& sp)
rp.release(ptr); rp.release(ptr);
} }
} }
sp.setCurrPage(ri, RNIL);
assert(ri.m_totalUseCount == 0 && ri.m_totalRecCount == 0);
sp.verify(ri); sp.verify(ri);
useCount = sp.getRecUseCount(ri);
assert(useCount == 0);
} }
// done // done
delete [] ptrList; delete [] ptrList;
} }
static Uint32 pageCount = 99;
static Uint32 pageSize = 32768; static Uint32 pageSize = 32768;
static Uint32 pageBits = 15; static Uint32 pageBits = 17;
const Uint32 sz1 = 3, sz2 = 4, sz3 = 53, sz4 = 424, sz5 = 5353; const Uint32 sz1 = 3;
const Uint32 sz2 = 4;
template void sp_test<sz1>(SuperPool& sp); const Uint32 sz3 = 53;
template void sp_test<sz2>(SuperPool& sp); const Uint32 sz4 = 424;
template void sp_test<sz3>(SuperPool& sp); const Uint32 sz5 = 5353;
template void sp_test<sz4>(SuperPool& sp);
template void sp_test<sz5>(SuperPool& sp); typedef A<sz1> T1;
typedef A<sz2> T2;
typedef A<sz3> T3;
typedef A<sz4> T4;
typedef A<sz5> T5;
template static void sp_test<T1>(GroupPool& sp);
template static void sp_test<T2>(GroupPool& sp);
template static void sp_test<T3>(GroupPool& sp);
template static void sp_test<T4>(GroupPool& sp);
template static void sp_test<T5>(GroupPool& sp);
int int
main() main()
{ {
HeapPool sp(pageSize, pageBits); HeapPool sp(pageSize, pageBits);
sp.setSizes(pageCount * pageSize); sp.setInitPages(7);
if (! sp.init()) sp.setMaxPages(7);
if (! sp.allocMemory())
assert(false); assert(false);
GroupPool gp(sp);
Uint16 s = (Uint16)getpid(); Uint16 s = (Uint16)getpid();
srandom(s); srandom(s);
ndbout << "rand " << s << endl; ndbout << "rand " << s << endl;
sp_test<sz1>(sp); int count = 0;
sp_test<sz2>(sp); while (++count <= 1) {
sp_test<sz3>(sp); sp_test<T1>(gp);
sp_test<sz4>(sp); sp_test<T2>(gp);
sp_test<sz5>(sp); sp_test<T3>(gp);
sp_test<T4>(gp);
sp_test<T5>(gp);
}
return 0; return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment