ndb - jamify (better) DbtupDiskAlloc

parent 4d552dbb
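For context, the "jamify" change introduces per-file jam macros in DbtupDiskAlloc.cpp and routes every branch and entry point through them. A minimal sketch of the pattern follows; the 35000 base and the two #defines are taken from this diff, jamLine()/jamEntryLine() are the generic NDB kernel jam-trace hooks, and the usage comments are illustrative only:

    #define ljam() { jamLine(35000 + __LINE__); }
    #define ljamEntry() { jamEntryLine(35000 + __LINE__); }

    // Used as:
    //   ljamEntry();   // when entering a signal handler or page-manager callback
    //   ljam();        // when taking a branch inside it
    //
    // A jam trace entry of 35000 + N then maps directly back to line N of
    // DbtupDiskAlloc.cpp, which is why each DBTUP source file gets its own
    // base (see the comment table in the first hunk below).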
@@ -86,6 +86,7 @@ inline const Uint32* ALIGN_WORD(const void* ptr)
 // DbtupDebug.cpp 30000
 // DbtupVarAlloc.cpp 32000
 // DbtupScan.cpp 33000
+// DbtupDiskAlloc.cpp 35000
 //------------------------------------------------------------------
 /*
@@ -108,8 +108,11 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr,
    * Release copy tuple
    */
   if(!regOperPtr->m_copy_tuple_location.isNull())
+  {
+    ljam();
     c_undo_buffer.free_copy_tuple(&regOperPtr->m_copy_tuple_location);
+  }
   if (regOperPtr->op_struct.in_active_list) {
     regOperPtr->op_struct.in_active_list= false;
     if (regOperPtr->nextActiveOp != RNIL) {
@@ -172,6 +175,7 @@ Dbtup::dealloc_tuple(Signal* signal,
   Uint32 extra_bits = Tuple_header::FREED;
   if (bits & Tuple_header::DISK_PART)
   {
+    ljam();
     Local_key disk;
     memcpy(&disk, ptr->get_disk_ref_ptr(regTabPtr), sizeof(disk));
     PagePtr tmpptr;
@@ -184,6 +188,7 @@ Dbtup::dealloc_tuple(Signal* signal,
   if (! (bits & (Tuple_header::LCP_SKIP | Tuple_header::ALLOC)) &&
       lcpScan_ptr_i != RNIL)
   {
+    ljam();
     ScanOpPtr scanOp;
     c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i);
     Local_key rowid = regOperPtr->m_tuple_location;
@@ -191,6 +196,7 @@ Dbtup::dealloc_tuple(Signal* signal,
     rowid.m_page_no = page->frag_page_id;
     if (rowid > scanpos)
     {
+      ljam();
      extra_bits = Tuple_header::LCP_KEEP; // Note REMOVE FREE
      ptr->m_operation_ptr_i = lcp_keep_list;
      regFragPtr->m_lcp_keep_list = rowid.ref();
@@ -231,11 +237,13 @@ Dbtup::commit_operation(Signal* signal,
   Uint32 mm_vars= regTabPtr->m_attributes[MM].m_no_of_varsize;
   if(mm_vars == 0)
   {
+    ljam();
     memcpy(tuple_ptr, copy, 4*fixsize);
     disk_ptr= (Tuple_header*)(((Uint32*)copy)+fixsize);
   }
   else
   {
+    ljam();
     /**
      * Var_part_ref is only stored in *allocated* tuple
      * so memcpy from copy, will over write it...
@@ -260,6 +268,7 @@ Dbtup::commit_operation(Signal* signal,
     if(copy_bits & Tuple_header::MM_SHRINK)
     {
+      ljam();
       vpagePtrP->shrink_entry(tmp.m_page_idx, (sz + 3) >> 2);
       update_free_page_list(regFragPtr, vpagePtr);
     }
@@ -270,6 +279,7 @@ Dbtup::commit_operation(Signal* signal,
   if (regTabPtr->m_no_of_disk_attributes &&
       (copy_bits & Tuple_header::DISK_INLINE))
   {
+    ljam();
     Local_key key;
     memcpy(&key, copy->get_disk_ref_ptr(regTabPtr), sizeof(Local_key));
     Uint32 logfile_group_id= regFragPtr->m_logfile_group_id;
@@ -280,22 +290,26 @@ Dbtup::commit_operation(Signal* signal,
     Uint32 sz, *dst;
     if(copy_bits & Tuple_header::DISK_ALLOC)
     {
+      ljam();
       disk_page_alloc(signal, regTabPtr, regFragPtr, &key, diskPagePtr, gci);
     }
     if(regTabPtr->m_attributes[DD].m_no_of_varsize == 0)
     {
+      ljam();
       sz= regTabPtr->m_offsets[DD].m_fix_header_size;
       dst= ((Fix_page*)diskPagePtr.p)->get_ptr(key.m_page_idx, sz);
     }
     else
     {
+      ljam();
       dst= ((Var_page*)diskPagePtr.p)->get_ptr(key.m_page_idx);
       sz= ((Var_page*)diskPagePtr.p)->get_entry_len(key.m_page_idx);
     }
     if(! (copy_bits & Tuple_header::DISK_ALLOC))
     {
+      ljam();
       disk_page_undo_update(diskPagePtr.p,
                             &key, dst, sz, gci, logfile_group_id);
     }
@@ -309,6 +323,7 @@ Dbtup::commit_operation(Signal* signal,
   if(lcpScan_ptr_i != RNIL && (bits & Tuple_header::ALLOC))
   {
+    ljam();
     ScanOpPtr scanOp;
     c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i);
     Local_key rowid = regOperPtr->m_tuple_location;
@@ -316,6 +331,7 @@ Dbtup::commit_operation(Signal* signal,
     rowid.m_page_no = pagePtr.p->frag_page_id;
     if(rowid > scanpos)
     {
+      ljam();
       copy_bits |= Tuple_header::LCP_SKIP;
     }
   }
@@ -374,7 +390,10 @@ Dbtup::disk_page_commit_callback(Signal* signal,
   execTUP_COMMITREQ(signal);
   if(signal->theData[0] == 0)
+  {
+    ljam();
     c_lqh->tupcommit_conf_callback(signal, regOperPtr.p->userpointer);
+  }
 }
 
 void
@@ -412,6 +431,7 @@ Dbtup::disk_page_log_buffer_callback(Signal* signal,
 void
 Dbtup::fix_commit_order(OperationrecPtr opPtr)
 {
+  ljam();
   ndbassert(!opPtr.p->is_first_operation());
   OperationrecPtr firstPtr = opPtr;
   while(firstPtr.p->prevActiveOp != RNIL)
@@ -437,7 +457,10 @@ Dbtup::fix_commit_order(OperationrecPtr opPtr)
   c_operation_pool.getPtr(seco)->prevActiveOp = opPtr.i;
   c_operation_pool.getPtr(prev)->nextActiveOp = firstPtr.i;
   if(next != RNIL)
+  {
+    ljam();
     c_operation_pool.getPtr(next)->prevActiveOp = firstPtr.i;
+  }
 }
 
 /* ----------------------------------------------------------------- */
@@ -502,6 +525,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
   bool get_page = false;
   if(regOperPtr.p->op_struct.m_load_diskpage_on_commit)
   {
+    ljam();
     Page_cache_client::Request req;
     ndbassert(regOperPtr.p->is_first_operation() &&
               regOperPtr.p->is_last_operation());
@@ -511,6 +535,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
      */
     if(!regOperPtr.p->m_copy_tuple_location.isNull())
     {
+      ljam();
       Tuple_header* tmp= (Tuple_header*)
         c_undo_buffer.get_ptr(&regOperPtr.p->m_copy_tuple_location);
@@ -520,23 +545,25 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
       if (unlikely(regOperPtr.p->op_struct.op_type == ZDELETE &&
                    tmp->m_header_bits & Tuple_header::DISK_ALLOC))
       {
-        jam();
+        ljam();
         /**
          * Insert+Delete
          */
         regOperPtr.p->op_struct.m_load_diskpage_on_commit = 0;
         regOperPtr.p->op_struct.m_wait_log_buffer = 0;
         disk_page_abort_prealloc(signal, regFragPtr.p,
                                  &req.m_page, req.m_page.m_page_idx);
         c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id,
                                 regOperPtr.p->m_undo_buffer_space);
         ndbout_c("insert+delete");
+        ljamEntry();
         goto skip_disk;
       }
     }
     else
     {
+      ljam();
       // initial delete
       ndbassert(regOperPtr.p->op_struct.op_type == ZDELETE);
       memcpy(&req.m_page,
@@ -560,11 +587,14 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
       /**
        * Timeslice
        */
+      ljam();
       signal->theData[0] = 1;
       return;
     case -1:
       ndbrequire("NOT YET IMPLEMENTED" == 0);
       break;
+    default:
+      ljam();
     }
     get_page = true;
@@ -581,6 +611,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
   if(regOperPtr.p->op_struct.m_wait_log_buffer)
   {
+    ljam();
     ndbassert(regOperPtr.p->is_first_operation() &&
               regOperPtr.p->is_last_operation());
@@ -592,18 +623,23 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
     Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id);
     int res= lgman.get_log_buffer(signal, sz, &cb);
+    ljamEntry();
     switch(res){
     case 0:
+      ljam();
       signal->theData[0] = 1;
       return;
     case -1:
       ndbrequire("NOT YET IMPLEMENTED" == 0);
       break;
+    default:
+      ljam();
     }
   }
   if(!tuple_ptr)
   {
+    ljam();
     tuple_ptr = (Tuple_header*)
       get_ptr(&page, &regOperPtr.p->m_tuple_location,regTabPtr.p);
   }
@@ -612,6 +648,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
   if(get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
   {
+    ljam();
     /**
      * Execute all tux triggers at first commit
      * since previous tuple is otherwise removed...
@@ -637,6 +674,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
   if(regOperPtr.p->is_last_operation())
   {
+    ljam();
     /**
      * Perform "real" commit
      */
@@ -647,12 +685,14 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
     if(regOperPtr.p->op_struct.op_type != ZDELETE)
     {
+      ljam();
       commit_operation(signal, gci, tuple_ptr, page,
                        regOperPtr.p, regFragPtr.p, regTabPtr.p);
       removeActiveOpList(regOperPtr.p, tuple_ptr);
     }
     else
     {
+      ljam();
       removeActiveOpList(regOperPtr.p, tuple_ptr);
       if (get_page)
         ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
@@ -662,6 +702,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
   }
   else
   {
+    ljam();
     removeActiveOpList(regOperPtr.p, tuple_ptr);
   }

@@ -16,6 +16,10 @@
 #define DBTUP_C
 #include "Dbtup.hpp"
 
+#define ljam() { jamLine(35000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(35000 + __LINE__); }
+
 static bool f_undo_done = true;
 static
@@ -266,7 +270,7 @@ Dbtup::update_extent_pos(Disk_alloc_info& alloc,
   Uint32 pos = alloc.calc_extent_pos(extentPtr.p);
   if (old != pos)
   {
-    jam();
+    ljam();
     Local_extent_info_list old_list(c_extent_pool, alloc.m_free_extents[old]);
     Local_extent_info_list new_list(c_extent_pool, alloc.m_free_extents[pos]);
     old_list.remove(extentPtr);
@@ -283,6 +287,7 @@ void
 void
 Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr)
 {
+  ljam();
   /**
    * Link to extent, clear uncommitted_used_space
    */
@@ -303,6 +308,7 @@ Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr)
   ddassert(real_free >= estimated);
   if (real_free != estimated)
   {
+    ljam();
     extentPtr.p->m_free_space += (real_free - estimated);
     update_extent_pos(alloc, extentPtr);
   }
@@ -318,7 +324,7 @@ Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr)
   unsigned uncommitted, committed;
   uncommitted = committed = ~(unsigned)0;
   int ret = tsman.get_page_free_bits(&page, &uncommitted, &committed);
-  jamEntry();
+  ljamEntry();
   idx = alloc.calc_page_free_bits(real_free);
   ddassert(idx == committed);
@@ -375,6 +381,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
       key->m_file_no= tmp.p->m_file_no;
       if (DBG_DISK)
         ndbout << " found dirty page " << *key << endl;
+      ljam();
       return 0; // Page in memory
     }
   }
@@ -396,6 +403,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
       * key = req.p->m_key;
       if (DBG_DISK)
         ndbout << " found transit page " << *key << endl;
+      ljam();
       return 0;
     }
   }
@@ -405,6 +413,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
    */
   if (!c_page_request_pool.seize(req))
   {
+    ljam();
     err= 1;
     //XXX set error code
     ndbout_c("no free request");
@@ -425,16 +434,16 @@ Dbtup::disk_page_prealloc(Signal* signal,
    */
   if ((ext.i= alloc.m_curr_extent_info_ptr_i) != RNIL)
   {
-    jam();
+    ljam();
     c_extent_pool.getPtr(ext);
     if ((pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits)) >= 0)
     {
-      jamEntry();
+      ljamEntry();
       found= true;
     }
     else
     {
-      jamEntry();
+      ljamEntry();
       /**
        * The current extent is not in a free list
        * and since it couldn't accomadate the request
@@ -453,14 +462,14 @@ Dbtup::disk_page_prealloc(Signal* signal,
     Uint32 pos;
     if ((pos= alloc.find_extent(sz)) != RNIL)
     {
-      jam();
+      ljam();
       Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[pos]);
       list.first(ext);
       list.remove(ext);
     }
     else
     {
-      jam();
+      ljam();
       /**
       * We need to alloc an extent
       */
@@ -469,6 +478,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
       err = c_lgman->alloc_log_space(logfile_group_id,
                                      sizeof(Disk_undo::AllocExtent)>>2);
+      ljamEntry();
       if(unlikely(err))
       {
         return -err;
@@ -477,7 +487,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
       if (!c_extent_pool.seize(ext))
       {
-        jam();
+        ljam();
         //XXX
         err= 2;
 #if NOT_YET_UNDO_ALLOC_EXTENT
@@ -491,7 +501,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
       if ((err= tsman.alloc_extent(&ext.p->m_key)) < 0)
       {
-        jamEntry();
+        ljamEntry();
 #if NOT_YET_UNDO_ALLOC_EXTENT
         c_lgman->free_log_space(logfile_group_id,
                                 sizeof(Disk_undo::AllocExtent)>>2);
@@ -543,7 +553,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
     alloc.m_curr_extent_info_ptr_i= ext.i;
     ext.p->m_free_matrix_pos= RNIL;
     pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits);
-    jamEntry();
+    ljamEntry();
     ddassert(pageBits >= 0);
   }
@@ -569,6 +579,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
   Uint32 newPageBits= alloc.calc_page_free_bits(new_size);
   if (newPageBits != (Uint32)pageBits)
   {
+    ljam();
     ddassert(ext.p->m_free_page_count[pageBits] > 0);
     ext.p->m_free_page_count[pageBits]--;
     ext.p->m_free_page_count[newPageBits]++;
@@ -596,6 +607,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
   int flags= Page_cache_client::ALLOC_REQ;
   if (pageBits == 0)
   {
+    ljam();
     //XXX empty page -> fast to map
     flags |= Page_cache_client::EMPTY_PAGE;
     preq.m_callback.m_callbackFunction =
@@ -603,15 +615,17 @@ Dbtup::disk_page_prealloc(Signal* signal,
   }
   int res= m_pgman.get_page(signal, preq, flags);
-  jamEntry();
+  ljamEntry();
   switch(res)
   {
   case 0:
+    ljam();
     break;
   case -1:
     ndbassert(false);
     break;
   default:
+    ljam();
     execute(signal, preq.m_callback, res); // run callback
   }
@@ -623,6 +637,7 @@ Dbtup::disk_page_prealloc_dirty_page(Disk_alloc_info & alloc,
                                      PagePtr pagePtr,
                                      Uint32 old_idx, Uint32 sz)
 {
+  ljam();
   ddassert(pagePtr.p->list_index == old_idx);
   Uint32 free= pagePtr.p->free_space;
@@ -638,6 +653,7 @@ Dbtup::disk_page_prealloc_dirty_page(Disk_alloc_info & alloc,
   if (old_idx != new_idx)
   {
+    ljam();
     LocalDLList<Page> old_list(*pool, alloc.m_dirty_pages[old_idx]);
     LocalDLList<Page> new_list(*pool, alloc.m_dirty_pages[new_idx]);
     old_list.remove(pagePtr);
@@ -661,6 +677,7 @@ Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
                                        Ptr<Page_request> req,
                                        Uint32 old_idx, Uint32 sz)
 {
+  ljam();
   ddassert(req.p->m_list_index == old_idx);
   Uint32 free= req.p->m_estimated_free_space;
@@ -675,6 +692,7 @@ Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc,
   if (old_idx != new_idx)
   {
+    ljam();
     Page_request_list::Head *lists = alloc.m_page_requests;
     Local_page_request_list old_list(c_page_request_pool, lists[old_idx]);
     Local_page_request_list new_list(c_page_request_pool, lists[new_idx]);
@@ -699,6 +717,7 @@ void
 Dbtup::disk_page_prealloc_callback(Signal* signal,
                                    Uint32 page_request, Uint32 page_id)
 {
+  ljamEntry();
   //ndbout_c("disk_alloc_page_callback id: %d", page_id);
   Ptr<Page_request> req;
@@ -728,6 +747,7 @@ Dbtup::disk_page_prealloc_initial_callback(Signal*signal,
                                            Uint32 page_request,
                                            Uint32 page_id)
 {
+  ljamEntry();
   //ndbout_c("disk_alloc_page_callback_initial id: %d", page_id);
   /**
    * 1) lookup page request
@@ -819,6 +839,7 @@ Dbtup::disk_page_prealloc_callback_common(Signal* signal,
   if (old_idx != new_idx || free != real_free)
   {
+    ljam();
     Ptr<Extent_info> extentPtr;
     c_extent_pool.getPtr(extentPtr, ext);
@@ -826,6 +847,7 @@ Dbtup::disk_page_prealloc_callback_common(Signal* signal,
     if (old_idx != new_idx)
     {
+      ljam();
       ddassert(extentPtr.p->m_free_page_count[old_idx]);
       extentPtr.p->m_free_page_count[old_idx]--;
       extentPtr.p->m_free_page_count[new_idx]++;
@@ -844,9 +866,11 @@ Dbtup::disk_page_prealloc_callback_common(Signal* signal,
 void
 Dbtup::disk_page_set_dirty(PagePtr pagePtr)
 {
+  ljam();
   Uint32 idx = pagePtr.p->list_index;
   if ((idx & 0x8000) == 0)
   {
+    ljam();
     /**
      * Already in dirty list
      */
@@ -878,7 +902,6 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
   Uint32 used = pagePtr.p->uncommitted_used_space;
   if (unlikely(pagePtr.p->m_restart_seq != globalData.m_restart_seq))
   {
-    jam();
    restart_setup_page(alloc, pagePtr);
    idx = alloc.calc_page_free_bits(free);
    used = 0;
@@ -903,14 +926,14 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
   // Make sure no one will allocate it...
   tsman.unmap_page(&key, MAX_FREE_LIST - 1);
-  jamEntry();
+  ljamEntry();
 }
 
 void
 Dbtup::disk_page_unmap_callback(Uint32 when,
                                 Uint32 page_id, Uint32 dirty_count)
 {
-  jamEntry();
+  ljamEntry();
   Ptr<GlobalPage> gpage;
   m_global_page_pool.getPtr(gpage, page_id);
   PagePtr pagePtr;
@@ -922,6 +945,7 @@ Dbtup::disk_page_unmap_callback(Uint32 when,
                type != File_formats::PT_Tup_varsize_page) ||
               f_undo_done == false))
   {
+    ljam();
     return ;
   }
@@ -941,7 +965,7 @@ Dbtup::disk_page_unmap_callback(Uint32 when,
     /**
      * Before pageout
      */
-    jam();
+    ljam();
     if (DBG_DISK)
     {
@@ -962,7 +986,7 @@ Dbtup::disk_page_unmap_callback(Uint32 when,
     if (dirty_count == 0)
     {
-      jam();
+      ljam();
       pagePtr.p->list_index = idx | 0x8000;
       Local_key key;
@@ -980,7 +1004,7 @@ Dbtup::disk_page_unmap_callback(Uint32 when,
                      fragPtr.p->m_tablespace_id);
       tsman.unmap_page(&key, idx);
-      jamEntry();
+      ljamEntry();
     }
   }
   else if (when == 1)
@@ -988,7 +1012,7 @@ Dbtup::disk_page_unmap_callback(Uint32 when,
     /**
      * After page out
      */
-    jam();
+    ljam();
     Local_key key;
     key.m_page_no = pagePtr.p->m_page_no;
@@ -1018,6 +1042,7 @@ Dbtup::disk_page_unmap_callback(Uint32 when,
              << endl;
     }
     tsman.update_page_free_bits(&key, alloc.calc_page_free_bits(real_free));
+    ljamEntry();
   }
 }
@@ -1026,6 +1051,7 @@ Dbtup::disk_page_alloc(Signal* signal,
                        Tablerec* tabPtrP, Fragrecord* fragPtrP,
                        Local_key* key, PagePtr pagePtr, Uint32 gci)
 {
+  ljam();
   Uint32 logfile_group_id= fragPtrP->m_logfile_group_id;
   Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
@@ -1056,6 +1082,7 @@ Dbtup::disk_page_free(Signal *signal,
                       Tablerec *tabPtrP, Fragrecord * fragPtrP,
                       Local_key* key, PagePtr pagePtr, Uint32 gci)
 {
+  ljam();
   if (DBG_DISK)
     ndbout << " disk_page_free " << *key << endl;
@@ -1108,6 +1135,7 @@ Dbtup::disk_page_free(Signal *signal,
   if (old_idx != new_idx)
   {
+    ljam();
     ddassert(extentPtr.p->m_free_page_count[old_idx]);
     extentPtr.p->m_free_page_count[old_idx]--;
     extentPtr.p->m_free_page_count[new_idx]++;
@@ -1134,6 +1162,7 @@ void
 Dbtup::disk_page_abort_prealloc(Signal *signal, Fragrecord* fragPtrP,
                                 Local_key* key, Uint32 sz)
 {
+  ljam();
   Page_cache_client::Request req;
   req.m_callback.m_callbackData= sz;
   req.m_callback.m_callbackFunction =
@@ -1143,13 +1172,17 @@ Dbtup::disk_page_abort_prealloc(Signal *signal, Fragrecord* fragPtrP,
   memcpy(&req.m_page, key, sizeof(Local_key));
   int res= m_pgman.get_page(signal, req, flags);
-  jamEntry();
+  ljamEntry();
   switch(res)
   {
   case 0:
+    ljam();
+    break;
   case -1:
+    ndbrequire(false);
     break;
   default:
+    ljam();
     Ptr<GlobalPage> gpage;
     m_global_page_pool.getPtr(gpage, (Uint32)res);
     PagePtr pagePtr;
@@ -1165,7 +1198,7 @@ Dbtup::disk_page_abort_prealloc_callback(Signal* signal,
                                          Uint32 sz, Uint32 page_id)
 {
   //ndbout_c("disk_alloc_page_callback id: %d", page_id);
+  ljamEntry();
   Ptr<GlobalPage> gpage;
   m_global_page_pool.getPtr(gpage, page_id);
@@ -1189,7 +1222,7 @@ Dbtup::disk_page_abort_prealloc_callback_1(Signal* signal,
                                            PagePtr pagePtr,
                                            Uint32 sz)
 {
-  jam();
+  ljam();
   disk_page_set_dirty(pagePtr);
   Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
@@ -1208,12 +1241,14 @@ Dbtup::disk_page_abort_prealloc_callback_1(Signal* signal,
   c_extent_pool.getPtr(extentPtr, ext);
   if (old_idx != new_idx)
   {
+    ljam();
     ddassert(extentPtr.p->m_free_page_count[old_idx]);
     extentPtr.p->m_free_page_count[old_idx]--;
     extentPtr.p->m_free_page_count[new_idx]++;
     if (old_idx == page_idx)
     {
+      ljam();
       ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
       LocalDLList<Page> old_list(*pool, alloc.m_dirty_pages[old_idx]);
       LocalDLList<Page> new_list(*pool, alloc.m_dirty_pages[new_idx]);
@@ -1223,6 +1258,7 @@ Dbtup::disk_page_abort_prealloc_callback_1(Signal* signal,
     }
     else
     {
+      ljam();
       pagePtr.p->list_index = new_idx | 0x8000;
     }
   }
@@ -1272,7 +1308,7 @@ Dbtup::disk_page_alloc_extent_log_buffer_callback(Signal* signal,
   Uint64 lsn= lgman.add_entry(c, 1);
   tsman.update_lsn(&key, lsn);
-  jamEntry();
+  ljamEntry();
 }
 #endif
@@ -1280,6 +1316,7 @@ Uint64
 Dbtup::disk_page_undo_alloc(Page* page, const Local_key* key,
                             Uint32 sz, Uint32 gci, Uint32 logfile_group_id)
 {
+  ljam();
   Logfile_client lgman(this, c_lgman, logfile_group_id);
   Disk_undo::Alloc alloc;
@@ -1291,7 +1328,7 @@ Dbtup::disk_page_undo_alloc(Page* page, const Local_key* key,
   Uint64 lsn= lgman.add_entry(c, 1);
   m_pgman.update_lsn(* key, lsn);
-  jamEntry();
+  ljamEntry();
   return lsn;
 }
@@ -1301,6 +1338,7 @@ Dbtup::disk_page_undo_update(Page* page, const Local_key* key,
                              const Uint32* src, Uint32 sz,
                              Uint32 gci, Uint32 logfile_group_id)
 {
+  ljam();
   Logfile_client lgman(this, c_lgman, logfile_group_id);
   Disk_undo::Update update;
@@ -1321,7 +1359,7 @@ Dbtup::disk_page_undo_update(Page* page, const Local_key* key,
   Uint64 lsn= lgman.add_entry(c, 3);
   m_pgman.update_lsn(* key, lsn);
-  jamEntry();
+  ljamEntry();
   return lsn;
 }
@@ -1331,6 +1369,7 @@ Dbtup::disk_page_undo_free(Page* page, const Local_key* key,
                            const Uint32* src, Uint32 sz,
                            Uint32 gci, Uint32 logfile_group_id)
 {
+  ljam();
   Logfile_client lgman(this, c_lgman, logfile_group_id);
   Disk_undo::Free free;
@@ -1351,7 +1390,7 @@ Dbtup::disk_page_undo_free(Page* page, const Local_key* key,
   Uint64 lsn= lgman.add_entry(c, 3);
   m_pgman.update_lsn(* key, lsn);
-  jamEntry();
+  ljamEntry();
   return lsn;
 }
@@ -1377,7 +1416,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
   case File_formats::Undofile::UNDO_LCP_FIRST:
   case File_formats::Undofile::UNDO_LCP:
   {
-    jam();
+    ljam();
     ndbrequire(len == 3);
     Uint32 lcp = ptr[0];
     Uint32 tableId = ptr[1] >> 16;
@@ -1393,7 +1432,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
   }
   case File_formats::Undofile::UNDO_TUP_ALLOC:
   {
-    jam();
+    ljam();
     Disk_undo::Alloc* rec= (Disk_undo::Alloc*)ptr;
     preq.m_page.m_page_no = rec->m_page_no;
     preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
@@ -1402,7 +1441,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
   }
   case File_formats::Undofile::UNDO_TUP_UPDATE:
   {
-    jam();
+    ljam();
     Disk_undo::Update* rec= (Disk_undo::Update*)ptr;
     preq.m_page.m_page_no = rec->m_page_no;
     preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
@@ -1411,7 +1450,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
   }
   case File_formats::Undofile::UNDO_TUP_FREE:
   {
-    jam();
+    ljam();
     Disk_undo::Free* rec= (Disk_undo::Free*)ptr;
     preq.m_page.m_page_no = rec->m_page_no;
     preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
@@ -1423,7 +1462,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
    *
    */
   {
-    jam();
+    ljam();
     Disk_undo::Create* rec= (Disk_undo::Create*)ptr;
     Ptr<Tablerec> tabPtr;
     tabPtr.i= rec->m_table;
@@ -1442,7 +1481,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
   }
   case File_formats::Undofile::UNDO_TUP_DROP:
   {
-    jam();
+    ljam();
     Disk_undo::Drop* rec = (Disk_undo::Drop*)ptr;
     Ptr<Tablerec> tabPtr;
     tabPtr.i= rec->m_table;
@@ -1460,14 +1499,14 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
     return;
   }
   case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT:
-    jam();
+    ljam();
   case File_formats::Undofile::UNDO_TUP_FREE_EXTENT:
-    jam();
+    ljam();
     disk_restart_undo_next(signal);
     return;
   case File_formats::Undofile::UNDO_END:
-    jam();
+    ljam();
     f_undo_done = true;
     return;
   default:
@@ -1480,7 +1519,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
   int flags = 0;
   int res= m_pgman.get_page(signal, preq, flags);
-  jamEntry();
+  ljamEntry();
   switch(res)
   {
   case 0:
@@ -1503,7 +1542,7 @@ Dbtup::disk_restart_undo_next(Signal* signal)
 void
 Dbtup::disk_restart_lcp_id(Uint32 tableId, Uint32 fragId, Uint32 lcpId)
 {
-  jamEntry();
+  ljamEntry();
   if (lcpId == RNIL)
   {
@@ -1534,23 +1573,23 @@ Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag,
   if (tabPtr.p->tableStatus == DEFINED)
   {
-    jam();
+    ljam();
     FragrecordPtr fragPtr;
     getFragmentrec(fragPtr, fragId, tabPtr.p);
     if (!fragPtr.isNull())
     {
-      jam();
+      ljam();
       switch(flag){
       case Fragrecord::UC_CREATE:
-        jam();
+        ljam();
         fragPtr.p->m_undo_complete |= flag;
         return;
       case Fragrecord::UC_LCP:
-        jam();
+        ljam();
        if (fragPtr.p->m_undo_complete == 0 &&
            fragPtr.p->m_restore_lcp_id == lcpId)
        {
-          jam();
+          ljam();
          fragPtr.p->m_undo_complete |= flag;
          if (DBG_UNDO)
            ndbout_c("table: %u fragment: %u lcp: %u -> done",
@@ -1559,7 +1598,7 @@ Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag,
         return;
       case Fragrecord::UC_SET_LCP:
       {
-        jam();
+        ljam();
         if (DBG_UNDO)
           ndbout_c("table: %u fragment: %u restore to lcp: %u",
                    tableId, fragId, lcpId);
@@ -1580,7 +1619,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
                                   Uint32 id,
                                   Uint32 page_id)
 {
-  jamEntry();
+  ljamEntry();
   Ptr<GlobalPage> gpage;
   m_global_page_pool.getPtr(gpage, page_id);
   PagePtr pagePtr;
@@ -1594,7 +1633,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
       pagePtr.p->nextList != RNIL ||
       pagePtr.p->prevList != RNIL)
   {
-    jam();
+    ljam();
     update = true;
     pagePtr.p->list_index |= 0x8000;
     pagePtr.p->nextList = pagePtr.p->prevList = RNIL;
@@ -1605,7 +1644,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
   if (tableId >= cnoOfTablerec)
   {
-    jam();
+    ljam();
     if (DBG_UNDO)
       ndbout_c("UNDO table> %u", tableId);
     disk_restart_undo_next(signal);
@@ -1616,7 +1655,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
   if (undo->m_table_ptr.p->tableStatus != DEFINED)
   {
-    jam();
+    ljam();
     if (DBG_UNDO)
       ndbout_c("UNDO !defined (%u) ", tableId);
     disk_restart_undo_next(signal);
@@ -1626,7 +1665,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
   getFragmentrec(undo->m_fragment_ptr, fragId, undo->m_table_ptr.p);
   if(undo->m_fragment_ptr.isNull())
   {
-    jam();
+    ljam();
     if (DBG_UNDO)
       ndbout_c("UNDO fragment null %u/%u", tableId, fragId);
     disk_restart_undo_next(signal);
@@ -1635,7 +1674,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
   if (undo->m_fragment_ptr.p->m_undo_complete)
   {
-    jam();
+    ljam();
     if (DBG_UNDO)
       ndbout_c("UNDO undo complete %u/%u", tableId, fragId);
     disk_restart_undo_next(signal);
@@ -1654,7 +1693,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
   if (undo->m_lsn <= lsn)
   {
-    jam();
+    ljam();
     if (DBG_UNDO)
     {
       ndbout << "apply: " << undo->m_lsn << "(" << lsn << " )"
@@ -1669,15 +1708,15 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
      */
     switch(undo->m_type){
     case File_formats::Undofile::UNDO_TUP_ALLOC:
-      jam();
+      ljam();
       disk_restart_undo_alloc(undo);
       break;
     case File_formats::Undofile::UNDO_TUP_UPDATE:
-      jam();
+      ljam();
       disk_restart_undo_update(undo);
       break;
     case File_formats::Undofile::UNDO_TUP_FREE:
-      jam();
+      ljam();
       disk_restart_undo_free(undo);
       break;
     default:
@@ -1691,13 +1730,13 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
     lsn = undo->m_lsn - 1; // make sure undo isn't run again...
     m_pgman.update_lsn(undo->m_key, lsn);
-    jamEntry();
+    ljamEntry();
     disk_restart_undo_page_bits(signal, undo);
   }
   else if (DBG_UNDO)
   {
-    jam();
+    ljam();
     ndbout << "ignore: " << undo->m_lsn << "(" << lsn << " )"
            << key << " type: " << undo->m_type
            << " tab: " << tableId << endl;
@@ -1783,7 +1822,7 @@ Dbtup::disk_restart_undo_page_bits(Signal* signal, Apply_undo* undo)
                 fragPtrP->m_tablespace_id);
   tsman.restart_undo_page_free_bits(&undo->m_key, new_bits);
-  jamEntry();
+  ljamEntry();
 }
 
 int
@@ -1799,7 +1838,7 @@ Dbtup::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
   getFragmentrec(fragPtr, fragId, tabPtr.p);
   if (fragPtr.p->m_undo_complete & Fragrecord::UC_CREATE)
   {
-    jam();
+    ljam();
     return -1;
   }
@@ -1819,7 +1858,7 @@ Dbtup::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
   if (alloc.m_curr_extent_info_ptr_i != RNIL)
   {
-    jam();
+    ljam();
     Ptr<Extent_info> old;
     c_extent_pool.getPtr(old, alloc.m_curr_extent_info_ptr_i);
     ndbassert(old.p->m_free_matrix_pos == RNIL);
@@ -1846,7 +1885,7 @@ void
 Dbtup::disk_restart_page_bits(Uint32 tableId, Uint32 fragId,
                               const Local_key*, Uint32 bits)
 {
-  jam();
+  ljam();
   TablerecPtr tabPtr;
   FragrecordPtr fragPtr;
   tabPtr.i = tableId;