nexedi / MariaDB
Commit 9cc5abd2, authored Dec 15, 2004 by joreland@mysql.com

wl2240 - test prg for distribution keys wrt ordered indexes

Parent: 3c2228f3

4 changed files with 279 additions and 77 deletions:

  ndb/test/include/HugoOperations.hpp    +1    -0
  ndb/test/ndbapi/testPartitioning.cpp   +144  -22
  ndb/test/src/HugoOperations.cpp        +31   -15
  ndb/test/src/HugoTransactions.cpp      +103  -40
ndb/test/include/HugoOperations.hpp

@@ -88,6 +88,7 @@ public:
                   NdbScanOperation::LM_CommittedRead,
                   int numRecords = 1);
+  NdbIndexScanOperation* pIndexScanOp;

protected:
  void allocRows(int rows);
  void deallocRows();
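
The single added line gives HugoOperations a handle on an ordered index scan, so that a batch of reads can be served by one NdbIndexScanOperation when the helper is bound to an ordered index, instead of one primary-key read per record (see the HugoOperations.cpp hunks further down). A condensed sketch of how the new member is used there, inside pkReadRecord() with pOp, lm and check as in that function; only calls that appear in this commit are shown, everything else is illustrative:

    // Sketch: the first record of the batch turns into an index scan, later records add ranges.
    if (pIndexScanOp == 0) {
      pIndexScanOp = (NdbIndexScanOperation*)pOp;   // pOp is assumed to have been created against the ordered index
      check = pIndexScanOp->readTuples(lm);         // one scan with the requested lock mode
    }
    // ... equality bounds for record r are defined elsewhere in pkReadRecord() ...
    pIndexScanOp->end_of_bound(r);                  // close range number r of the scan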

ndb/test/ndbapi/testPartitioning.cpp

@@ -33,6 +33,8 @@ int runLoadTable(NDBT_Context* ctx, NDBT_Step* step)
  return NDBT_OK;
}

+static Uint32 max_dks = 0;
+
static int
run_drop_table(NDBT_Context* ctx, NDBT_Step* step)

@@ -56,7 +58,7 @@ add_distribution_key(Ndb*, NdbDictionary::Table& tab, int when)
  }

  int keys = tab.getNoOfPrimaryKeys();
-  int dks = (2 * keys + 2) / 3;
+  int dks = (2 * keys + 2) / 3; dks = (dks > max_dks ? max_dks : dks);
  int cnt = 0;
  ndbout_c("%s pks: %d dks: %d", tab.getName(), keys, dks);
  for (unsigned i = 0; i < tab.getNoOfColumns(); i++)
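
As a worked example of the changed line (numbers not from the patch): a table with keys = 4 primary-key columns gives dks = (2*4 + 2)/3 = 3 candidate distribution-key columns, and the new clamp then limits that to max_dks, so the "distributionkey" test property now bounds how many columns the hook marks. The body of add_distribution_key() is elided in this view; a hypothetical sketch of the marking step, assuming NdbDictionary::Column::setDistributionKey(), which is not shown in this commit:

    // Hypothetical sketch only: flag the first dks primary-key columns as distribution keys.
    int marked = 0;
    for (int i = 0; i < tab.getNoOfColumns() && marked < dks; i++) {
      NdbDictionary::Column* col = tab.getColumn(i);
      if (col->getPrimaryKey()) {
        col->setDistributionKey(true);   // assumed API; the real hook may pick columns differently
        marked++;
      }
    }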

@@ -75,24 +77,104 @@ add_distribution_key(Ndb*, NdbDictionary::Table& tab, int when)
  return 0;
}

-int
+static int
run_create_table(NDBT_Context* ctx, NDBT_Step* step)
{
-  bool dk = ctx->getProperty("distributionkey", (unsigned)0);
-  return NDBT_Tables::createTable(GETNDB(step),
-                                  ctx->getTab()->getName(),
-                                  false, false,
-                                  dk ? add_distribution_key : 0);
+  max_dks = ctx->getProperty("distributionkey", (unsigned)0);
+  if(NDBT_Tables::createTable(GETNDB(step),
+                              ctx->getTab()->getName(),
+                              false, false,
+                              max_dks ? add_distribution_key : 0) == NDBT_OK)
+  {
+    return NDBT_OK;
+  }
+
+  if(GETNDB(step)->getDictionary()->getNdbError().code == 745)
+    return NDBT_OK;
+
+  return NDBT_FAILED;
}

-int
-run_pk_dk(NDBT_Context* ctx, NDBT_Step* step)
-{
-  Ndb* p_ndb = GETNDB(step);
-  int records = ctx->getNumRecords();
-  const NdbDictionary::Table* tab =
-    p_ndb->getDictionary()->getTable(ctx->getTab()->getName());
-
-  HugoTransactions hugoTrans(*tab);
-
+static int
+run_create_pk_index(NDBT_Context* ctx, NDBT_Step* step){
+  bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0);
+
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab =
+    pNdb->getDictionary()->getTable(ctx->getTab()->getName());
+  if(!pTab)
+    return NDBT_OK;
+
+  bool logged = ctx->getProperty("LoggedIndexes", orderedIndex ? 0 : 1);
+
+  BaseString name;
+  name.assfmt("IND_%s_PK_%c", pTab->getName(), orderedIndex ? 'O' : 'U');
+
+  // Create index
+  if (orderedIndex)
+    ndbout << "Creating " << ((logged) ? "logged " : "temporary ") << "ordered index "
+           << name.c_str() << " (";
+  else
+    ndbout << "Creating " << ((logged) ? "logged " : "temporary ") << "unique index "
+           << name.c_str() << " (";
+
+  NdbDictionary::Index pIdx(name.c_str());
+  pIdx.setTable(pTab->getName());
+  if (orderedIndex)
+    pIdx.setType(NdbDictionary::Index::OrderedIndex);
+  else
+    pIdx.setType(NdbDictionary::Index::UniqueHashIndex);
+
+  for (int c = 0; c < pTab->getNoOfColumns(); c++){
+    const NdbDictionary::Column* col = pTab->getColumn(c);
+    if(col->getPrimaryKey()){
+      pIdx.addIndexColumn(col->getName());
+      ndbout << col->getName() << " ";
+    }
+  }
+
+  pIdx.setStoredIndex(logged);
+  ndbout << ") ";
+  if (pNdb->getDictionary()->createIndex(pIdx) != 0){
+    ndbout << "FAILED!" << endl;
+    const NdbError err = pNdb->getDictionary()->getNdbError();
+    ERR(err);
+    return NDBT_FAILED;
+  }
+
+  ndbout << "OK!" << endl;
+  return NDBT_OK;
+}
+
+static int
+run_create_pk_index_drop(NDBT_Context* ctx, NDBT_Step* step){
+  bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0);
+
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab =
+    pNdb->getDictionary()->getTable(ctx->getTab()->getName());
+  if(!pTab)
+    return NDBT_OK;
+
+  BaseString name;
+  name.assfmt("IND_%s_PK_%c", pTab->getName(), orderedIndex ? 'O' : 'U');
+
+  ndbout << "Dropping index " << name.c_str() << " ";
+  if (pNdb->getDictionary()->dropIndex(name.c_str(), pTab->getName()) != 0){
+    ndbout << "FAILED!" << endl;
+    ERR(pNdb->getDictionary()->getNdbError());
+    return NDBT_FAILED;
+  } else {
+    ndbout << "OK!" << endl;
+  }
+
+  return NDBT_OK;
+}
+
+static int
+run_tests(Ndb* p_ndb, HugoTransactions& hugoTrans, int records)
+{
  if (hugoTrans.loadTable(p_ndb, records) != 0)
  {
    return NDBT_FAILED;
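
Stripped of its logging, run_create_pk_index() above boils down to a short NdbDictionary recipe. A minimal sketch using only calls that appear in the function, with dict standing for pNdb->getDictionary() and the index name shortened for illustration:

    // Sketch: define an ordered index over every primary-key column of pTab.
    NdbDictionary::Index idxDef("IND_T1_PK_O");           // illustrative name
    idxDef.setTable(pTab->getName());
    idxDef.setType(NdbDictionary::Index::OrderedIndex);   // UniqueHashIndex for the hash variant
    for (int c = 0; c < pTab->getNoOfColumns(); c++)
      if (pTab->getColumn(c)->getPrimaryKey())
        idxDef.addIndexColumn(pTab->getColumn(c)->getName());
    idxDef.setStoredIndex(false);                          // ordered indexes default to non-logged here
    if (dict->createIndex(idxDef) != 0)
      ERR(dict->getNdbError());                            // as the test does on failure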

@@ -136,16 +218,50 @@ run_pk_dk(NDBT_Context* ctx, NDBT_Step* step)
    return 0;
}

-int
-run_hash_dk(NDBT_Context* ctx, NDBT_Step* step)
+static int
+run_pk_dk(NDBT_Context* ctx, NDBT_Step* step)
{
-  return 0;
+  Ndb* p_ndb = GETNDB(step);
+  int records = ctx->getNumRecords();
+  const NdbDictionary::Table* tab =
+    p_ndb->getDictionary()->getTable(ctx->getTab()->getName());
+
+  if(!tab)
+    return NDBT_OK;
+
+  HugoTransactions hugoTrans(*tab);
+
+  return run_tests(p_ndb, hugoTrans, records);
}

int
run_index_dk(NDBT_Context* ctx, NDBT_Step* step)
{
-  return 0;
+  Ndb* p_ndb = GETNDB(step);
+  int records = ctx->getNumRecords();
+  const NdbDictionary::Table* pTab =
+    p_ndb->getDictionary()->getTable(ctx->getTab()->getName());
+
+  if(!pTab)
+    return NDBT_OK;
+
+  bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0);
+
+  BaseString name;
+  name.assfmt("IND_%s_PK_%c", pTab->getName(), orderedIndex ? 'O' : 'U');
+
+  const NdbDictionary::Index* idx =
+    p_ndb->getDictionary()->getIndex(name.c_str(), pTab->getName());
+
+  if(!idx)
+  {
+    ndbout << "Failed to retreive index: " << name.c_str() << endl;
+    return NDBT_FAILED;
+  }
+
+  HugoTransactions hugoTrans(*pTab, idx);
+
+  return run_tests(p_ndb, hugoTrans, records);
}
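
run_pk_dk() and run_index_dk() differ only in how the HugoTransactions helper is constructed: with the table alone it drives primary-key operations, with the table plus the IND_<table>_PK_<O|U> index created by run_create_pk_index() it drives the same operations through that index. A two-line sketch of the distinction, with the constructor forms exactly as they appear above:

    HugoTransactions pkTrans(*tab);         // operations go through the primary key
    HugoTransactions idxTrans(*pTab, idx);  // operations go through the created PK index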

@@ -153,7 +269,7 @@ NDBT_TESTSUITE(testPartitioning);
TESTCASE("pk_dk",
         "Primary key operations with distribution key"){
-  TC_PROPERTY("distributionkey", 1);
+  TC_PROPERTY("distributionkey", ~0);
  INITIALIZER(run_drop_table);
  INITIALIZER(run_create_table);
  INITIALIZER(run_pk_dk);

@@ -162,19 +278,25 @@ TESTCASE("pk_dk",
TESTCASE("hash_index_dk",
         "Unique index operatations with distribution key"){
-  TC_PROPERTY("distributionkey", 1);
+  TC_PROPERTY("distributionkey", ~0);
  TC_PROPERTY("OrderedIndex", (unsigned)0);
  INITIALIZER(run_drop_table);
  INITIALIZER(run_create_table);
-  INITIALIZER(run_hash_dk);
+  INITIALIZER(run_create_pk_index);
+  INITIALIZER(run_index_dk);
+  INITIALIZER(run_create_pk_index_drop);
  INITIALIZER(run_drop_table);
}
TESTCASE("ordered_index_dk",
         "Ordered index operatations with distribution key"){
-  TC_PROPERTY("distributionkey", 1);
+  TC_PROPERTY("distributionkey", (unsigned)1);
  TC_PROPERTY("OrderedIndex", (unsigned)1);
  INITIALIZER(run_drop_table);
  INITIALIZER(run_create_table);
+  INITIALIZER(run_create_pk_index);
  INITIALIZER(run_index_dk);
+  INITIALIZER(run_create_pk_index_drop);
  INITIALIZER(run_drop_table);
}
NDBT_TESTSUITE_END(testPartitioning);

ndb/test/src/HugoOperations.cpp

@@ -58,9 +58,15 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
  allocRows(numRecords);
  int check;
+  NdbOperation* pOp = 0;
+  pIndexScanOp = 0;
+
  for(int r = 0; r < numRecords; r++){
-    NdbOperation* pOp = getOperation(pTrans, NdbOperation::ReadRequest);
+    if(pOp == 0)
+    {
+      pOp = getOperation(pTrans, NdbOperation::ReadRequest);
+    }
    if (pOp == NULL) {
      ERR(pTrans->getNdbError());
      return NDBT_FAILED;

@@ -69,13 +75,16 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
  rand_lock_mode:
    switch(lm){
    case NdbOperation::LM_Read:
-      check = pOp->readTuple();
-      break;
    case NdbOperation::LM_Exclusive:
-      check = pOp->readTupleExclusive();
-      break;
    case NdbOperation::LM_CommittedRead:
-      check = pOp->dirtyRead();
+      if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex &&
+         pIndexScanOp == 0)
+      {
+        pIndexScanOp = ((NdbIndexScanOperation*)pOp);
+        check = pIndexScanOp->readTuples(lm);
+      }
+      else
+        check = pOp->readTuple(lm);
      break;
    default:
      lm = (NdbOperation::LockMode)((rand() >> 16) & 3);

@@ -97,6 +106,11 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
      }
    }

+    if(pIndexScanOp)
+      pIndexScanOp->end_of_bound(r);
+
+    if(r == 0 || pIndexScanOp == 0)
+    {
    // Define attributes to read
    for(a = 0; a < tab.getNoOfColumns(); a++){
      if((rows[r]->attributeStore(a) =

@@ -106,6 +120,8 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
      }
    }
+    }
+    pOp = pIndexScanOp;
  }
  return NDBT_OK;
}
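
With the scan defined as above, results for the whole batch arrive through the scan object rather than through individual read operations, which is why HugoTransactions::pkReadRecords() (next file) verifies rows[0] in a nextResult() loop instead of indexing rows[b]. A condensed sketch of that consumption loop, with the names used in the hunks below:

    // Sketch: count and verify the rows delivered by the ordered index scan.
    int rows_found = 0;
    while ((check = pIndexScanOp->nextResult()) == 0) {  // 0 = another row is available
      rows_found++;
      if (calc.verifyRowValues(rows[0]) != 0)            // scan rows are materialised in rows[0]
        return NDBT_FAILED;
    }
    // check == 1 marks a clean end of scan; the test also requires rows_found <= batch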

ndb/test/src/HugoTransactions.cpp

@@ -1031,23 +1031,22 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
int
HugoTransactions::pkReadRecords(Ndb* pNdb,
                                int records,
-                                int batchsize,
+                                int batch,
                                NdbOperation::LockMode lm){
  int reads = 0;
  int r = 0;
  int retryAttempt = 0;
  const int retryMax = 100;
  int check, a;
  NdbOperation *pOp;

-  if (batchsize == 0) {
-    g_info << "ERROR: Argument batchsize == 0 in pkReadRecords(). Not allowed." << endl;
+  if (batch == 0) {
+    g_info << "ERROR: Argument batch == 0 in pkReadRecords(). Not allowed." << endl;
    return NDBT_FAILED;
  }

  while (r < records){
-    if (r + batchsize > records)
-      batchsize = records - r;
+    if (r + batch > records)
+      batch = records - r;

    if (retryAttempt >= retryMax){
      g_info << "ERROR: has retried this operation " << retryAttempt

@@ -1069,7 +1068,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }

-    if (pkReadRecord(pNdb, r, batchsize, lm) != NDBT_OK)
+    if (pkReadRecord(pNdb, r, batch, lm) != NDBT_OK)
    {
      ERR(pTrans->getNdbError());
      pNdb->closeTransaction(pTrans);

@@ -1099,7 +1098,36 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }
-    } else {
-      for (int b = 0; (b < batchsize) && (r + b < records); b++){
+
+    if(pIndexScanOp)
+    {
+      int rows_found = 0;
+      while((check = pIndexScanOp->nextResult()) == 0)
+      {
+        rows_found++;
+        if (calc.verifyRowValues(rows[0]) != 0){
+          pNdb->closeTransaction(pTrans);
+          return NDBT_FAILED;
+        }
+      }
+      if(check != 1 || rows_found > batch)
+      {
+        pNdb->closeTransaction(pTrans);
+        return NDBT_FAILED;
+      }
+      else if(rows_found < batch)
+      {
+        if(batch == 1){
+          g_info << r << ": not found" << endl;
+          abort();
+        }
+        else
+          g_info << "Found " << rows_found << " of " << batch << " rows" << endl;
+      }
+      r += batch;
+      reads += rows_found;
+    }
+    else
+    {
+      for (int b = 0; (b < batch) && (r + b < records); b++){
        if (calc.verifyRowValues(rows[b]) != 0){
          pNdb->closeTransaction(pTrans);
          return NDBT_FAILED;

@@ -1108,9 +1136,9 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
        r++;
      }
    }
    }
    pNdb->closeTransaction(pTrans);
  }
  deallocRows();
  g_info << reads << " records read" << endl;

@@ -1184,6 +1212,42 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }

+    if(pIndexScanOp)
+    {
+      int rows_found = 0;
+      while((check = pIndexScanOp->nextResult(true)) == 0)
+      {
+        do {
+          if (calc.verifyRowValues(rows[0]) != 0){
+            pNdb->closeTransaction(pTrans);
+            return NDBT_FAILED;
+          }
+          int updates = calc.getUpdatesValue(rows[0]) + 1;
+          if(pkUpdateRecord(pNdb, r + rows_found, 1, updates) != NDBT_OK)
+          {
+            ERR(pTrans->getNdbError());
+            pNdb->closeTransaction(pTrans);
+            return NDBT_FAILED;
+          }
+          rows_found++;
+        } while((check = pIndexScanOp->nextResult(false)) == 0);
+
+        if(check != 2)
+          break;
+        if((check = pTrans->execute(NoCommit)) != 0)
+          break;
+      }
+      if(check != 1 || rows_found != batch)
+      {
+        pNdb->closeTransaction(pTrans);
+        return NDBT_FAILED;
+      }
+    }
+    else
+    {
      for(b = 0; b < batch && (b + r) < records; b++)
      {
        if (calc.verifyRowValues(rows[b]) != 0)

@@ -1201,8 +1265,8 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
          return NDBT_FAILED;
        }
      }
      check = pTrans->execute(Commit);
    }
    if (check == -1) {
      const NdbError err = pTrans->getNdbError();

@@ -1222,7 +1286,6 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
      updated += batch;
    }

    pNdb->closeTransaction(pTrans);

    r += batch; // Read next record

@@ -1633,7 +1696,7 @@ int
HugoTransactions::indexReadRecords(Ndb* pNdb,
                                   const char* idxName,
                                   int records,
-                                   int batchsize){
+                                   int batch){
  int reads = 0;
  int r = 0;
  int retryAttempt = 0;

@@ -1647,17 +1710,17 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
  const bool ordered = (pIndex->getType() == NdbDictionary::Index::OrderedIndex);

-  if (batchsize == 0) {
-    g_info << "ERROR: Argument batchsize == 0 in indexReadRecords(). "
+  if (batch == 0) {
+    g_info << "ERROR: Argument batch == 0 in indexReadRecords(). "
           << "Not allowed." << endl;
    return NDBT_FAILED;
  }

  if (ordered) {
-    batchsize = 1;
+    batch = 1;
  }

-  allocRows(batchsize);
+  allocRows(batch);

  while (r < records){
    if (retryAttempt >= retryMax){

@@ -1680,7 +1743,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }

-    for (int b = 0; (b < batchsize) && (r + b < records); b++){
+    for (int b = 0; (b < batch) && (r + b < records); b++){
      if(!ordered){
        pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
        if (pOp == NULL) {

@@ -1751,7 +1814,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }
    } else {
-      for (int b = 0; (b < batchsize) && (r + b < records); b++){
+      for (int b = 0; (b < batch) && (r + b < records); b++){
        if (calc.verifyRowValues(rows[b]) != 0){
          pNdb->closeTransaction(pTrans);
          return NDBT_FAILED;

@@ -1779,7 +1842,7 @@ int
HugoTransactions::indexUpdateRecords(Ndb* pNdb,
                                     const char* idxName,
                                     int records,
-                                     int batchsize){
+                                     int batch){
  int updated = 0;
  int r = 0;

@@ -1794,10 +1857,10 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
  const bool ordered = (pIndex->getType() == NdbDictionary::Index::OrderedIndex);

  if (ordered){
-    batchsize = 1;
+    batch = 1;
  }

-  allocRows(batchsize);
+  allocRows(batch);

  while (r < records){
    if (retryAttempt >= retryMax){

@@ -1820,7 +1883,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }

-    for (b = 0; b < batchsize && (b + r) < records; b++){
+    for (b = 0; b < batch && (b + r) < records; b++){
      if(!ordered){
        pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
        if (pOp == NULL) {

@@ -1890,7 +1953,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
      return NDBT_FAILED;
    }

-    for (b = 0; b < batchsize && (b + r) < records; b++){
+    for (b = 0; b < batch && (b + r) < records; b++){
      if (calc.verifyRowValues(rows[b]) != 0){
        pNdb->closeTransaction(pTrans);
        return NDBT_FAILED;

@@ -1955,12 +2018,12 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
      ndbout << "r = " << r << endl;
      return NDBT_FAILED;
    }
    else{
-      updated += batchsize;
+      updated += batch;
    }

    pNdb->closeTransaction(pTrans);
-    r += batchsize; // Read next record
+    r += batch; // Read next record
  }

  g_info << "|- " << updated << " records updated" << endl;
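
The pkUpdateRecords() hunk above uses a two-level scan consumption loop so that updates defined against scanned rows are sent before the next fetch. A condensed sketch of that control flow, with the return codes as the patch interprets them (0 = row available, 1 = scan finished, 2 = locally buffered rows exhausted); variable names follow the patch:

    // Sketch: consume an ordered index scan batch-wise and interleave updates.
    while ((check = pIndexScanOp->nextResult(true)) == 0) {     // fetch a batch from the data nodes
      do {
        /* verify rows[0] and define a pkUpdateRecord() for it */
      } while ((check = pIndexScanOp->nextResult(false)) == 0); // drain locally buffered rows only
      if (check != 2) break;                                    // 1 = end of scan, otherwise error
      if ((check = pTrans->execute(NoCommit)) != 0) break;      // send the queued updates
    }
    // afterwards the patch requires check == 1 and rows_found == batch, else the test fails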