Commit c4de1e0d authored by unknown

Merge mysql.com:/home/stewart/Documents/MySQL/5.0/ndb

into mysql.com:/home/stewart/Documents/MySQL/5.0/ndb-dynamic-port

parents 839e6a24 1d32cbb1
...@@ -94,6 +94,7 @@ jcole@sarvik.tfr.cafe.ee ...@@ -94,6 +94,7 @@ jcole@sarvik.tfr.cafe.ee
jcole@tetra.spaceapes.com jcole@tetra.spaceapes.com
jimw@mysql.com jimw@mysql.com
joerg@mysql.com joerg@mysql.com
jon@gigan.
joreland@mysql.com joreland@mysql.com
jorge@linux.jorge.mysql.com jorge@linux.jorge.mysql.com
jplindst@t41.(none) jplindst@t41.(none)
......
...@@ -532,7 +532,7 @@ os_mem_alloc_large( ...@@ -532,7 +532,7 @@ os_mem_alloc_large(
ibool assert_on_error) /* in: if TRUE, we crash mysqld if the memory ibool assert_on_error) /* in: if TRUE, we crash mysqld if the memory
cannot be allocated */ cannot be allocated */
{ {
#ifdef UNIV_LINUX #ifdef HAVE_LARGE_PAGES
ulint size; ulint size;
int shmid; int shmid;
void *ptr = NULL; void *ptr = NULL;
...@@ -542,6 +542,7 @@ os_mem_alloc_large( ...@@ -542,6 +542,7 @@ os_mem_alloc_large(
goto skip; goto skip;
} }
#ifdef UNIV_LINUX
/* Align block size to os_large_page_size */ /* Align block size to os_large_page_size */
size = ((n - 1) & ~(os_large_page_size - 1)) + os_large_page_size; size = ((n - 1) & ~(os_large_page_size - 1)) + os_large_page_size;
...@@ -561,6 +562,7 @@ os_mem_alloc_large( ...@@ -561,6 +562,7 @@ os_mem_alloc_large(
*/ */
shmctl(shmid, IPC_RMID, &buf); shmctl(shmid, IPC_RMID, &buf);
} }
#endif
if (ptr) { if (ptr) {
if (set_to_zero) { if (set_to_zero) {
...@@ -573,8 +575,8 @@ os_mem_alloc_large( ...@@ -573,8 +575,8 @@ os_mem_alloc_large(
} }
fprintf(stderr, "InnoDB HugeTLB: Warning: Using conventional memory pool\n"); fprintf(stderr, "InnoDB HugeTLB: Warning: Using conventional memory pool\n");
#endif
skip: skip:
#endif /* HAVE_LARGE_PAGES */
return(ut_malloc_low(n, set_to_zero, assert_on_error)); return(ut_malloc_low(n, set_to_zero, assert_on_error));
} }
...@@ -587,8 +589,12 @@ os_mem_free_large( ...@@ -587,8 +589,12 @@ os_mem_free_large(
/*=================*/ /*=================*/
void *ptr) /* in: number of bytes */ void *ptr) /* in: number of bytes */
{ {
#ifdef HAVE_LARGE_PAGES
if (os_use_large_pages && os_large_page_size
#ifdef UNIV_LINUX #ifdef UNIV_LINUX
if (os_use_large_pages && os_large_page_size && !shmdt(ptr)) { && !shmdt(ptr)
#endif
) {
return; return;
} }
#endif #endif
......
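The hunks above re-scope the InnoDB huge-page code: the function-wide guard becomes HAVE_LARGE_PAGES while only the SysV shared-memory calls remain under UNIV_LINUX, so the conventional-memory fallback is reachable on non-Linux builds as well. For reference, the shared-memory huge-page pattern being guarded looks roughly like the sketch below. This is illustrative code, not the InnoDB functions themselves: large_alloc/large_free are made-up names, SHM_HUGETLB is Linux-specific, and plain malloc/free stand in for ut_malloc_low and the real fallback handling.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

/* Allocate n bytes backed by huge pages, falling back to malloc(). */
static void* large_alloc(size_t n, size_t large_page_size)
{
        /* Align the request up to a multiple of the huge page size. */
        size_t size = ((n - 1) & ~(large_page_size - 1)) + large_page_size;

        int shmid = shmget(IPC_PRIVATE, size, SHM_HUGETLB | SHM_R | SHM_W);
        if (shmid < 0) {
                fprintf(stderr, "HugeTLB: shmget failed, using conventional memory\n");
                return malloc(n);
        }

        void* ptr = shmat(shmid, NULL, 0);

        /* Mark the segment for removal; it disappears once the last user
           detaches (or right away if shmat failed). */
        shmctl(shmid, IPC_RMID, NULL);

        if (ptr == (void*)-1) {
                fprintf(stderr, "HugeTLB: shmat failed, using conventional memory\n");
                return malloc(n);
        }
        memset(ptr, 0, size);
        return ptr;
}

/* Release a block from large_alloc(); shmdt() fails for malloc()'d blocks,
   in which case the block is freed conventionally. */
static void large_free(void* ptr)
{
        if (shmdt(ptr) != 0)
                free(ptr);
}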
...@@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO ...@@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file # If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation. # will be included in the documentation.
EXTRACT_STATIC = NO EXTRACT_STATIC = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces. # undocumented members of documented classes, files or namespaces.
...@@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO ...@@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for # will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this. # which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation # will put list of the files that are included by a file in the documentation
......
...@@ -52,7 +52,7 @@ EXTRACT_ALL = YES ...@@ -52,7 +52,7 @@ EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class # If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation. # will be included in the documentation.
EXTRACT_PRIVATE = YES EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file # If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation. # will be included in the documentation.
...@@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO ...@@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for # will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this. # which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation # will put list of the files that are included by a file in the documentation
......
...@@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO ...@@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file # If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation. # will be included in the documentation.
EXTRACT_STATIC = NO EXTRACT_STATIC = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces. # undocumented members of documented classes, files or namespaces.
...@@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO ...@@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for # will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this. # which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation # will put list of the files that are included by a file in the documentation
......
...@@ -52,7 +52,7 @@ EXTRACT_ALL = YES ...@@ -52,7 +52,7 @@ EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class # If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation. # will be included in the documentation.
EXTRACT_PRIVATE = YES EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file # If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation. # will be included in the documentation.
...@@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO ...@@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for # will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this. # which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation # will put list of the files that are included by a file in the documentation
......
...@@ -52,7 +52,7 @@ EXTRACT_ALL = YES ...@@ -52,7 +52,7 @@ EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class # If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation. # will be included in the documentation.
EXTRACT_PRIVATE = YES EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file # If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation. # will be included in the documentation.
...@@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO ...@@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for # will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this. # which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation # will put list of the files that are included by a file in the documentation
......
...@@ -17,30 +17,37 @@ ...@@ -17,30 +17,37 @@
/** /**
@mainpage NDB API Programmers' Guide @mainpage NDB API Programmers' Guide
This guide assumes a basic familiarity with NDB Cluster concepts. This guide assumes a basic familiarity with MySQL Cluster concepts.
Some of the fundamental ones are described in section @ref secConcepts. Some of the fundamental ones are described in section @ref secConcepts.
The <em>NDB API</em> is an NDB Cluster application interface The <em>NDB API</em> is a MySQL Cluster application interface
that implements both synchronous and asynchronous transactions. that implements transactions.
The NDB API consists of the following fundamental classes: The NDB API consists of the following fundamental classes:
- Ndb_cluster_connection class representing a connection to a cluster,
- Ndb is the main class representing the database, - Ndb is the main class representing the database,
- NdbConnection represents a transaction, - NdbConnection represents a transaction,
- NdbOperation represents a transaction operation using primary key, - NdbOperation represents an operation using primary key,
- NdbIndexOperation represents a transaction operation using a secondary - NdbScanOperation represents an operation performing a full table scan.
index, - NdbIndexOperation represents an operation using a unique hash index,
- NdbIndexScanOperation represents an operation performing a scan using
an ordered index,
- NdbRecAttr represents the value of an attribute, and - NdbRecAttr represents the value of an attribute, and
- NdbDictionary represents meta information about tables and attributes. - NdbDictionary represents meta information about tables and attributes.
- NdbError represents an error condition - NdbError contains a specification of an error.
There are also some auxiliary classes. There are also some auxiliary classes.
The main structure of an application program is as follows: The main structure of an application program is as follows:
-# Construct and connect to a cluster using the Ndb_cluster_connection
object.
-# Construct and initialize Ndb object(s). -# Construct and initialize Ndb object(s).
-# Define and execute (synchronous or asynchronous) transactions. -# Define and execute transactions using NdbConnection and Ndb*Operation.
-# Delete Ndb objects -# Delete Ndb objects
-# Delete connection to cluster
The main structure of a transaction is as follows: The main structure of a transaction is as follows:
-# Start transaction -# Start transaction, an NdbConnection
-# Add and define operations (associated with the transaction) -# Add and define operations (associated with the transaction),
Ndb*Operation
-# Execute transaction -# Execute transaction
The execute can be of two different types, The execute can be of two different types,
...@@ -71,8 +78,8 @@ ...@@ -71,8 +78,8 @@
At this step the transaction is being defined. At this step the transaction is being defined.
It is not yet sent to the NDB kernel. It is not yet sent to the NDB kernel.
-# Add and define operations to the transaction -# Add and define operations to the transaction
(using NdbConnection::getNdbOperation and (using NdbConnection::getNdb*Operation and
methods from class NdbOperation). methods from class Ndb*Operation).
The transaction is still not sent to the NDB kernel. The transaction is still not sent to the NDB kernel.
-# Execute the transaction (using NdbConnection::execute). -# Execute the transaction (using NdbConnection::execute).
-# Close the transaction (using Ndb::closeTransaction). -# Close the transaction (using Ndb::closeTransaction).
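The numbered structure above maps onto code roughly as follows. This is a sketch against the 5.0-era C++ NDB API: the Ndb_cluster_connection connect() call and ndb_init() are assumed from the API as documented elsewhere, TEST_DB, MYTABLENAME, ATTR1 and ATTR2 are placeholders taken from the examples on this page, and error handling is reduced to early returns (a real program would also wait for the cluster to become ready and inspect getNdbError()).

#include <NdbApi.hpp>

int run_example()
{
  ndb_init();                                      // one-time NDB API initialization

  // 1. Construct and connect to a cluster
  Ndb_cluster_connection cluster_connection;       // default connectstring
  if (cluster_connection.connect() != 0) return -1;

  // 2. Construct and initialize an Ndb object ("TEST_DB" is a placeholder)
  Ndb ndb(&cluster_connection, "TEST_DB");
  if (ndb.init() != 0) return -1;

  // 3. Define and execute a transaction: one primary-key read
  NdbConnection* trans = ndb.startTransaction();
  if (trans == NULL) return -1;

  NdbOperation* op = trans->getNdbOperation("MYTABLENAME");
  if (op == NULL) { ndb.closeTransaction(trans); return -1; }

  op->readTuple(NdbOperation::LM_Read);            // operation type and lock mode
  op->equal("ATTR1", 1);                           // search condition (primary key)
  NdbRecAttr* attr2 = op->getValue("ATTR2", NULL); // attribute to read

  int ret = trans->execute(Commit);

  // 4./5. Close the transaction; the Ndb object and the cluster connection
  //       are torn down when they go out of scope
  ndb.closeTransaction(trans);
  return (ret == 0 && attr2 != NULL) ? 0 : -1;
}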
...@@ -82,20 +89,21 @@ ...@@ -82,20 +89,21 @@
To execute several parallel synchronous transactions, one can either To execute several parallel synchronous transactions, one can either
use multiple Ndb objects in several threads or start multiple use multiple Ndb objects in several threads or start multiple
application programs. application programs.
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
Another way to execute several parallel transactions is to use Another way to execute several parallel transactions is to use
asynchronous transactions. asynchronous transactions.
#endif
@section secNdbOperations Operations @section secNdbOperations Operations
Each transaction (NdbConnection object) consists of a list of Each transaction (NdbConnection object) consists of a list of
operations (NdbOperation or NdbIndexOperation objects. operations (Ndb*Operation objects).
NdbIndexOperation is used for accessing tables through secondary indexes).
Operations are of two different kinds: Operations are of two different kinds:
-# standard operations, and -# standard operations, and
-# interpreted program operations. -# interpreted program operations.
<h3>Standard Operations</h3> <h3>Single row operations</h3>
After the operation is created using NdbConnection::getNdbOperation After the operation is created using NdbConnection::getNdbOperation
(or NdbConnection::getNdbIndexOperation), (or NdbConnection::getNdbIndexOperation),
it is defined in the following three steps: it is defined in the following three steps:
...@@ -106,36 +114,42 @@ ...@@ -106,36 +114,42 @@
-# Specify attribute actions -# Specify attribute actions
(e.g. using NdbOperation::getValue) (e.g. using NdbOperation::getValue)
Example code (using an NdbOperation): Example code (using an NdbOperation and excluding error handling):
@code @code
MyOperation = MyConnection->getNdbOperation("MYTABLENAME"); // 1. Create // 1. Create
if (MyOperation == NULL) APIERROR(MyConnection->getNdbError()); MyOperation= MyConnection->getNdbOperation("MYTABLENAME");
// 2. Define type of operation and lock mode
MyOperation->readTuple(NdbOperation::LM_Read);
MyOperation->readTuple(); // 2. Define type of operation // 3. Specify Search Conditions
MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions MyOperation->equal("ATTR1", i);
MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions // 4. Attribute Actions
if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError()); MyRecAttr= MyOperation->getValue("ATTR2", NULL);
@endcode @endcode
For more examples, see @ref ndbapi_example1.cpp and @ref ndbapi_example2.cpp. For more examples, see @ref ndbapi_example1.cpp and
@ref ndbapi_example2.cpp.
Example code using an NdbIndexOperation: Example code (using an NdbIndexOperation and excluding error handling):
@code @code
MyOperation = // 1. Create // 1. Create
MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME"); MyOperation= MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME");
if (MyOperation == NULL) APIERROR(MyConnection->getNdbError());
// 2. Define type of operation and lock mode
MyOperation->readTuple(NdbOperation::LM_Read);
MyOperation->readTuple(); // 2. Define type of operation // 3. Specify Search Conditions
MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions MyOperation->equal("ATTR1", i);
MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions // 4. Attribute Actions
if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError()); MyRecAttr = MyOperation->getValue("ATTR2", NULL);
@endcode @endcode
For more examples, see @ref ndbapi_example4.cpp. For more examples, see @ref ndbapi_example4.cpp.
<h4>Step 1: Define Standard Operation Type</h4> <h4>Step 1: Define single row operation type</h4>
The following types of standard operations exist: The following types of operations exist:
-# NdbOperation::insertTuple : -# NdbOperation::insertTuple :
inserts a non-existing tuple inserts a non-existing tuple
-# NdbOperation::writeTuple : -# NdbOperation::writeTuple :
...@@ -146,70 +160,18 @@ ...@@ -146,70 +160,18 @@
-# NdbOperation::deleteTuple : -# NdbOperation::deleteTuple :
deletes an existing tuple deletes an existing tuple
-# NdbOperation::readTuple : -# NdbOperation::readTuple :
reads an existing tuple reads an existing tuple with specified lock mode
-# NdbOperation::readTupleExclusive :
reads an existing tuple using an exclusive lock
-# NdbOperation::simpleRead :
reads an existing tuple (using shared read lock),
but releases lock immediately after read
-# NdbOperation::committedRead :
reads committed tuple
-# NdbOperation::dirtyUpdate :
updates an existing tuple, but releases lock immediately
after read (uses dirty lock)
-# NdbOperation::dirtyWrite :
updates or writes a tuple, but releases lock immediately
after read (uses dirty lock)
All of these operations operate on the unique tuple key. All of these operations operate on the unique tuple key.
(When NdbIndexOperation is used then all of these operations (When NdbIndexOperation is used then all of these operations
operate on a defined secondary index.) operate on a defined unique hash index.)
Some comments:
- NdbOperation::simpleRead and
NdbOperation::committedRead can execute on the same transaction
as the above operations but will release its locks immediately
after reading the tuple.
NdbOperation::simpleRead will always read the latest version
of the tuple.
Thus it will wait until it can acquire a shared read lock on
the tuple.
NdbOperation::committedRead will read the latest committed
version of the tuple.
<br>
Both NdbOperation::simpleRead and NdbOperation::committedRead
are examples of consistent reads which are not repeatable.
All reads read the latest version if updates were made by the same
transaction.
Errors on simple read are only reported by the NdbOperation object.
These error codes are not transferred to the NdbConnection object.
- NdbOperation::dirtyUpdate and NdbOperation::dirtyWrite
will execute in the same transaction
but will release the lock immediately after updating the
tuple.
It will wait on the lock until it can acquire an exclusive
write lock.
In a replicated version of NDB Cluster NdbOperation::dirtyUpdate
can lead to inconsistency between the replicas.
Examples of when it could be used is
to update statistical counters on tuples which are "hot-spots".
@note If you want to define multiple operations within the same transaction, @note If you want to define multiple operations within the same transaction,
then you need to call NdbConnection::getNdbOperation then you need to call NdbConnection::getNdb*Operation for each
(or NdbConnection::getNdbIndexOperation) for each
operation. operation.
<h4>Step 2: Specify Search Conditions</h4> <h4>Step 2: Specify Search Conditions</h4>
The search condition is used to select tuples. The search condition is used to select tuples.
(In the current NdbIndexOperation implementation
this means setting the value of
the secondary index attributes of the wanted tuple.)
If a tuple identity is used, then NdbOperation::setTupleId
is used to define the search key when inserting new tuples.
Otherwise, NdbOperation::equal is used.
For NdbOperation::insertTuple it is also allowed to define the For NdbOperation::insertTuple it is also allowed to define the
search key by using NdbOperation::setValue. search key by using NdbOperation::setValue.
...@@ -218,7 +180,6 @@ ...@@ -218,7 +180,6 @@
For NdbOperation::insertTuple it is not necessary to use For NdbOperation::insertTuple it is not necessary to use
NdbOperation::setValue on key attributes before other attributes. NdbOperation::setValue on key attributes before other attributes.
<h4>Step 3: Specify Attribute Actions</h4> <h4>Step 3: Specify Attribute Actions</h4>
Now it is time to define which attributes should be read or updated. Now it is time to define which attributes should be read or updated.
Deletes can neither read nor set values, read can only read values and Deletes can neither read nor set values, read can only read values and
...@@ -495,7 +456,7 @@ ...@@ -495,7 +456,7 @@
should match the automatic numbering to make it easier to should match the automatic numbering to make it easier to
debug the interpreted program. debug the interpreted program.
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
@section secAsync Asynchronous Transactions @section secAsync Asynchronous Transactions
The asynchronous interface is used to increase the speed of The asynchronous interface is used to increase the speed of
transaction execution by better utilizing the connection transaction execution by better utilizing the connection
...@@ -595,7 +556,7 @@ ...@@ -595,7 +556,7 @@
More about how transactions are sent to the NDB Kernel is More about how transactions are sent to the NDB Kernel is
available in section @ref secAdapt. available in section @ref secAdapt.
#endif
@section secError Error Handling @section secError Error Handling
...@@ -671,6 +632,7 @@ ...@@ -671,6 +632,7 @@
* @include ndbapi_example4.cpp * @include ndbapi_example4.cpp
*/ */
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** /**
* @page select_all.cpp select_all.cpp * @page select_all.cpp select_all.cpp
* @include select_all.cpp * @include select_all.cpp
...@@ -680,6 +642,7 @@ ...@@ -680,6 +642,7 @@
* @page ndbapi_async.cpp ndbapi_async.cpp * @page ndbapi_async.cpp ndbapi_async.cpp
* @include ndbapi_async.cpp * @include ndbapi_async.cpp
*/ */
#endif
/** /**
* @page ndbapi_scan.cpp ndbapi_scan.cpp * @page ndbapi_scan.cpp ndbapi_scan.cpp
...@@ -691,8 +654,7 @@ ...@@ -691,8 +654,7 @@
@page secAdapt Adaptive Send Algorithm @page secAdapt Adaptive Send Algorithm
At the time of "sending" the transaction At the time of "sending" the transaction
(using NdbConnection::execute, NdbConnection::executeAsynch, (using NdbConnection::execute), the transactions
Ndb::sendPreparedTransactions, or Ndb::sendPollNdb), the transactions
are in reality <em>not</em> immediately transferred to the NDB Kernel. are in reality <em>not</em> immediately transferred to the NDB Kernel.
Instead, the "sent" transactions are only kept in a Instead, the "sent" transactions are only kept in a
special send list (buffer) in the Ndb object to which they belong. special send list (buffer) in the Ndb object to which they belong.
...@@ -847,12 +809,56 @@ ...@@ -847,12 +809,56 @@
then a timeout error occurs. then a timeout error occurs.
Concurrent transactions (parallel application programs, thread-based Concurrent transactions (parallel application programs, thread-based
applications, or applications with asynchronous transactions) applications)
sometimes deadlock when they try to access the same information. sometimes deadlock when they try to access the same information.
Applications need to be programmed so that timeout errors Applications need to be programmed so that timeout errors
occurring due to deadlocks are handled. This generally occurring due to deadlocks are handled. This generally
means that the transaction encountering timeout means that the transaction encountering timeout
should be rolled back and restarted. should be rolled back and restarted.
@section secHint Hints and performance
NDB API can be hinted to select a particular transaction coordinator.
The default method is round robin where each set of new transactions
is placed on the next NDB kernel node.
By providing a distribution key (usually the primary key
of the most-used table of the transaction) for a record
the transaction will be placed on the node where the primary replica
of that record resides.
Note that this is only a hint; the system can
be under reconfiguration and then the NDB API
will select the transaction coordinator without using
this hint.
Placing the transaction coordinator close
to the actual data used in the transaction can in many cases
improve performance significantly. This is particularly true for
systems using TCP/IP. A system using Solaris and a 500 MHz processor
has a cost model for TCP/IP communication which is:
30 microseconds + (100 nanoseconds * no of Bytes)
This means that if we can ensure that we use "popular" links we increase
buffering and thus drastically reduce the communication cost.
Systems using SCI have a different cost model which is:
5 microseconds + (10 nanoseconds * no of Bytes)
Thus SCI systems are much less dependent on selection of
transaction coordinators.
Typically TCP/IP systems spend 30-60% of the time during communication,
whereas SCI systems typically spend 5-10% of the time during
communication.
Thus SCI means that less care from the NDB API programmer is
needed and great scalability can be achieved even for applications using
data from many parts of the database.
A simple example is an application that uses many simple updates where
a transaction needs to update one record.
This record has a 32 bit primary key,
which is also the distribution key.
Then the keyData will be the address of the integer
of the primary key and keyLen will be 4.
*/ */
#ifndef Ndb_H #ifndef Ndb_H
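The keyData/keyLen example that closes the secHint section corresponds to a startTransaction call like the following sketch (ndb is an initialized Ndb object as in the skeleton earlier on this page, the key value is a placeholder, and prio is unused per the documentation):

// Hint the transaction coordinator with a 32-bit distribution key:
// keyData is the address of the key, keyLen is 4.
Uint32 distribution_key = 42;                        // placeholder key value
NdbConnection* trans =
    ndb.startTransaction(0,                          // prio (not used)
                         (const char*)&distribution_key,
                         4);                         // keyLen in bytes
if (trans == NULL) {
    // inspect ndb.getNdbError()
}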
...@@ -945,6 +951,11 @@ public: ...@@ -945,6 +951,11 @@ public:
* Semaphores, mutexes and so forth are easy ways of issuing memory * Semaphores, mutexes and so forth are easy ways of issuing memory
* barriers without having to bother about the memory barrier concept. * barriers without having to bother about the memory barrier concept.
* *
*/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
// to be documented later
/*
* If one Ndb object is used to handle parallel transactions through the * If one Ndb object is used to handle parallel transactions through the
* asynchronous programming interface, please read the notes regarding * asynchronous programming interface, please read the notes regarding
* asynchronous transactions (Section @ref secAsync). * asynchronous transactions (Section @ref secAsync).
...@@ -955,6 +966,8 @@ public: ...@@ -955,6 +966,8 @@ public:
* asynchronous transaction or the methods for * asynchronous transaction or the methods for
* synchronous transactions but not both. * synchronous transactions but not both.
*/ */
#endif
class Ndb class Ndb
{ {
friend class NdbReceiver; friend class NdbReceiver;
...@@ -976,29 +989,30 @@ public: ...@@ -976,29 +989,30 @@ public:
* @{ * @{
*/ */
/** /**
* The starting point of your application code is to create an * The Ndb object represents a connection to a database.
* Ndb object. *
* This object represents the NDB kernel and is the main * @note the init() method must be called before it may be used
* object used in interaction with the NDB kernel.
* *
* @param ndb_cluster_connection is a connection to a cluster containing
* the database to be used
* @param aCatalogName is the name of the catalog you want to use. * @param aCatalogName is the name of the catalog you want to use.
* @note The catalog name provides a name space for the tables and * @note The catalog name provides a name space for the tables and
* indexes created in any connection from the Ndb object. * indexes created in any connection from the Ndb object.
* @param aSchemaName is the name of the schema you * @param aSchemaName is the name of the schema you
* want to use. It is optional and defaults to the "def" schema. * want to use.
* @note The schema name provides an additional name space * @note The schema name provides an additional name space
* for the tables and indexes created in a given catalog. * for the tables and indexes created in a given catalog.
* @note The methods get/setDatabaseName and get/setDatabaseSchemaName
* are equivalent to get/setCatalogName and get/setSchemaName.
* The get/setDatabaseName and get/setDatabaseSchemaName are
* deprecated.
*/ */
Ndb(const char* aCatalogName = "", const char* aSchemaName = "def");
Ndb(Ndb_cluster_connection *ndb_cluster_connection, Ndb(Ndb_cluster_connection *ndb_cluster_connection,
const char* aCatalogName = "", const char* aSchemaName = "def"); const char* aCatalogName = "", const char* aSchemaName = "def");
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
// deprecated
Ndb(const char* aCatalogName = "", const char* aSchemaName = "def");
#endif
~Ndb(); ~Ndb();
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** /**
* The current catalog name can be fetched by getCatalogName. * The current catalog name can be fetched by getCatalogName.
* *
...@@ -1026,7 +1040,7 @@ public: ...@@ -1026,7 +1040,7 @@ public:
* @param aSchemaName is the new name of the current schema * @param aSchemaName is the new name of the current schema
*/ */
void setSchemaName(const char * aSchemaName); void setSchemaName(const char * aSchemaName);
#endif
/** /**
* The current database name can be fetched by getDatabaseName. * The current database name can be fetched by getDatabaseName.
...@@ -1057,22 +1071,22 @@ public: ...@@ -1057,22 +1071,22 @@ public:
void setDatabaseSchemaName(const char * aDatabaseSchemaName); void setDatabaseSchemaName(const char * aDatabaseSchemaName);
/** /**
* Before anything else it is necessary to initialize (start) * Initializes the Ndb object
* the Ndb object.
* *
* @param maxNoOfTransactions * @param maxNoOfTransactions
* Maximum number of parallel * Maximum number of parallel
* NdbConnection objects that should be handled by the Ndb object. * NdbConnection objects that can be handled by the Ndb object.
* A value larger than 1024 will be downgraded to 1024. * Maximum value is 1024.
* This means that one Ndb object can handle at most 1024 parallel *
* transactions. * @note each scan or index scan operation uses one extra
* @return 0 if successful, -1 otherwise. * NdbConnection object
* *
* @note The internal implementation multiplies this value * @return 0 if successful, -1 otherwise.
* with 3.
*/ */
int init(int maxNoOfTransactions = 4); int init(int maxNoOfTransactions = 4);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
// deprecated
/** /**
* Wait for Ndb object to successfully set-up connections to * Wait for Ndb object to successfully set-up connections to
* the NDB kernel. * the NDB kernel.
...@@ -1085,8 +1099,8 @@ public: ...@@ -1085,8 +1099,8 @@ public:
* @return 0: Ndb is ready and timeout has not occurred.<br> * @return 0: Ndb is ready and timeout has not occurred.<br>
* -1: Timeout has expired * -1: Timeout has expired
*/ */
int waitUntilReady(int timeout = 60); int waitUntilReady(int timeout = 60);
#endif
/** @} *********************************************************************/ /** @} *********************************************************************/
...@@ -1096,30 +1110,55 @@ public: ...@@ -1096,30 +1110,55 @@ public:
*/ */
/** /**
* Query the database for schema information * Get an object for retrieving or manipulating database schema information
* (without performing any transaction). *
* @note this object operates outside any transaction
* *
* @return Object containing meta information about all tables * @return Object containing meta information about all tables
* in NDB Cluster. * in NDB Cluster.
*/ */
class NdbDictionary::Dictionary* getDictionary() const; class NdbDictionary::Dictionary* getDictionary() const;
/** @} *********************************************************************/
/**
* @name Event subscriptions
* @{
*/
/**
* Create a subscription to an event defined in the database
*
* @param eventName
* unique identifier of the event
* @param bufferLength
* buffer size for storing event data
*
* @return Object representing an event, NULL on failure
*/
NdbEventOperation* createEventOperation(const char* eventName, NdbEventOperation* createEventOperation(const char* eventName,
const int bufferLength); const int bufferLength);
int dropEventOperation(NdbEventOperation*); /**
void monitorEvent(NdbEventOperation *, NdbEventCallback, void*); * Drop a subscription to an event
int pollEvents(int aMillisecondNumber); *
* @param eventName
* unique identifier of the event
*
* @return 0 on success
*/
int dropEventOperation(NdbEventOperation* eventName);
/** /**
* Get the application node identity. * Wait for an event to occur. Will return as soon as an event
* is detected on any of the created events.
* *
* Each node (DB nodes, Applications, and Management Servers) * @param aMillisecondNumber
* has its own node identity in the NDB Cluster. * maximum time to wait
* See documentation for the management server configuration file.
* *
* @return Node id of this application. * @return the number of events that have occurred, -1 on failure
*/ */
int getNodeId(); int pollEvents(int aMillisecondNumber);
/** @} *********************************************************************/ /** @} *********************************************************************/
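A minimal sketch of the event-subscription methods documented above, using only the three Ndb calls shown in this hunk (ndb is an initialized Ndb object, MY_EVENT_0 and the buffer length are placeholders, and creating the event in the dictionary as well as reading the event data from the NdbEventOperation are omitted):

// Subscribe to an event assumed to already exist in the database.
NdbEventOperation* ev_op = ndb.createEventOperation("MY_EVENT_0", 100);

if (ev_op != NULL) {
  // pollEvents returns the number of events seen, or -1 on failure.
  int n;
  while ((n = ndb.pollEvents(1000)) == 0)
    ;  // nothing within one second; a real application would have an exit condition

  if (n > 0) {
    // ... consume the event data via ev_op here ...
  }

  // Tear down the subscription.
  ndb.dropEventOperation(ev_op);
}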
...@@ -1129,71 +1168,19 @@ public: ...@@ -1129,71 +1168,19 @@ public:
*/ */
/** /**
* This method returns an NdbConnection which caters for the transaction. * Start a transaction
* When the transaction is completed it must be closed. *
* The Ndb::closeTransaction also return the NdbConnection object * @note When the transaction is completed it must be closed using
* and all other memory related to the transaction. * Ndb::closeTransaction or NdbConnection::close.
* Failure to close the transaction will lead to memory leakage.
* The transaction must be closed independent of its outcome, i.e. * The transaction must be closed independent of its outcome, i.e.
* even if there is an error. * even if there is an error.
* *
* NDB API can be hinted to select a particular transaction coordinator. * @param prio Not implemented
* The default method is round robin where each set of new transactions * @param keyData Pointer to partition key to be used for deciding
* is placed on the next NDB kernel node. * which node to run the Transaction Coordinator on
* By providing a distribution key (usually the primary key * @param keyLen Length of partition key expressed in bytes
* of the mostly used table of the transaction) for a record
* the transaction will be placed on the node where the primary replica
* of that record resides.
* Note that this is only a hint, the system can
* be under reconfiguration and then the NDB API
* will use select the transaction coordinator without using
* this hint.
*
* Placing the transaction coordinator close
* to the actual data used in the transaction can in many cases
* improve performance significantly. This is particularly true for
* systems using TCP/IP. A system using Solaris and a 500 MHz processor
* has a cost model for TCP/IP communication which is:
*
* 30 microseconds + (100 nanoseconds * no of Bytes)
*
* This means that if we can ensure that we use "popular" links we increase
* buffering and thus drastically reduce the communication cost.
* Systems using SCI has a different cost model which is:
*
* 5 microseconds + (10 nanoseconds * no of Bytes)
*
* Thus SCI systems are much less dependent on selection of
* transaction coordinators.
* Typically TCP/IP systems spend 30-60% of the time during communication,
* whereas SCI systems typically spend 5-10% of the time during
* communication.
* Thus SCI means that less care from the NDB API programmer is
* needed and great scalability can be achieved even for applications using
* data from many parts of the database.
* *
* A simple example is an application that uses many simple updates where * @return NdbConnection object, or NULL on failure.
* a transaction needs to update one record.
* This record has a 32 bit primary key,
* which is also the distribution key.
* Then the keyData will be the address of the integer
* of the primary key and keyLen will be 4.
*
* @note Transaction priorities are not yet supported.
*
* @param prio The priority of the transaction.<br>
* Priority 0 is the highest priority and is used
* for short transactions with requirements on low delay.<br>
* Priority 1 is a medium priority for short transactions.
* <br>
* Priority 2 is a medium priority for long transactions.<br>
* Priority 3 is a low priority for long transactions.<br>
* <em>This parameter is not currently used,
* and can be set to any value</em>
* @param keyData Pointer to distribution key
* @param keyLen Length of distribution key expressed in bytes
*
* @return NdbConnection object, or NULL if method failed.
*/ */
NdbConnection* startTransaction(Uint32 prio = 0, NdbConnection* startTransaction(Uint32 prio = 0,
const char * keyData = 0, const char * keyData = 0,
...@@ -1233,7 +1220,10 @@ public: ...@@ -1233,7 +1220,10 @@ public:
#endif #endif
/** /**
* When a transactions is completed, the transaction has to be closed. * Close a transaction.
*
* @note should be called after the transaction has completed, irrespective
* of success or failure
* *
* @note It is not allowed to call Ndb::closeTransaction after sending the * @note It is not allowed to call Ndb::closeTransaction after sending the
* transaction asynchronously with either * transaction asynchronously with either
...@@ -1246,9 +1236,10 @@ public: ...@@ -1246,9 +1236,10 @@ public:
*/ */
void closeTransaction(NdbConnection* aConnection); void closeTransaction(NdbConnection* aConnection);
/** @} *********************************************************************/ /** @} *********************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
// to be documented later
/** /**
* @name Asynchronous Transactions * @name Asynchronous Transactions
* @{ * @{
...@@ -1259,10 +1250,9 @@ public: ...@@ -1259,10 +1250,9 @@ public:
* Will return as soon as at least 'minNoOfEventsToWakeUp' * Will return as soon as at least 'minNoOfEventsToWakeUp'
* of them have completed, or the maximum time given as timeout has passed. * of them have completed, or the maximum time given as timeout has passed.
* *
* @param aMillisecondNumber Maximum time to wait for transactions * @param aMillisecondNumber
* to complete. * Maximum time to wait for transactions to complete. Polling
* Polling without wait is achieved by setting the * without wait is achieved by setting the timer to zero.
* timer to zero.
* Time is expressed in milliseconds. * Time is expressed in milliseconds.
* @param minNoOfEventsToWakeup Minimum number of transactions * @param minNoOfEventsToWakeup Minimum number of transactions
* which has to wake up before the poll-call will return. * which has to wake up before the poll-call will return.
...@@ -1325,6 +1315,7 @@ public: ...@@ -1325,6 +1315,7 @@ public:
int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT, int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT,
int minNoOfEventsToWakeup = 1, int minNoOfEventsToWakeup = 1,
int forceSend = 0); int forceSend = 0);
#endif
/** @} *********************************************************************/ /** @} *********************************************************************/
...@@ -1336,7 +1327,7 @@ public: ...@@ -1336,7 +1327,7 @@ public:
/** /**
* Get the NdbError object * Get the NdbError object
* *
* The NdbError object is valid until you call a new NDB API method. * @note The NdbError object is valid until a new NDB API method is called.
*/ */
const NdbError & getNdbError() const; const NdbError & getNdbError() const;
...@@ -1348,26 +1339,25 @@ public: ...@@ -1348,26 +1339,25 @@ public:
const NdbError & getNdbError(int errorCode); const NdbError & getNdbError(int errorCode);
/** @} *********************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Get the application node identity.
*
* @return Node id of this application.
*/
int getNodeId();
/** /**
* setConnectString * setConnectString
* @param connectString - the connectString has the following format: *
* @code * @param connectString - see MySQL ref manual for format
* "nodeid=<ID>;host=host://<HOSTNAME>:<PORT>;
* host=host://<HOSTNAME2>:<PORT>;..."
* @endcode
* or
* @code
* "nodeid=<ID>;host=<HOSTNAME>:<PORT>;host=<HOSTNAME2>:<PORT>;..."
* @endcode
*/ */
static void setConnectString(const char * connectString); static void setConnectString(const char * connectString);
bool usingFullyQualifiedNames(); bool usingFullyQualifiedNames();
/** @} *********************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** /**
* Different types of tampering with the NDB Cluster. * Different types of tampering with the NDB Cluster.
* <b>For debugging purposes only.</b> * <b>For debugging purposes only.</b>
...@@ -1397,9 +1387,7 @@ public: ...@@ -1397,9 +1387,7 @@ public:
* on type of tampering. * on type of tampering.
*/ */
int NdbTamper(TamperType aAction, int aNode); int NdbTamper(TamperType aAction, int aNode);
#endif
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** /**
* Return a unique tuple id for a table. The id sequence is * Return a unique tuple id for a table. The id sequence is
* ascending but may contain gaps. * ascending but may contain gaps.
...@@ -1429,9 +1417,7 @@ public: ...@@ -1429,9 +1417,7 @@ public:
bool increase); bool increase);
bool setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase); bool setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase);
Uint64 opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op); Uint64 opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op);
#endif
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** /**
*/ */
NdbConnection* hupp( NdbConnection* ); NdbConnection* hupp( NdbConnection* );
......
...@@ -31,6 +31,8 @@ class Ndb; ...@@ -31,6 +31,8 @@ class Ndb;
class NdbBlob; class NdbBlob;
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
// to be documented later
/** /**
* NdbAsynchCallback functions are used when executing asynchronous * NdbAsynchCallback functions are used when executing asynchronous
* transactions (using NdbConnection::executeAsynchPrepare, or * transactions (using NdbConnection::executeAsynchPrepare, or
...@@ -39,6 +41,7 @@ class NdbBlob; ...@@ -39,6 +41,7 @@ class NdbBlob;
* See @ref secAsync for more information. * See @ref secAsync for more information.
*/ */
typedef void (* NdbAsynchCallback)(int, NdbConnection*, void*); typedef void (* NdbAsynchCallback)(int, NdbConnection*, void*);
#endif
/** /**
* Commit type of transaction * Commit type of transaction
...@@ -184,7 +187,8 @@ public: ...@@ -184,7 +187,8 @@ public:
* @note All operations within the same transaction need to * @note All operations within the same transaction need to
* be initialized with this method. * be initialized with this method.
* *
* @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable) * @param aTable
* A table object (fetched by NdbDictionary::Dictionary::getTable)
* @return Pointer to an NdbOperation object if successful, otherwise NULL. * @return Pointer to an NdbOperation object if successful, otherwise NULL.
*/ */
NdbOperation* getNdbOperation(const NdbDictionary::Table * aTable); NdbOperation* getNdbOperation(const NdbDictionary::Table * aTable);
...@@ -204,7 +208,8 @@ public: ...@@ -204,7 +208,8 @@ public:
* get the NdbConnection object which * get the NdbConnection object which
* was fetched by startTransaction pointing to this operation. * was fetched by startTransaction pointing to this operation.
* *
* @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable) * @param aTable
* A table object (fetched by NdbDictionary::Dictionary::getTable)
* @return pointer to an NdbOperation object if successful, otherwise NULL * @return pointer to an NdbOperation object if successful, otherwise NULL
*/ */
NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * aTable); NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * aTable);
...@@ -226,11 +231,14 @@ public: ...@@ -226,11 +231,14 @@ public:
* get the NdbConnection object which * get the NdbConnection object which
* was fetched by startTransaction pointing to this operation. * was fetched by startTransaction pointing to this operation.
* *
* @param anIndex An index object (fetched by NdbDictionary::Dictionary::getIndex). * @param anIndex
* @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable). An index object (fetched by NdbDictionary::Dictionary::getIndex).
* @param aTable
A table object (fetched by NdbDictionary::Dictionary::getTable).
* @return pointer to an NdbOperation object if successful, otherwise NULL * @return pointer to an NdbOperation object if successful, otherwise NULL
*/ */
NdbIndexScanOperation* getNdbIndexScanOperation(const NdbDictionary::Index * anIndex, NdbIndexScanOperation* getNdbIndexScanOperation
(const NdbDictionary::Index * anIndex,
const NdbDictionary::Table * aTable); const NdbDictionary::Table * aTable);
/** /**
...@@ -251,8 +259,10 @@ public: ...@@ -251,8 +259,10 @@ public:
* get the NdbConnection object that * get the NdbConnection object that
* was fetched by startTransaction pointing to this operation. * was fetched by startTransaction pointing to this operation.
* *
* @param anIndex An index object (fetched by NdbDictionary::Dictionary::getIndex). * @param anIndex
* @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable). * An index object (fetched by NdbDictionary::Dictionary::getIndex).
* @param aTable
* A table object (fetched by NdbDictionary::Dictionary::getTable).
* @return Pointer to an NdbIndexOperation object if * @return Pointer to an NdbIndexOperation object if
* successful, otherwise NULL * successful, otherwise NULL
*/ */
...@@ -289,6 +299,8 @@ public: ...@@ -289,6 +299,8 @@ public:
AbortOption abortOption = AbortOnError, AbortOption abortOption = AbortOnError,
int force = 0 ); int force = 0 );
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
// to be documented later
/** /**
* Prepare an asynchronous transaction. * Prepare an asynchronous transaction.
* *
...@@ -334,7 +346,7 @@ public: ...@@ -334,7 +346,7 @@ public:
NdbAsynchCallback aCallback, NdbAsynchCallback aCallback,
void* anyObject, void* anyObject,
AbortOption abortOption = AbortOnError); AbortOption abortOption = AbortOnError);
#endif
/** /**
* Refresh * Refresh
* Update timeout counter of this transaction * Update timeout counter of this transaction
......
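For the getNdbIndexScanOperation variant documented above, an ordered-index read might look roughly like the sketch below. The dictionary lookups, readTuples, setBound and nextResult calls are taken from the scan API of this era, but their exact signatures vary between releases; MYORDEREDINDEX, MYTABLENAME and the attributes are placeholders, and ndb is an initialized Ndb object:

NdbDictionary::Dictionary* dict = ndb.getDictionary();
const NdbDictionary::Index* index = dict->getIndex("MYORDEREDINDEX", "MYTABLENAME");
const NdbDictionary::Table* table = dict->getTable("MYTABLENAME");

NdbConnection* trans = ndb.startTransaction();
NdbIndexScanOperation* scan = trans->getNdbIndexScanOperation(index, table);

Uint32 bound_value = 10;
scan->readTuples();                                       // define the scan
scan->setBound("ATTR1", NdbIndexScanOperation::BoundEQ,   // range: ATTR1 == 10
               &bound_value);
NdbRecAttr* attr2 = scan->getValue("ATTR2", NULL);        // column to fetch

trans->execute(NoCommit);                                 // start the scan

int rc;
while ((rc = scan->nextResult(true)) == 0) {
  // one row per iteration; attr2 now holds ATTR2 for the current row
}
// rc == 1: no more rows; rc == -1: error, see trans->getNdbError()

ndb.closeTransaction(trans);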
...@@ -166,7 +166,7 @@ public: ...@@ -166,7 +166,7 @@ public:
* The builtin column types * The builtin column types
*/ */
enum Type { enum Type {
Undefined=0,///< Undefined Undefined=0, ///< Undefined
Tinyint, ///< 8 bit. 1 byte signed integer, can be used in array Tinyint, ///< 8 bit. 1 byte signed integer, can be used in array
Tinyunsigned, ///< 8 bit. 1 byte unsigned integer, can be used in array Tinyunsigned, ///< 8 bit. 1 byte unsigned integer, can be used in array
Smallint, ///< 16 bit. 2 byte signed integer, can be used in array Smallint, ///< 16 bit. 2 byte signed integer, can be used in array
...@@ -374,16 +374,11 @@ public: ...@@ -374,16 +374,11 @@ public:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
const Table * getBlobTable() const; const Table * getBlobTable() const;
/**
* @name ODBC Specific methods
* @{
*/
void setAutoIncrement(bool); void setAutoIncrement(bool);
bool getAutoIncrement() const; bool getAutoIncrement() const;
void setAutoIncrementInitialValue(Uint64 val); void setAutoIncrementInitialValue(Uint64 val);
void setDefaultValue(const char*); void setDefaultValue(const char*);
const char* getDefaultValue() const; const char* getDefaultValue() const;
/** @} *******************************************************************/
static const Column * FRAGMENT; static const Column * FRAGMENT;
static const Column * ROW_COUNT; static const Column * ROW_COUNT;
......
...@@ -125,6 +125,16 @@ int runCreateTheTable(NDBT_Context* ctx, NDBT_Step* step){ ...@@ -125,6 +125,16 @@ int runCreateTheTable(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK; return NDBT_OK;
} }
int runDropTheTable(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step);
const NdbDictionary::Table* pTab = ctx->getTab();
// Drop the table from the db
pNdb->getDictionary()->dropTable(pTab->getName());
return NDBT_OK;
}
int runCreateTableWhenDbIsFull(NDBT_Context* ctx, NDBT_Step* step){ int runCreateTableWhenDbIsFull(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step); Ndb* pNdb = GETNDB(step);
int result = NDBT_OK; int result = NDBT_OK;
...@@ -1584,7 +1594,7 @@ TESTCASE("CreateTableWhenDbIsFull", ...@@ -1584,7 +1594,7 @@ TESTCASE("CreateTableWhenDbIsFull",
INITIALIZER(runFillTable); INITIALIZER(runFillTable);
INITIALIZER(runCreateTableWhenDbIsFull); INITIALIZER(runCreateTableWhenDbIsFull);
INITIALIZER(runDropTableWhenDbIsFull); INITIALIZER(runDropTableWhenDbIsFull);
FINALIZER(runClearTable); FINALIZER(runDropTheTable);
} }
TESTCASE("FragmentTypeSingle", TESTCASE("FragmentTypeSingle",
"Create the table with fragment type Single\n"){ "Create the table with fragment type Single\n"){
......
...@@ -1277,7 +1277,7 @@ TESTCASE("CreateLoadDrop_O", ...@@ -1277,7 +1277,7 @@ TESTCASE("CreateLoadDrop_O",
TESTCASE("NFNR1", TESTCASE("NFNR1",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
//TC_PROPERTY("Threads", 2); TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(runLoadTable); INITIALIZER(runLoadTable);
...@@ -1292,6 +1292,7 @@ TESTCASE("NFNR1_O", ...@@ -1292,6 +1292,7 @@ TESTCASE("NFNR1_O",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(runLoadTable); INITIALIZER(runLoadTable);
...@@ -1305,6 +1306,7 @@ TESTCASE("NFNR1_O", ...@@ -1305,6 +1306,7 @@ TESTCASE("NFNR1_O",
TESTCASE("NFNR2", TESTCASE("NFNR2",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex); INITIALIZER(createPkIndex);
...@@ -1321,6 +1323,7 @@ TESTCASE("NFNR2_O", ...@@ -1321,6 +1323,7 @@ TESTCASE("NFNR2_O",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex); INITIALIZER(createPkIndex);
...@@ -1336,6 +1339,7 @@ TESTCASE("NFNR2_O", ...@@ -1336,6 +1339,7 @@ TESTCASE("NFNR2_O",
TESTCASE("NFNR3", TESTCASE("NFNR3",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex); INITIALIZER(createPkIndex);
...@@ -1351,6 +1355,7 @@ TESTCASE("NFNR3_O", ...@@ -1351,6 +1355,7 @@ TESTCASE("NFNR3_O",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex); INITIALIZER(createPkIndex);
...@@ -1365,6 +1370,7 @@ TESTCASE("NFNR3_O", ...@@ -1365,6 +1370,7 @@ TESTCASE("NFNR3_O",
TESTCASE("NFNR4", TESTCASE("NFNR4",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 4);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex); INITIALIZER(createPkIndex);
...@@ -1383,6 +1389,7 @@ TESTCASE("NFNR4_O", ...@@ -1383,6 +1389,7 @@ TESTCASE("NFNR4_O",
"Test that indexes are correctly maintained during node fail and node restart"){ "Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0); TC_PROPERTY("LoggedIndexes", (unsigned)0);
TC_PROPERTY("PauseThreads", 4);
INITIALIZER(runClearTable); INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex); INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex); INITIALIZER(createPkIndex);
......
...@@ -799,13 +799,13 @@ int runUpdateWithoutValues(NDBT_Context* ctx, NDBT_Step* step){ ...@@ -799,13 +799,13 @@ int runUpdateWithoutValues(NDBT_Context* ctx, NDBT_Step* step){
// Don't call any setValues // Don't call any setValues
// Execute should not work // Execute should work
int check = pCon->execute(Commit); int check = pCon->execute(Commit);
if (check == 0){ if (check == 0){
ndbout << "execute worked" << endl; ndbout << "execute worked" << endl;
result = NDBT_FAILED;
} else { } else {
ERR(pCon->getNdbError()); ERR(pCon->getNdbError());
result = NDBT_FAILED;
} }
pNdb->closeTransaction(pCon); pNdb->closeTransaction(pCon);
......
...@@ -465,9 +465,73 @@ max-time: 150000 ...@@ -465,9 +465,73 @@ max-time: 150000
cmd: testOperations cmd: testOperations
args: args:
max-time: 150000 max-time: 1500
cmd: testTransactions cmd: testTransactions
args: args: T1
max-time: 1500
cmd: testTransactions
args: T2
max-time: 1500
cmd: testTransactions
args: T3
max-time: 1500
cmd: testTransactions
args: T4
max-time: 1500
cmd: testTransactions
args: T5
max-time: 1500
cmd: testTransactions
args: T6
max-time: 1500
cmd: testTransactions
args: T7
max-time: 1500
cmd: testTransactions
args: T8
max-time: 1500
cmd: testTransactions
args: T9
max-time: 1500
cmd: testTransactions
args: T10
max-time: 1500
cmd: testTransactions
args: T11
max-time: 1500
cmd: testTransactions
args: T12
max-time: 1500
cmd: testTransactions
args: T13
max-time: 1500
cmd: testTransactions
args: T14
max-time: 1500
cmd: testTransactions
args: I1
max-time: 1500
cmd: testTransactions
args: I2
max-time: 1500
cmd: testTransactions
args: I3
max-time: 1500 max-time: 1500
cmd: testRestartGci cmd: testRestartGci
...@@ -477,7 +541,7 @@ max-time: 600 ...@@ -477,7 +541,7 @@ max-time: 600
cmd: testBlobs cmd: testBlobs
args: args:
max-time: 2500 max-time: 5000
cmd: testOIBasic cmd: testOIBasic
args: args:
......
...@@ -1097,7 +1097,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, ...@@ -1097,7 +1097,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
pNdb->closeTransaction(pTrans); pNdb->closeTransaction(pTrans);
return NDBT_FAILED; return NDBT_FAILED;
} }
} else{ } else {
if(pIndexScanOp) if(pIndexScanOp)
{ {
int rows_found = 0; int rows_found = 0;
...@@ -1759,7 +1759,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, ...@@ -1759,7 +1759,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
pNdb->closeTransaction(pTrans); pNdb->closeTransaction(pTrans);
return NDBT_FAILED; return NDBT_FAILED;
} }
check = 0; check = sOp->readTuples();
} }
if( check == -1 ) { if( check == -1 ) {
...@@ -1948,7 +1948,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, ...@@ -1948,7 +1948,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
} }
if(ordered && check != 0){ if(ordered && check != 0){
g_err << "Row: " << r << " not found!!" << endl; g_err << check << " - Row: " << r << " not found!!" << endl;
pNdb->closeTransaction(pTrans); pNdb->closeTransaction(pTrans);
return NDBT_FAILED; return NDBT_FAILED;
} }
......
...@@ -629,7 +629,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, ...@@ -629,7 +629,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
parallelism = 1; parallelism = 1;
while (true){ while (true){
restart:
if (retryAttempt >= retryMax){ if (retryAttempt >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl; << " times, failing!" << endl;
...@@ -719,11 +719,26 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, ...@@ -719,11 +719,26 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
// ndbout << row.c_str().c_str() << endl; // ndbout << row.c_str().c_str() << endl;
if (readRowFromTableAndIndex(pNdb, if (readRowFromTableAndIndex(pNdb,
pTrans, pTrans,
pIndex, pIndex,
row) != NDBT_OK){ row) != NDBT_OK){
while((eof= pOp->nextResult(false)) == 0);
if(eof == 2)
eof = pOp->nextResult(true); // this should give -1
if(eof == -1)
{
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
NdbSleep_MilliSleep(50);
retryAttempt++;
goto restart;
}
}
pNdb->closeTransaction(pTrans); pNdb->closeTransaction(pTrans);
return NDBT_FAILED; return NDBT_FAILED;
} }
...@@ -736,7 +751,6 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, ...@@ -736,7 +751,6 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
pNdb->closeTransaction(pTrans); pNdb->closeTransaction(pTrans);
NdbSleep_MilliSleep(50); NdbSleep_MilliSleep(50);
retryAttempt++; retryAttempt++;
rows--;
continue; continue;
} }
ERR(err); ERR(err);
...@@ -811,7 +825,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, ...@@ -811,7 +825,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
check = pOp->readTuple(); check = pOp->readTuple();
if( check == -1 ) { if( check == -1 ) {
ERR(pTrans1->getNdbError()); ERR(pTrans1->getNdbError());
pNdb->closeTransaction(pTrans1);
goto close_all; goto close_all;
} }
...@@ -943,7 +956,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, ...@@ -943,7 +956,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
#if VERBOSE #if VERBOSE
printf("\n"); printf("\n");
#endif #endif
scanTrans->refresh();
check = pTrans1->execute(Commit); check = pTrans1->execute(Commit);
if( check == -1 ) { if( check == -1 ) {
const NdbError err = pTrans1->getNdbError(); const NdbError err = pTrans1->getNdbError();
......
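The new error path above drains the remaining scan rows and, when the failure is temporary, sleeps and jumps back to restart the whole comparison. Stripped of the scan-draining details, the retry idiom used throughout these NDBT utilities is roughly the following sketch (runWithRetry is an illustrative name; NDBT_OK, NDBT_FAILED and NdbSleep_MilliSleep come from the test framework, and the transaction body is left as a comment):

// Run a transaction, retrying on temporary NDB errors.
int runWithRetry(Ndb* pNdb)
{
  int retryAttempt = 0;
  const int retryMax = 100;

  while (true) {
    if (retryAttempt >= retryMax)
      return NDBT_FAILED;                   // give up after too many retries

    NdbConnection* pTrans = pNdb->startTransaction();
    if (pTrans == NULL)
      return NDBT_FAILED;

    // ... define the operations to run here ...

    if (pTrans->execute(Commit) == -1) {
      const NdbError err = pTrans->getNdbError();
      pNdb->closeTransaction(pTrans);

      if (err.status == NdbError::TemporaryError) {
        NdbSleep_MilliSleep(50);            // back off briefly
        retryAttempt++;
        continue;                           // and retry from the top
      }
      return NDBT_FAILED;                   // permanent error
    }

    pNdb->closeTransaction(pTrans);
    return NDBT_OK;
  }
}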