Commit 99f07f83 authored by unknown

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0

into poseidon.ndb.mysql.com:/home/tomas/mysql-5.0-ndb

parents cfe5650c 33465a14
......@@ -26,17 +26,17 @@ CONNECT [<connectstring>] Connect to management server (reconnect i
QUIT Quit management client
<severity> = ALERT | CRITICAL | ERROR | WARNING | INFO | DEBUG
<category> = STARTUP | SHUTDOWN | STATISTICS | CHECKPOINT | NODERESTART | CONNECTION | INFO | ERROR | GREP | DEBUG | BACKUP
<category> = STARTUP | SHUTDOWN | STATISTICS | CHECKPOINT | NODERESTART | CONNECTION | INFO | ERROR | CONGESTION | DEBUG | BACKUP
<level> = 0 - 15
<id> = ALL | Any database node id
Connected to Management Server at: localhost:1186
Node 1: started (Version 4.1.9)
Node 2: started (Version 4.1.9)
Node 1: started (Version 5.0.3)
Node 2: started (Version 5.0.3)
Node 1: started (Version 4.1.9)
Node 1: started (Version 5.0.3)
Node 2: started (Version 4.1.9)
Node 2: started (Version 5.0.3)
Executing CLUSTERLOG on node 1 OK!
Executing CLUSTERLOG on node 2 OK!
......
......@@ -8,5 +8,5 @@ sleep 5
all clusterlog connection=8
sleep 1
1 restart
sleep 5
sleep 10
clusterlog on all
......@@ -14,6 +14,8 @@
# General configuration options
#---------------------------------------------------------------------------
DETAILS_AT_TOP = YES
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
......
......@@ -13,8 +13,8 @@
#---------------------------------------------------------------------------
# General configuration options
#---------------------------------------------------------------------------
DETAILS_AT_TOP = yes
HIDE_FRIEND_COMPOUNDS = yes
DETAILS_AT_TOP = YES
HIDE_FRIEND_COMPOUNDS = YES
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
......
......@@ -14,6 +14,8 @@
# General configuration options
#---------------------------------------------------------------------------
DETAILS_AT_TOP = YES
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
......
......@@ -14,6 +14,8 @@
# General configuration options
#---------------------------------------------------------------------------
DETAILS_AT_TOP = YES
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
......
......@@ -145,9 +145,6 @@ int create_table(Ndb * myNdb);
int tempErrors = 0;
int permErrors = 0;
/**
* Helper function for callback(...)
*/
void
closeTransaction(Ndb * ndb , async_callback_t * cb)
{
......
......@@ -57,7 +57,7 @@ public:
llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL,
llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL,
llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL,
llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL,
llCongestion = CFG_LOGLEVEL_CONGESTION - CFG_MIN_LOGLEVEL,
llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL
,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL
};
......
......@@ -18,35 +18,74 @@
#define MGMAPI_H
/**
* @mainpage NDB Cluster Management API
* @mainpage MySQL Cluster Management API
*
* The NDB Cluster Management API (MGM API) is a C API
* that is used to:
* - Start and stop database nodes (ndbd processes)
* - Start and stop NDB Cluster backups
* - Control the NDB Cluster log
* - Perform other administrative tasks
* The MySQL Cluster Management API (MGM API) is a C language API
* that is used for:
* - Starting and stopping database nodes (ndbd processes)
* - Starting and stopping Cluster backups
* - Controlling the NDB Cluster log
* - Performing other administrative tasks
*
* @section General Concepts
* @section secMgmApiGeneral General Concepts
*
* Each MGM API function needs a management server handle
* of type @ref NdbMgmHandle.
* This handle is initally created by calling the
* This handle is created by calling the function
* function ndb_mgm_create_handle() and freed by calling
* ndb_mgm_destroy_handle().
*
* A function can return:
* -# An integer value.
* A value of <b>-1</b> indicates an error.
* -# A non-const pointer value. A <var>NULL</var> value indicates an error;
* A function can return any of the following:
* -# An integer value, with
* a value of <b>-1</b> indicating an error.
* -# A non-constant pointer value. A <var>NULL</var> value indicates an error;
* otherwise, the return value must be freed
* by the user of the MGM API
* -# A const pointer value. A <var>NULL</var> value indicates an error.
* Returned value should not be freed.
* by the programmer
* -# A constant pointer value, with a <var>NULL</var> value indicating an error.
* The returned value should <em>not</em> be freed.
*
* Error conditions can be identified by using the appropriate
* error-reporting functions ndb_mgm_get_latest_error() and
* @ref ndb_mgm_error.
*
* Here is an example using the MGM API (without error handling for brevity's sake).
* @code
* NdbMgmHandle handle= ndb_mgm_create_handle();
* ndb_mgm_connect(handle,0,0,0);
* struct ndb_mgm_cluster_state *state= ndb_mgm_get_status(handle);
* for(int i=0; i < state->no_of_nodes; i++)
* {
* struct ndb_mgm_node_state *node_state= &state->node_states[i];
* printf("node with ID=%d ", node_state->node_id);
* if(node_state->version != 0)
* printf("connected\n");
* else
* printf("not connected\n");
* }
* free((void*)state);
* ndb_mgm_destroy_handle(&handle);
* @endcode
*
* @section secLogEvents Log Events
*
* The database nodes and management server(s) regularly and on specific
 * occasions report various log events that occur in the cluster. These
 * log events are written to the cluster log. Optionally, an MGM API client
 * may listen to these events by using the function ndb_mgm_listen_event().
* Each log event belongs to a category, @ref ndb_mgm_event_category, and
* has a severity, @ref ndb_mgm_event_severity, associated with it. Each
* log event also has a level (0-15) associated with it.
*
 * Which log events are reported is controlled with ndb_mgm_listen_event(),
* ndb_mgm_set_clusterlog_loglevel(), and
* ndb_mgm_set_clusterlog_severity_filter().
*
* Below is an example of how to listen to events related to backup.
*
* @code
* int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
* int fd = ndb_mgm_listen_event(handle, filter);
* @endcode
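 *
 * A minimal sketch of consuming such events, assuming the handle and fd from the
 * examples above and standard POSIX read() (from <unistd.h>); each event is
 * delivered as text in the same format as the cluster log:
 *
 * @code
 * char buf[512];
 * int n;
 * while ((n = read(fd, buf, sizeof(buf) - 1)) > 0)
 * {
 *   buf[n] = '\0';
 *   printf("%s", buf);   /* print the textual event(s) just received */
 * }
 * @endcode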
*/
/** @addtogroup MGM_C_API
......@@ -70,15 +109,15 @@ extern "C" {
*/
enum ndb_mgm_node_type {
NDB_MGM_NODE_TYPE_UNKNOWN = -1 /** Node type not known*/
,NDB_MGM_NODE_TYPE_API /** An application node (API) */
,NDB_MGM_NODE_TYPE_API /** An application (NdbApi) node */
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= NODE_TYPE_API
#endif
,NDB_MGM_NODE_TYPE_NDB /** A database node (DB) */
,NDB_MGM_NODE_TYPE_NDB /** A database node */
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= NODE_TYPE_DB
#endif
,NDB_MGM_NODE_TYPE_MGM /** A mgmt server node (MGM)*/
,NDB_MGM_NODE_TYPE_MGM /** A management server node */
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= NODE_TYPE_MGM
#endif
......@@ -123,35 +162,49 @@ extern "C" {
* Error codes
*/
enum ndb_mgm_error {
/** Not an error */
NDB_MGM_NO_ERROR = 0,
/* Request for service errors */
/** Supplied connectstring is illegal */
NDB_MGM_ILLEGAL_CONNECT_STRING = 1001,
NDB_MGM_ILLEGAL_PORT_NUMBER = 1002,
NDB_MGM_ILLEGAL_SOCKET = 1003,
NDB_MGM_ILLEGAL_IP_ADDRESS = 1004,
/** Supplied NdbMgmHandle is illegal */
NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005,
/** Illegal reply from server */
NDB_MGM_ILLEGAL_SERVER_REPLY = 1006,
/** Illegal number of nodes */
NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007,
/** Illegal node status */
NDB_MGM_ILLEGAL_NODE_STATUS = 1008,
/** Memory allocation error */
NDB_MGM_OUT_OF_MEMORY = 1009,
/** Management server not connected */
NDB_MGM_SERVER_NOT_CONNECTED = 1010,
/** Could not connect to socket */
NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011,
/* Service errors - Start/Stop Node or System */
/** Start failed */
NDB_MGM_START_FAILED = 2001,
/** Stop failed */
NDB_MGM_STOP_FAILED = 2002,
/** Restart failed */
NDB_MGM_RESTART_FAILED = 2003,
/* Service errors - Backup */
/** Unable to start backup */
NDB_MGM_COULD_NOT_START_BACKUP = 3001,
/** Unable to abort backup */
NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002,
/* Service errors - Single User Mode */
/** Unable to enter single user mode */
NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001,
/** Unable to exit single user mode */
NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002,
/* Usage errors */
/** Usage error */
NDB_MGM_USAGE_ERROR = 5001
};
......@@ -165,9 +218,6 @@ extern "C" {
/* Request for service errors */
{ NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
{ NDB_MGM_ILLEGAL_PORT_NUMBER, "Illegal port number" },
{ NDB_MGM_ILLEGAL_SOCKET, "Illegal socket" },
{ NDB_MGM_ILLEGAL_IP_ADDRESS, "Illegal IP address" },
{ NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
{ NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
{ NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
......@@ -200,13 +250,18 @@ extern "C" {
#endif
/**
* Status of a node in the cluster
* Status of a node in the cluster.
*
* Sub-structure in enum ndb_mgm_cluster_state
* returned by ndb_mgm_get_status()
* returned by ndb_mgm_get_status().
*
* @note <var>node_status</var>, <var>start_phase</var>,
* <var>dynamic_id</var>
* and <var>node_group</var> are relevant only for database nodes,
* i.e. <var>node_type</var> == @ref NDB_MGM_NODE_TYPE_NDB.
*/
struct ndb_mgm_node_state {
/** NDB Cluster node id*/
/** NDB Cluster node ID*/
int node_id;
/** Type of NDB Cluster node*/
enum ndb_mgm_node_type node_type;
......@@ -214,12 +269,12 @@ extern "C" {
enum ndb_mgm_node_status node_status;
/** Start phase.
*
* @note Start phase is only valid if node_type is
* NDB_MGM_NODE_TYPE_NDB and node_status is
* @note Start phase is only valid if the <var>node_type</var> is
* NDB_MGM_NODE_TYPE_NDB and the <var>node_status</var> is
* NDB_MGM_NODE_STATUS_STARTING
*/
int start_phase;
/** Id for heartbeats and master take-over (only valid for DB nodes)
/** ID for heartbeats and master take-over (only valid for DB nodes)
*/
int dynamic_id;
/** Node group of node (only valid for DB nodes)*/
......@@ -230,9 +285,9 @@ extern "C" {
* management server
*/
int connect_count;
/** Ip adress of node when it connected to the management server.
* @note it will be empty if the management server has restarted
* after the node connected.
/** IP address of node when it connected to the management server.
* @note This value will be empty if the management server has restarted
* since the node last connected.
*/
char connect_address[
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
......@@ -242,11 +297,11 @@ extern "C" {
};
/**
* State of all nodes in the cluster returned from
* State of all nodes in the cluster; returned from
* ndb_mgm_get_status()
*/
struct ndb_mgm_cluster_state {
/** No of entries in the node_states array */
/** Number of entries in the node_states array */
int no_of_nodes;
/** An array with node_states*/
struct ndb_mgm_node_state node_states[
......@@ -257,7 +312,7 @@ extern "C" {
};
/**
* Default reply from the server (for future use, not used today)
* Default reply from the server (reserved for future use)
*/
struct ndb_mgm_reply {
/** 0 if successful, otherwise error code. */
......@@ -294,92 +349,90 @@ extern "C" {
#endif
/**
* Log severities (used to filter the cluster log)
* Log event severities (used to filter the cluster log,
* ndb_mgm_set_clusterlog_severity_filter(), and filter listening to events
* ndb_mgm_listen_event())
*/
enum ndb_mgm_clusterlog_level {
NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL = -1,
/* must range from 0 and up, indexes into an array */
/** Cluster log on*/
NDB_MGM_CLUSTERLOG_ON = 0,
enum ndb_mgm_event_severity {
NDB_MGM_ILLEGAL_EVENT_SEVERITY = -1,
/* Must be a nonnegative integer (used for array indexing) */
/** Cluster log on */
NDB_MGM_EVENT_SEVERITY_ON = 0,
/** Used in NDB Cluster development */
NDB_MGM_CLUSTERLOG_DEBUG = 1,
NDB_MGM_EVENT_SEVERITY_DEBUG = 1,
/** Informational messages*/
NDB_MGM_CLUSTERLOG_INFO = 2,
/** Conditions that are not error condition, but might require handling
NDB_MGM_EVENT_SEVERITY_INFO = 2,
/** Conditions that are not error conditions, but which might require handling.
*/
NDB_MGM_CLUSTERLOG_WARNING = 3,
/** Conditions that should be corrected */
NDB_MGM_CLUSTERLOG_ERROR = 4,
NDB_MGM_EVENT_SEVERITY_WARNING = 3,
/** Conditions that, while not fatal, should be corrected. */
NDB_MGM_EVENT_SEVERITY_ERROR = 4,
/** Critical conditions, like device errors or out of resources */
NDB_MGM_CLUSTERLOG_CRITICAL = 5,
NDB_MGM_EVENT_SEVERITY_CRITICAL = 5,
/** A condition that should be corrected immediately,
* such as a corrupted system
*/
NDB_MGM_CLUSTERLOG_ALERT = 6,
NDB_MGM_EVENT_SEVERITY_ALERT = 6,
/* must be next number, works as bound in loop */
/** All severities */
NDB_MGM_CLUSTERLOG_ALL = 7
NDB_MGM_EVENT_SEVERITY_ALL = 7
};
/**
* Log categories, used to set filter on the clusterlog using
* ndb_mgm_set_loglevel_clusterlog()
* Log event categories, used to set filter level on the log events using
* ndb_mgm_set_clusterlog_loglevel() and ndb_mgm_listen_event()
*/
enum ndb_mgm_event_category {
/**
* Invalid
* Invalid log event category
*/
NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1,
/**
* Events during all kinds of startups
* Log events during all kinds of startups
*/
NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP,
/**
* Events during shutdown
* Log events during shutdown
*/
NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN,
/**
* Transaction statistics (Job level, TCP/IP speed)
* Statistics log events
*/
NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS,
/**
* Events regarding checkpoints
* Log events related to checkpoints
*/
NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT,
/**
* Events during node restart
* Log events during node restart
*/
NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART,
/**
* Events on connection between cluster nodes
* Log events related to connections between cluster nodes
*/
NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION,
/**
* Backup events
* Backup related log events
*/
NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP,
/**
* Congestion related log events
*/
NDB_MGM_EVENT_CATEGORY_CONGESTION = CFG_LOGLEVEL_CONGESTION,
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Loglevel debug
*/
NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG,
#endif
/**
* Loglevel info
* Uncategorized log events (severity info)
*/
NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO,
/**
* Loglevel warning
*/
NDB_MGM_EVENT_CATEGORY_WARNING = CFG_LOGLEVEL_WARNING,
/**
* Loglevel error
* Uncategorized log events (severity warning or higher)
*/
NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR,
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
*
*/
NDB_MGM_EVENT_CATEGORY_GREP = CFG_LOGLEVEL_GREP,
#endif
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL,
NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL
......@@ -393,7 +446,8 @@ extern "C" {
*/
/**
* Get latest error associated with a management server handle
* Get the most recent error associated with the management server whose handle
* is used as the value of <var>handle</var>.
*
* @param handle Management handle
* @return Latest error code
......@@ -401,7 +455,7 @@ extern "C" {
int ndb_mgm_get_latest_error(const NdbMgmHandle handle);
/**
* Get latest main error message associated with a handle
* Get the most recent general error message associated with a handle
*
* @param handle Management handle.
* @return Latest error message
......@@ -409,9 +463,9 @@ extern "C" {
const char * ndb_mgm_get_latest_error_msg(const NdbMgmHandle handle);
/**
* Get latest error description associated with a handle
* Get the most recent error description associated with a handle
*
* The error description gives some additional information to
* The error description gives some additional information regarding
* the error message.
*
* @param handle Management handle.
......@@ -421,7 +475,7 @@ extern "C" {
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Get latest internal source code error line associated with a handle
* Get the most recent internal source code error line associated with a handle
*
* @param handle Management handle.
* @return Latest internal source code line of latest error
......@@ -437,15 +491,15 @@ extern "C" {
*/
/**
* Create a handle to a management server
* Create a handle to a management server.
*
* @return A management handle<br>
* or NULL if no management handle could be created.
* or <var>NULL</var> if no management handle could be created.
*/
NdbMgmHandle ndb_mgm_create_handle();
/**
* Destroy a management server handle
* Destroy a management server handle.
*
* @param handle Management handle
*/
......@@ -458,7 +512,7 @@ extern "C" {
*/
/**
* Set connect string to management server
* Sets the connectstring for a management server
*
* @param handle Management handle
* @param connect_string Connect string to the management server,
......@@ -469,38 +523,47 @@ extern "C" {
* <connectstring> := [<nodeid-specification>,]<host-specification>[,<host-specification>]
* <nodeid-specification> := nodeid=<id>
* <host-specification> := <host>[:<port>]
* <id> is an integer larger than 1 identifying a node in config.ini
* <id> is an integer greater than 0 identifying a node in config.ini
* <port> is an integer referring to a regular unix port
* <host> is a string which is a valid Internet host address
* <host> is a string containing a valid network host address
* @endcode
*/
int ndb_mgm_set_connectstring(NdbMgmHandle handle,
const char *connect_string);
/**
* Get connectstring used for connection
* Gets the connectstring used for a connection
*
* @note returns what the connectstring defaults to if the
* ndb_mgm_set_connectstring() call has not been performed
* @note This function returns the default connectstring if no call to
* ndb_mgm_set_connectstring() has been performed. Also, the
* returned connectstring may be formatted differently.
*
* @param handle Management handle
* @param buf Buffer to hold result
* @param buf_sz Size of buffer.
*
* @return connectstring
* @return connectstring (same as <var>buf</var>)
*/
const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz);
/**
* Connect to a management server. Coonect string is set by
* Connects to a management server. Connectstring is set by
* ndb_mgm_set_connectstring().
*
* @param handle Management handle.
* @param no_retries Number of retries to connect
* (0 means connect once).
* @param retry_delay_in_seconds
* How long to wait until retry is performed.
* @param verbose Print information regarding connection retries.
*
* @return -1 on error.
*/
int ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
int retry_delay_in_seconds, int verbose);
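As an illustration of the connection sequence (the host name and node ID below are
hypothetical, and error handling is minimal):
@code
NdbMgmHandle handle = ndb_mgm_create_handle();
ndb_mgm_set_connectstring(handle, "nodeid=5,mgmhost.example.com:1186");
if (ndb_mgm_connect(handle, 4, 5, 1) == -1)   /* 4 retries, 5 s apart, verbose */
{
  /* handle connection failure */
}
/* ... use the MGM API ... */
ndb_mgm_disconnect(handle);
ndb_mgm_destroy_handle(&handle);
@endcode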
/**
* Disconnect from a management server
* Disconnects from a management server
*
* @param handle Management handle.
* @return -1 on error.
......@@ -508,17 +571,17 @@ extern "C" {
int ndb_mgm_disconnect(NdbMgmHandle handle);
/**
* Get nodeid used in the connection
* Gets connection node ID
*
* @param handle Management handle
*
* @return node id, 0 indicated that no nodeid has been
* @return Node ID; 0 indicates that no node ID has been
* specified
*/
int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle);
/**
* Get port used in the connection
* Gets connection port
*
* @param handle Management handle
*
......@@ -527,7 +590,7 @@ extern "C" {
int ndb_mgm_get_connected_port(NdbMgmHandle handle);
/**
* Get host used in the connection
* Gets connection host
*
* @param handle Management handle
*
......@@ -538,12 +601,12 @@ extern "C" {
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** @} *********************************************************************/
/**
* @name Functions: Convert between different data formats
* @name Functions: Used to convert between different data formats
* @{
*/
/**
* Convert a string to a ndb_mgm_node_type
* Converts a string to an <var>ndb_mgm_node_type</var> value
*
* @param type Node type as string.
* @return NDB_MGM_NODE_TYPE_UNKNOWN if invalid string.
......@@ -551,24 +614,24 @@ extern "C" {
enum ndb_mgm_node_type ndb_mgm_match_node_type(const char * type);
/**
* Convert an ndb_mgm_node_type to a string
* Converts an ndb_mgm_node_type to a string
*
* @param type Node type.
* @return NULL if invalid id.
* @return <var>NULL</var> if invalid ID.
*/
const char * ndb_mgm_get_node_type_string(enum ndb_mgm_node_type type);
/**
* Convert an ndb_mgm_node_type to a alias string
* Converts an ndb_mgm_node_type to an alias string
*
* @param type Node type.
* @return NULL if invalid id.
* @return <var>NULL</var> if the ID is invalid.
*/
const char * ndb_mgm_get_node_type_alias_string(enum ndb_mgm_node_type type,
const char **str);
/**
* Convert a string to a ndb_mgm_node_status
* Converts a string to an <var>ndb_mgm_node_status</var> value
*
* @param status NDB node status string.
* @return NDB_MGM_NODE_STATUS_UNKNOWN if invalid string.
......@@ -576,32 +639,32 @@ extern "C" {
enum ndb_mgm_node_status ndb_mgm_match_node_status(const char * status);
/**
* Convert an id to a string
* Converts an ID to a string
*
* @param status NDB node status.
* @return NULL if invalid id.
* @return <var>NULL</var> if invalid ID.
*/
const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status);
const char * ndb_mgm_get_clusterlog_level_string(enum ndb_mgm_clusterlog_level);
const char * ndb_mgm_get_event_severity_string(enum ndb_mgm_event_severity);
ndb_mgm_event_category ndb_mgm_match_event_category(const char *);
const char * ndb_mgm_get_event_category_string(enum ndb_mgm_event_category);
#endif
/** @} *********************************************************************/
/**
* @name Functions: State of cluster
* @name Functions: Cluster status
* @{
*/
/**
* Get status of the nodes in an NDB Cluster
* Gets status of the nodes in an NDB Cluster
*
* Note the caller must free the pointer returned.
* @note The caller must free the pointer returned by this function.
*
* @param handle Management handle.
*
* @return Cluster state (or NULL on error).
* @return Cluster state (or <var>NULL</var> on error).
*/
struct ndb_mgm_cluster_state * ndb_mgm_get_status(NdbMgmHandle handle);
......@@ -612,36 +675,36 @@ extern "C" {
*/
/**
* Stop database nodes
* Stops database nodes
*
* @param handle Management handle.
* @param no_of_nodes no of database nodes<br>
* 0 - means all database nodes in cluster<br>
* n - Means stop n node(s) specified in the
* @param no_of_nodes Number of database nodes to be stopped<br>
* 0: All database nodes in cluster<br>
* n: Stop the <var>n</var> node(s) specified in the
* array node_list
* @param node_list List of node ids of database nodes to be stopped
* @param node_list List of node IDs for database nodes to be stopped
*
* @return No of nodes stopped (or -1 on error)
* @return Number of nodes stopped (-1 on error)
*
* @note The function is equivalent
* to ndb_mgm_stop2(handle, no_of_nodes, node_list, 0)
* @note This function is equivalent
* to calling ndb_mgm_stop2(handle, no_of_nodes, node_list, 0)
*/
int ndb_mgm_stop(NdbMgmHandle handle, int no_of_nodes,
const int * node_list);
/**
* Stop database nodes
* Stops database nodes
*
* @param handle Management handle.
* @param no_of_nodes No of database nodes<br>
* 0 - means all database nodes in cluster<br>
* n - Means stop n node(s) specified in
* @param no_of_nodes Number of database nodes to stop<br>
* 0: All database nodes in cluster<br>
* n: Stop the <var>n</var> node(s) specified in
* the array node_list
* @param node_list List of node ids of database nodes to be stopped
* @param abort Don't perform gracefull stop,
* but rather stop immediatly
* @param node_list List of node IDs of database nodes to be stopped
* @param abort Don't perform graceful stop,
* but rather stop immediately
*
* @return No of nodes stopped (or -1 on error).
* @return Number of nodes stopped (-1 on error).
*/
int ndb_mgm_stop2(NdbMgmHandle handle, int no_of_nodes,
const int * node_list, int abort);
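For example (the node IDs are hypothetical), stopping two specific database nodes
might look like this:
@code
int nodes[2] = { 2, 3 };
int stopped = ndb_mgm_stop(handle, 2, nodes);
if (stopped == -1)
{
  /* handle error */
}
@endcode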
......@@ -650,15 +713,15 @@ extern "C" {
* Restart database nodes
*
* @param handle Management handle.
* @param no_of_nodes No of database nodes<br>
* 0 - means all database nodes in cluster<br>
* n - Means stop n node(s) specified in the
* @param no_of_nodes Number of database nodes to restart<br>
* 0: All database nodes in cluster<br>
* n: Restart the <var>n</var> node(s) specified in the
* array node_list
* @param node_list List of node ids of database nodes to be stopped
* @param node_list List of node IDs of database nodes to be restarted
*
* @return No of nodes stopped (or -1 on error).
* @return Number of nodes restarted (-1 on error).
*
* @note The function is equivalent to
* @note This function is equivalent to calling
* ndb_mgm_restart2(handle, no_of_nodes, node_list, 0, 0, 0);
*/
int ndb_mgm_restart(NdbMgmHandle handle, int no_of_nodes,
......@@ -668,18 +731,18 @@ extern "C" {
* Restart database nodes
*
* @param handle Management handle.
* @param no_of_nodes No of database nodes<br>
* 0 - means all database nodes in cluster<br>
* n - Means stop n node(s) specified in the
* @param no_of_nodes Number of database nodes to be restarted:<br>
* 0: Restart all database nodes in the cluster<br>
* n: Restart the <var>n</var> node(s) specified in the
* array node_list
* @param node_list List of node ids of database nodes to be stopped
* @param initial Remove filesystem from node(s) restarting
* @param node_list List of node IDs of database nodes to be restarted
* @param initial Remove filesystem from restarting node(s)
* @param nostart Don't actually start node(s) but leave them
* waiting for start command
* @param abort Don't perform gracefull restart,
* but rather restart immediatly
* @param abort Don't perform graceful restart,
* but rather restart immediately
*
* @return No of nodes stopped (or -1 on error).
* @return Number of nodes stopped (-1 on error).
*/
int ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes,
const int * node_list, int initial,
......@@ -689,19 +752,19 @@ extern "C" {
* Start database nodes
*
* @param handle Management handle.
* @param no_of_nodes No of database nodes<br>
* 0 - means all database nodes in cluster<br>
* n - Means start n node(s) specified in
* @param no_of_nodes Number of database nodes to be started<br>
* 0: Start all database nodes in the cluster<br>
* n: Start the <var>n</var> node(s) specified in
* the array node_list
* @param node_list List of node ids of database nodes to be started
* @param node_list List of node IDs of database nodes to be started
*
* @return No of nodes started (or -1 on error).
* @return Number of nodes actually started (-1 on error).
*
* @note The nodes to start must have been started with nostart(-n)
* @note The nodes to be started must have been started with nostart(-n)
* argument.
* This means that the database node binary is started and
* waiting for a START management command which will
* actually start the database node functionality
* actually enable the database node
*/
int ndb_mgm_start(NdbMgmHandle handle,
int no_of_nodes,
......@@ -709,75 +772,83 @@ extern "C" {
/** @} *********************************************************************/
/**
* @name Functions: Logging
* @name Functions: Controlling Clusterlog output
* @{
*/
/**
* Filter cluster log
* Filter cluster log severities
*
* @param handle NDB management handle.
* @param level A cluster log level to filter.
* @param enable set 1=enable 0=disable
* @param severity A cluster log severity to filter.
* @param enable 1=enable, 0=disable
* @param reply Reply message.
*
* @return -1 on error.
*/
int ndb_mgm_filter_clusterlog(NdbMgmHandle handle,
enum ndb_mgm_clusterlog_level level,
int ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle,
enum ndb_mgm_event_severity severity,
int enable,
struct ndb_mgm_reply* reply);
/**
* Get log filter
* Get clusterlog severity filter
*
* @param handle NDB management handle
*
* @return A vector of seven elements,
* where each element contains
* 1 if a severity is enabled and 0 if not.
* A severity is stored at position
* ndb_mgm_clusterlog_level,
* 1 if a severity indicator is enabled and 0 if not.
* A severity level is stored at position
* ndb_mgm_clusterlog_level;
* for example the "error" level is stored in position
* [NDB_MGM_CLUSTERLOG_ERROR-1].
* The first element in the vector signals
* whether the clusterlog
* [NDB_MGM_EVENT_SEVERITY_ERROR].
* The first element [NDB_MGM_EVENT_SEVERITY_ON] in
* the vector signals
* whether the cluster log
* is disabled or enabled.
*/
unsigned int *ndb_mgm_get_logfilter(NdbMgmHandle handle);
const unsigned int *ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle);
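A small sketch combining the two calls above (reply handling omitted for brevity):
@code
struct ndb_mgm_reply reply;
/* make sure ERROR-severity events are written to the cluster log */
ndb_mgm_set_clusterlog_severity_filter(handle, NDB_MGM_EVENT_SEVERITY_ERROR,
                                       1, &reply);
const unsigned int *severities = ndb_mgm_get_clusterlog_severity_filter(handle);
if (severities != NULL && severities[NDB_MGM_EVENT_SEVERITY_ERROR])
  printf("ERROR-severity events are enabled\n");
@endcode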
/**
* Set log category and levels for the cluster log
*
* @param handle NDB management handle.
* @param nodeId Node id.
* @param nodeId Node ID.
* @param category Event category.
* @param level Log level (0-15).
* @param reply Reply message.
* @return -1 on error.
*/
int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle,
int ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle,
int nodeId,
enum ndb_mgm_event_category category,
int level,
struct ndb_mgm_reply* reply);
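For example (the node ID is chosen arbitrarily), to have node 2 report
BACKUP-category events up to level 15 in the cluster log:
@code
struct ndb_mgm_reply reply;
if (ndb_mgm_set_clusterlog_loglevel(handle, 2, NDB_MGM_EVENT_CATEGORY_BACKUP,
                                    15, &reply) == -1)
{
  /* handle error */
}
@endcode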
/** @} *********************************************************************/
/**
* @name Functions: Listening to log events
* @{
*/
/**
* Listen to log events
* Listen to log events. Events are read from the returned file descriptor;
* the format is textual and the same as in the cluster log.
*
* @param handle NDB management handle.
* @param filter pairs of { level, ndb_mgm_event_category } that will be
* pushed to fd, level=0 ends lists
* pushed to fd, level=0 ends list.
*
* @return fd which events will be pushed to
* @return fd file descriptor from which events are read
*/
int ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]);
int ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Set log category and levels for the Node
*
* @param handle NDB management handle.
* @param nodeId Node id.
* @param nodeId Node ID.
* @param category Event category.
* @param level Log level (0-15).
* @param reply Reply message.
......@@ -810,10 +881,10 @@ extern "C" {
* Start backup
*
* @param handle NDB management handle.
* @param wait_completed 0=don't wait for confirmation,
* 1=wait for backup started,
* 2=wait for backup completed
* @param backup_id Backup id is returned from function.
* @param wait_completed 0: Don't wait for confirmation<br>
* 1: Wait for backup to be started<br>
* 2: Wait for backup to be completed
* @param backup_id Backup ID is returned from function.
* @param reply Reply message.
* @return -1 on error.
*/
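A sketch of a typical backup call; the exact prototype is not shown in this excerpt,
so the argument order below is an assumption based on the parameter list above and
should be checked against the header:
@code
unsigned int backup_id;
struct ndb_mgm_reply reply;
/* wait_completed = 2: block until the backup has completed */
if (ndb_mgm_start_backup(handle, 2, &backup_id, &reply) == -1)
{
  /* handle error */
}
else
{
  printf("backup %u completed\n", backup_id);
}
@endcode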
......@@ -825,7 +896,7 @@ extern "C" {
* Abort backup
*
* @param handle NDB management handle.
* @param backup_id Backup Id.
* @param backup_id Backup ID.
* @param reply Reply message.
* @return -1 on error.
*/
......@@ -843,7 +914,7 @@ extern "C" {
* Enter Single user mode
*
* @param handle NDB management handle.
* @param nodeId Node Id of the single user node
* @param nodeId Node ID of the single user node
* @param reply Reply message.
* @return -1 on error.
*/
......@@ -854,7 +925,6 @@ extern "C" {
* Exit Single user mode
*
* @param handle NDB management handle.
* @param nodeId Node Id of the single user node
* @param reply Reply message.
*
* @return -1 on error.
......@@ -862,6 +932,7 @@ extern "C" {
int ndb_mgm_exit_single_user(NdbMgmHandle handle,
struct ndb_mgm_reply* reply);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** @} *********************************************************************/
/**
* @name Configuration handling
......@@ -872,17 +943,16 @@ extern "C" {
* Get configuration
* @param handle NDB management handle.
* @param version Version of configuration, 0 means latest
* (which is the only supported input at this point)
* (Currently this is the only supported value for this parameter)
*
* @return configuration
*
* @note the caller must call ndb_mgm_destroy_configuration()
* @note The caller is responsible for calling ndb_mgm_destroy_configuration()
*/
struct ndb_mgm_configuration * ndb_mgm_get_configuration(NdbMgmHandle handle,
unsigned version);
void ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *);
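Usage sketch (error handling omitted):
@code
struct ndb_mgm_configuration *conf = ndb_mgm_get_configuration(handle, 0);
if (conf != NULL)
{
  /* ... inspect the configuration ... */
  ndb_mgm_destroy_configuration(conf);
}
@endcode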
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
int ndb_mgm_alloc_nodeid(NdbMgmHandle handle,
unsigned version, int nodetype);
/**
......@@ -910,6 +980,36 @@ extern "C" {
int ndb_mgm_check_connection(NdbMgmHandle handle);
#endif
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
enum ndb_mgm_clusterlog_level {
NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL = -1,
NDB_MGM_CLUSTERLOG_ON = 0,
NDB_MGM_CLUSTERLOG_DEBUG = 1,
NDB_MGM_CLUSTERLOG_INFO = 2,
NDB_MGM_CLUSTERLOG_WARNING = 3,
NDB_MGM_CLUSTERLOG_ERROR = 4,
NDB_MGM_CLUSTERLOG_CRITICAL = 5,
NDB_MGM_CLUSTERLOG_ALERT = 6,
NDB_MGM_CLUSTERLOG_ALL = 7
};
inline
int ndb_mgm_filter_clusterlog(NdbMgmHandle h,
enum ndb_mgm_clusterlog_level s,
int e, struct ndb_mgm_reply* r)
{ return ndb_mgm_set_clusterlog_severity_filter(h,(ndb_mgm_event_severity)s,
e,r); }
inline
const unsigned int *ndb_mgm_get_logfilter(NdbMgmHandle h)
{ return ndb_mgm_get_clusterlog_severity_filter(h); }
inline
int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle h, int n,
enum ndb_mgm_event_category c,
int l, struct ndb_mgm_reply* r)
{ return ndb_mgm_set_clusterlog_loglevel(h,n,c,l,r); }
#endif
#ifdef __cplusplus
}
#endif
......
......@@ -94,7 +94,7 @@
#define CFG_LOGLEVEL_INFO 256
#define CFG_LOGLEVEL_WARNING 257
#define CFG_LOGLEVEL_ERROR 258
#define CFG_LOGLEVEL_GREP 259
#define CFG_LOGLEVEL_CONGESTION 259
#define CFG_LOGLEVEL_DEBUG 260
#define CFG_LOGLEVEL_BACKUP 261
#define CFG_MAX_LOGLEVEL 261
......
......@@ -51,20 +51,20 @@
The procedure for using transactions is as follows:
-# Start transaction (instantiate an NdbTransaction object)
-# Add and define operations associated with the transaction using the
NdbOperation, NdbScanOperation, NdbIndexOperation, and NdbIndexScanOperation classes.
-# Execute transaction
-# Add and define operations associated with the transaction using instances of one or more of the
NdbOperation, NdbScanOperation, NdbIndexOperation, and NdbIndexScanOperation classes
-# Execute transaction (call NdbTransaction::execute())
The execution can be of two different types,
The operation can be of two different types,
<var>Commit</var> or <var>NoCommit</var>.
If the execution is of type <var>NoCommit</var>,
then the application program executes part of a transaction,
If the operation is of type <var>NoCommit</var>,
then the application program executes the operation part of a transaction,
but without actually committing the transaction.
After executing a <var>NoCommit</var> transaction, the program can continue
After executing a <var>NoCommit</var> operation, the program can continue
to add and define more operations to the transaction
for later execution.
If the execute is of type <var>Commit</var>, then the transaction is
If the operation is of type <var>Commit</var>, then the transaction is
immediately committed. The transaction <em>must</em> be closed after it has been
committed (even if the commit fails), and no further addition or definition of
operations for this transaction is allowed.
......@@ -78,15 +78,16 @@
(typically created using Ndb::startTransaction()).
At this point, the transaction is only being defined,
and is not yet sent to the NDB kernel.
-# Define operations and add them to the transaction, using
NdbTransaction::getNdbOperation(),
NdbTransaction::getNdbScanOperation(),
NdbTransaction::getNdbIndexOperation(), or
NdbTransaction::getNdbIndexScanOperation(),
and methods of the respective NdbOperation class.
-# Define operations and add them to the transaction, using one or more of
- NdbTransaction::getNdbOperation()
- NdbTransaction::getNdbScanOperation()
- NdbTransaction::getNdbIndexOperation()
- NdbTransaction::getNdbIndexScanOperation()
along with the appropriate methods of the respective NdbOperation class
(or possibly one or more of its subclasses).
Note that the transaction has not yet been sent to the NDB kernel.
-# Execute the transaction, using the NdbTransaction::execute() method.
-# Close the transaction (using Ndb::closeTransaction()).
-# Close the transaction (call Ndb::closeTransaction()).
For an example of this process, see the program listing in
@ref ndbapi_simple.cpp.
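The following sketch illustrates these steps for a single-row insert. It assumes an
already-initialized Ndb object named <var>myNdb</var>; the table and attribute names
are hypothetical, the char*-based getNdbOperation() overload is assumed, and error
handling is omitted for brevity:
@code
NdbTransaction *trans = myNdb.startTransaction();
NdbOperation *op = trans->getNdbOperation("MYTABLE");   // hypothetical table
op->insertTuple();
op->equal("ATTR1", 10);        // primary key value
op->setValue("ATTR2", 20);     // non-key attribute value
trans->execute(NdbTransaction::Commit);
myNdb.closeTransaction(trans);
@endcode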
......@@ -467,51 +468,53 @@
/**
@page secAdapt Adaptive Send Algorithm
At the time of "sending" the transaction
At the time of "sending" a transaction
(using NdbTransaction::execute()), the transactions
are in reality <em>not</em> immediately transfered to the NDB Kernel.
Instead, the "sent" transactions are only kept in a
special send list (buffer) in the Ndb object to which they belong.
The adaptive send algorithm decides when transactions should
be transfered to the NDB kernel.
actually be transferred to the NDB kernel.
The NDB API is designed as a multi-threaded interface and
it is desirable to transfer database operations from more than
The NDB API is designed as a multi-threaded interface and so
it is often desirable to transfer database operations from more than
one thread at a time.
The NDB API keeps track of which Ndb objects are active in transfering
The NDB API keeps track of which Ndb objects are active in transferring
information to the NDB kernel and the expected amount of threads to
interact with the NDB kernel.
Note that an Ndb object should be used in at most one thread.
Two different threads should <em>not</em> use the same Ndb object.
Note that a given instance of Ndb should be used in at most one thread;
different threads should <em>not</em> use the same Ndb object.
There are four reasons leading to transfering of database
operations:
There are four conditions leading to the transfer of database
operations from Ndb object buffers to the NDB kernel:
-# The NDB Transporter (TCP/IP, OSE, SCI or shared memory)
decides that a buffer is full and sends it off.
The buffer size is implementation dependent and
might change between NDB Cluster releases.
On TCP/IP the buffer size is usually around 64 kByte and
The buffer size is implementation-dependent and
may change between MySQL Cluster releases.
On TCP/IP the buffer size is usually around 64 KB;
on OSE/Delta it is usually less than 2000 bytes.
In each Ndb object there is one buffer per DB node,
so this criteria of a full buffer is only
local to the connection to one DB node.
-# Statistical information on the transfered information
may force sending of buffers to all DB nodes.
-# Every 10 ms a special send-thread checks whether
Since each Ndb object provides a single buffer per storage node,
the notion of a "full" buffer is local to this storage node.
-# The accumulation of statistical data on transferred information
may force sending of buffers to all storage nodes.
-# Every 10 ms, a special transmission thread checks whether or not
any send activity has occurred. If not, then the thread will
force sending to all nodes.
force transmission to all nodes.
This means that 20 ms is the maximum time database operations
are waiting before being sent off. The 10 millisecond limit
are kept waiting before being sent off. The 10-millisecond limit
is likely to become a configuration parameter in
later releases of NDB Cluster.
However, to support faster than 10 ms checks,
there has to be support from the operating system.
-# When methods that are affected by the adaptive send alorithm,
e.g. NdbTransaction::execute(), there is a force parameter
that overrides it forces the send to all nodes.
@note The reasons mentioned above are examples. These might
change in later releases of NDB Cluster.
future releases of MySQL Cluster; however, for checks that
are more frequent than each 10 ms,
additional support from the operating system is required.
-# For methods that are affected by the adaptive send algorithm
(such as NdbTransaction::execute()), there is a <var>force</var>
parameter
that overrides its default behaviour in this regard and forces
immediate transmission to all nodes. See the individual NDB API class
listings for more information.
@note The conditions listed above are subject to change in future releases
of MySQL Cluster.
*/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
......@@ -537,64 +540,69 @@
#endif
/**
@page secConcepts NDB Cluster Concepts
@page secConcepts MySQL Cluster Concepts
The <em>NDB Kernel</em> is the collection of storage nodes
belonging to an NDB Cluster.
belonging to a MySQL Cluster.
The application programmer can for most purposes view the
set of all DB nodes as one entity.
Each DB node has three main components:
- TC : The transaction coordinator
- ACC : The index storage
- TUP : The data storage
When the application program executes a transaction,
it connects to one TC on one DB node.
Usually, the programmer does not need to specify which TC to use,
but some cases when performance is important,
transactions can be hinted to use a certain TC.
(If the node with the TC is down, then another TC will
set of all storage nodes as a single entity.
Each storage node is made up of three main components:
- TC : The transaction co-ordinator
- ACC : Index storage component
- TUP : Data storage component
When an application program executes a transaction,
it connects to one transaction co-ordinator on one storage node.
Usually, the programmer does not need to specify which TC should be used,
but in some cases when performance is important, the programmer can
provide "hints" to use a certain TC.
(If the node with the desired transaction co-ordinator is down, then another TC will
automatically take over the work.)
Every DB node has an ACC and a TUP which stores
the index and the data part of the database.
Every storage node has an ACC and a TUP which store
the indexes and data portions of the database table fragment.
Even though one TC is responsible for the transaction,
several ACCs and TUPs on other DB nodes might be involved in the
several ACCs and TUPs on other storage nodes might be involved in the
execution of the transaction.
@section secNdbKernelConnection Selecting Transaction Coordinator
@section secNdbKernelConnection Selecting a Transaction Co-ordinator
The default method is to select the transaction coordinator (TC) as being
the "closest" DB node. There is a heuristics for closeness based on
the type of transporter connection. In order of closest first, we have
SCI, SHM, TCP/IP (localhost), and TCP/IP (remote host). If there are several
connections available with the same "closeness", they will each be
The default method is to select the transaction co-ordinator (TC) determined to be
the "closest" storage node, using a heuristic for proximity based on
the type of transporter connection. In order of closest to most distant, these are
- SCI
- SHM
- TCP/IP (localhost)
- TCP/IP (remote host)
If there are several connections available with the same proximity, they will each be
selected in a round robin fashion for every transaction. Optionally
one may set the methos for TC selection round robin over all available
connections, where each new set of transactions
is placed on the next DB node.
The application programmer can however hint the NDB API which
transaction coordinator to use
by providing a <em>partition key</em> (usually the primary key).
By using the primary key as partition key,
one may set the method for TC selection to round-robin mode, where each new set of
transactions is placed on the next DB node. The pool of connections from which this
selection is made consists of all available connections.
As noted previously, the application programmer can provide hints to the NDB API as to
which transaction co-ordinator it should use. This is done by
providing a <em>partition key</em> (usually the primary key).
By using the primary key as the partition key,
the transaction will be placed on the node where the primary replica
of that record resides.
Note that this is only a hint, the system can be
reconfigured and then the NDB API will choose a transaction
coordinator without using the hint.
For more information, see NdbDictionary::Column::getPartitionKey(),
Note that this is only a hint; the system can be
reconfigured at any time, in which case the NDB API will choose a transaction
co-ordinator without using the hint.
For more information, see NdbDictionary::Column::getPartitionKey() and
Ndb::startTransaction(). The application programmer can specify
the partition key from SQL by using the construct,
"CREATE TABLE ... ENGINE=NDB PARTITION BY KEY (<attribute list>)".
<code>CREATE TABLE ... ENGINE=NDB PARTITION BY KEY (<var>attribute-list</var>);</code>.
@section secRecordStruct Record Structure
NDB Cluster is a relational database with tables of records.
Table rows represent tuples of relational data stored as records.
When created, the attribute schema of the table is specified,
and thus each record of the table has the same schema.
@section secRecordStruct NDB Record Structure
The NDB Cluster engine used by MySQL Cluster is a relational database engine
storing records in tables just as with any other RDBMS.
Table rows represent records as tuples of relational data.
When a new table is created, its attribute schema is specified for the table as a whole,
and thus each record of the table has the same structure. Again, this is typical
of relational databases, and NDB is no different in this regard.
@subsection secKeys Primary Keys
......@@ -603,14 +611,14 @@
@section secTrans Transactions
Transactions are committed to main memory,
and are committed to disk after a global checkpoint, GCP.
Transactions are committed first to main memory,
and then to disk after a global checkpoint (GCP) is issued.
Since all data is (in most NDB Cluster configurations)
synchronously replicated and stored on multiple NDB nodes,
the system can still handle processor failures without loss
of data.
However, in the case of a system failure (e.g. the whole system goes down),
then all (committed or not) transactions after the latest GCP are lost.
then all (committed or not) transactions occurring since the latest GCP are lost.
@subsection secConcur Concurrency Control
......@@ -619,39 +627,38 @@
cannot be attained within a specified time,
then a timeout error occurs.
Concurrent transactions (parallel application programs, thread-based
applications)
sometimes deadlock when they try to access the same information.
Applications need to be programmed so that timeout errors
occurring due to deadlocks are handled. This generally
means that the transaction encountering timeout
should be rolled back and restarted.
Concurrent transactions as requested by parallel application programs and
thread-based applications can sometimes deadlock when they try to access
the same information simultaneously.
Thus, applications need to be written in a manner so that timeout errors
occurring due to such deadlocks are handled gracefully. This generally
means that the transaction encountering a timeout should be rolled back
and restarted.
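A minimal sketch of such handling, assuming an already-initialized Ndb object
<var>myNdb</var>, a hypothetical helper define_operations() that adds the
transaction's operations, and NdbError::Status to distinguish temporary from
permanent failures:
@code
for (int attempt = 0; attempt < max_retries; attempt++)
{
  NdbTransaction *trans = myNdb.startTransaction();
  define_operations(trans);                      // hypothetical helper
  if (trans->execute(NdbTransaction::Commit) == 0)
  {
    myNdb.closeTransaction(trans);
    break;                                       // success
  }
  NdbError error = trans->getNdbError();
  myNdb.closeTransaction(trans);                 // always close, even on failure
  if (error.status != NdbError::TemporaryError)
    break;                                       // permanent error: give up
  // temporary error (e.g. lock timeout): back off and retry
}
@endcode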
@section secHint Hints and performance
@section secHint Hints and Performance
Placing the transaction coordinator close
Placing the transaction co-ordinator in close proximity
to the actual data used in the transaction can in many cases
improve performance significantly. This is particularly true for
systems using TCP/IP. A system using Solaris and a 500 MHz processor
has a cost model for TCP/IP communication which is:
systems using TCP/IP. For example, a Solaris system using a single 500 MHz processor
has a cost model for TCP/IP communication which can be represented by the formula
30 microseconds + (100 nanoseconds * no of Bytes)
<code>[30 microseconds] + ([100 nanoseconds] * [<var>number of bytes</var>])</code>
This means that if we can ensure that we use "popular" links we increase
buffering and thus drastically reduce the communication cost.
Systems using SCI has a different cost model which is:
The same system using SCI has a different cost model:
5 microseconds + (10 nanoseconds * no of Bytes)
<code>[5 microseconds] + ([10 nanoseconds] * [<var>number of bytes</var>])</code>
Thus SCI systems are much less dependent on selection of
transaction coordinators.
Typically TCP/IP systems spend 30-60% of the time during communication,
whereas SCI systems typically spend 5-10% of the time during
communication.
Thus SCI means that less care from the NDB API programmer is
needed and great scalability can be achieved even for applications using
data from many parts of the database.
Thus, the efficiency of an SCI system is much less dependent on selection of
transaction co-ordinators.
Typically, TCP/IP systems spend 30-60% of their working time on communication,
whereas for SCI systems this figure is closer to 5-10%.
Thus, employing SCI for data transport means that less care from the NDB API
programmer is required and greater scalability can be achieved, even for
applications using data from many different parts of the database.
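As a worked example using the cost models above, transporting a 1000-byte operation
costs roughly 30 + (0.1 * 1000) = 130 microseconds over TCP/IP, but only
5 + (0.01 * 1000) = 15 microseconds over SCI.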
A simple example is an application that uses many simple updates where
a transaction needs to update one record.
......@@ -927,11 +934,11 @@
i.e. get/setValue("kalle[3]");
@subsection secArrays Array Attributes
A table attribute in NDB Cluster can be of <em>array type</em>.
This means that the attribute consists of an array of
<em>elements</em>. The <em>attribute size</em> is the size
of one element of the array (expressed in bits) and the
<em>array size</em> is the number of elements of the array.
A table attribute in NDB Cluster can be of type <var>Array</var>,
meaning that the attribute consists of an ordered sequence of
elements. In such cases, <var>attribute size</var> is the size
(expressed in bits) of any one element making up the array; the
<var>array size</var> is the number of elements in the array.
*/
......@@ -1050,16 +1057,16 @@ public:
/**
* The Ndb object represents a connection to a database.
*
* @note the init() method must be called before it may be used
* @note The init() method must be called before the Ndb object may actually be used.
*
* @param ndb_cluster_connection is a connection to a cluster containing
* @param ndb_cluster_connection is a connection to the cluster containing
* the database to be used
* @param aCatalogName is the name of the catalog you want to use.
* @note The catalog name provides a name space for the tables and
* @param aCatalogName is the name of the catalog to be used.
* @note The catalog name provides a namespace for the tables and
* indexes created in any connection from the Ndb object.
* @param aSchemaName is the name of the schema you
* want to use.
* @note The schema name provides an additional name space
* @note The schema name provides an additional namespace
* for the tables and indexes created in a given catalog.
*/
Ndb(Ndb_cluster_connection *ndb_cluster_connection,
......@@ -1339,9 +1346,8 @@ public:
int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT,
int minNoOfEventsToWakeup = 1,
int forceSend = 0);
#endif
/** @} *********************************************************************/
#endif
/**
* @name Error Handling
......
......@@ -107,6 +107,9 @@ public:
Closed = 3,
Invalid = 9
};
/**
* Get the state of an NdbBlob object.
*/
State getState();
/**
* Inline blob header.
......@@ -116,7 +119,7 @@ public:
};
/**
* Prepare to read blob value. The value is available after execute.
* Use getNull to check for NULL and getLength to get the real length
* Use getNull() to check for NULL and getLength() to get the real length
* and to check for truncation. Sets current read/write position to
* after the data read.
*/
......@@ -129,10 +132,10 @@ public:
*/
int setValue(const void* data, Uint32 bytes);
/**
* Callback for setActiveHook. Invoked immediately when the prepared
* operation has been executed (but not committed). Any getValue or
* setValue is done first. The blob handle is active so readData or
* writeData etc can be used to manipulate blob value. A user-defined
* Callback for setActiveHook(). Invoked immediately when the prepared
* operation has been executed (but not committed). Any getValue() or
* setValue() is done first. The blob handle is active so readData() or
* writeData() etc. can be used to manipulate the blob value. A user-defined
* argument is passed along. Returns non-zero on error.
*/
typedef int ActiveHook(NdbBlob* me, void* arg);
......@@ -195,10 +198,15 @@ public:
const NdbError& getNdbError() const;
/**
* Return info about all blobs in this operation.
*
* Get first blob in list.
*/
// Get first blob in list
NdbBlob* blobsFirstBlob();
// Get next blob in list after this one
/**
* Return info about all blobs in this operation.
*
* Get next blob in list. Initialize with blobsFirstBlob().
*/
NdbBlob* blobsNextBlob();
private:
......
......@@ -631,7 +631,7 @@ public:
* Assignment operator, deep copy
* @param table Table to be copied
*/
Table& operator=(const Table&);
Table& operator=(const Table& table);
/**
* Name of table
......@@ -946,12 +946,12 @@ public:
#endif
};
/*
/**
* Constructor
* @param name Name of event
*/
Event(const char *name);
/*
/**
* Constructor
* @param name Name of event
* @param table Reference retrieved from NdbDictionary
......@@ -994,9 +994,12 @@ public:
*/
void addTableEvent(const TableEvent te);
/**
* Get/set durability of the event
* Set durability of the event
*/
void setDurability(EventDurability);
/**
* Get durability of the event
*/
void setDurability(EventDurability ed);
EventDurability getDurability() const;
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
void addColumn(const Column &c);
......@@ -1116,6 +1119,13 @@ public:
/**
* Fetch list of all objects, optionally restricted to given type.
*
* @param list List of objects returned in the dictionary
* @param type Restrict returned list to only contain objects of
* this type
*
* @return -1 if error.
*
*/
int listObjects(List & list, Object::Type type = Object::TypeUndefined);
int listObjects(List & list,
......@@ -1168,10 +1178,10 @@ public:
/**
* Create event given defined Event instance
* @param Event to create
* @param event Event to create
* @return 0 if successful otherwise -1.
*/
int createEvent(const Event &);
int createEvent(const Event &event);
/**
* Drop event with given name
......@@ -1199,17 +1209,17 @@ public:
/**
* Create defined table given defined Table instance
* @param Table Table to create
* @param table Table to create
* @return 0 if successful otherwise -1.
*/
int createTable(const Table &);
int createTable(const Table &table);
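An illustrative sketch of defining and creating a table through the dictionary,
assuming an already-initialized Ndb object <var>myNdb</var>; table and column names
are hypothetical and error handling is omitted:
@code
NdbDictionary::Table tab("MYTABLE");
NdbDictionary::Column key("ATTR1");
key.setType(NdbDictionary::Column::Unsigned);
key.setPrimaryKey(true);
tab.addColumn(key);
NdbDictionary::Column val("ATTR2");
val.setType(NdbDictionary::Column::Unsigned);
tab.addColumn(val);
NdbDictionary::Dictionary *dict = myNdb.getDictionary();
if (dict->createTable(tab) == -1)
{
  /* inspect dict->getNdbError() */
}
@endcode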
/**
* Drop table given retrieved Table instance
* @param Table Table to drop
* @param table Table to drop
* @return 0 if successful otherwise -1.
*/
int dropTable(Table &);
int dropTable(Table & table);
/**
* Drop table given table name
......@@ -1220,12 +1230,12 @@ public:
/**
* Alter defined table given defined Table instance
* @param Table Table to alter
* @param table Table to alter
* @return -2 (incompatible version) <br>
* -1 general error <br>
* 0 success
*/
int alterTable(const Table &);
int alterTable(const Table &table);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
......@@ -1257,10 +1267,10 @@ public:
/**
* Create index given defined Index instance
* @param Index to create
* @param index Index to create
* @return 0 if successful otherwise -1.
*/
int createIndex(const Index &);
int createIndex(const Index &index);
/**
* Drop index with given name
......
......@@ -204,6 +204,7 @@ struct NdbError {
*/
char * details;
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
NdbError(){
status = UnknownResult;
classification = NoError;
......@@ -227,6 +228,7 @@ struct NdbError {
ndberror.details = details;
return ndberror;
}
#endif
};
class NdbOut& operator <<(class NdbOut&, const NdbError &);
......
......@@ -132,7 +132,7 @@ public:
* next() has returned a value greater than
* zero. If a specific attribute has not changed the corresponding
* NdbRecAttr will be in state UNDEFINED. This is checked by
* NdbRecAttr::isNull() which then returns -1.
* NdbRecAttr::isNULL() which then returns -1.
*
* @param anAttrName Attribute name
* @param aValue If this is non-NULL, then the attribute value
......@@ -144,6 +144,9 @@ public:
* (indicating error).
*/
NdbRecAttr *getValue(const char *anAttrName, char *aValue = 0);
/**
* See getValue().
*/
NdbRecAttr *getPreValue(const char *anAttrName, char *aValue = 0);
/**
......
......@@ -45,7 +45,7 @@ public:
* @returns 0 for success and -1 for failure
* @see NdbScanOperation::readTuples
*/
int readTuples(LockMode = LM_Read,
int readTuples(LockMode lock_mode = LM_Read,
Uint32 batch = 0,
Uint32 parallel = 0,
bool order_by = false,
......@@ -90,15 +90,14 @@ public:
* An index also stores all-NULL keys. Doing an index scan with an empty
* bound set returns all table tuples.
*
* @param attrName Attribute name, alternatively:
* @param anAttrId Index column id (starting from 0)
* @param attr Attribute name, alternatively:
* @param type Type of bound
* @param value Pointer to bound value, 0 for NULL
* @param len Value length in bytes.
* Fixed per datatype and can be omitted
* @return 0 if successful otherwise -1
*/
int setBound(const char* attr, int type, const void* aValue, Uint32 len = 0);
int setBound(const char* attr, int type, const void* value, Uint32 len = 0);
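Taken together with readTuples() above, a sketch of an equality-bounded index scan; the index, table and column names are hypothetical, and BoundEQ plus the NoCommit execute step are assumed from the 5.0 scan interface (myNdb as before):

NdbTransaction *trans = myNdb->startTransaction();
NdbIndexScanOperation *scan =
    trans->getNdbIndexScanOperation("my_index", "my_table");
scan->readTuples(NdbOperation::LM_Read);
Uint32 key = 17;
scan->setBound("id", NdbIndexScanOperation::BoundEQ, &key);  // len omitted: fixed-size type
NdbRecAttr *val = scan->getValue("val");
trans->execute(NdbTransaction::NoCommit);
while (scan->nextResult(true) == 0)
  printf("val = %u\n", val->u_32_value());
myNdb->closeTransaction(trans);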
/**
* Define bound on index key in range scan using index column id.
......
......@@ -966,6 +966,7 @@ protected:
#include <stdlib.h>
#endif
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
inline
int
......@@ -1215,4 +1216,6 @@ NdbOperation::setValue(Uint32 anAttrId, double aPar)
return setValue(anAttrId, (const char*)&aPar, (Uint32)8);
}
#endif // doxygen
#endif
......@@ -281,6 +281,8 @@ private:
const NdbDictionary::Column* m_column;
};
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
inline
NdbDictionary::Column::Type
NdbRecAttr::getType() const {
......@@ -457,5 +459,7 @@ NdbRecAttr::isNULL() const
class NdbOut& operator <<(class NdbOut&, const NdbRecAttr &);
#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
#endif
......@@ -71,6 +71,7 @@ public:
* from the scanned tuple.
*
* @param fetchAllowed If set to false, then fetching is disabled
* @param forceSend If true send will occur immediately (see @ref secAdapt)
*
* The NDB API will contact the NDB Kernel for more tuples
* when necessary to do so unless you set the fetchAllowed
......@@ -122,6 +123,13 @@ public:
* @return an NdbOperation or NULL.
*/
NdbOperation* updateCurrentTuple();
/**
* Update current tuple
*
* @param updateTrans Transaction that should perform the update
*
* @return an NdbOperation or NULL.
*/
NdbOperation* updateCurrentTuple(NdbTransaction* updateTrans);
/**
......@@ -129,6 +137,13 @@ public:
* @return 0 on success or -1 on failure
*/
int deleteCurrentTuple();
/**
* Delete current tuple
*
* @param takeOverTransaction Transaction that should perform the delete
*
* @return 0 on success or -1 on failure
*/
int deleteCurrentTuple(NdbTransaction* takeOverTransaction);
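A hedged sketch of the take-over pattern these methods enable: scan with an exclusive lock, queue a delete for each row at the cursor, and send the queued operations before fetching the next batch (execute types and nextResult() return codes assumed from the 5.0 scan interface):

NdbTransaction *trans = myNdb->startTransaction();
NdbScanOperation *scan = trans->getNdbScanOperation("my_table");
scan->readTuples(NdbOperation::LM_Exclusive);
trans->execute(NdbTransaction::NoCommit);
int check;
while ((check = scan->nextResult(true)) == 0) {
  do {
    if (scan->deleteCurrentTuple() != 0)
      break;                                        // inspect trans->getNdbError()
  } while ((check = scan->nextResult(false)) == 0); // drain the cached batch first
  if (check != -1)
    trans->execute(NdbTransaction::NoCommit);       // send queued deletes before fetching more
}
trans->execute(NdbTransaction::Commit);
myNdb->closeTransaction(trans);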
/**
......
......@@ -796,6 +796,8 @@ private:
void define_scan_op(NdbIndexScanOperation*);
};
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
inline
Uint32
NdbTransaction::get_send_size()
......@@ -1034,4 +1036,6 @@ NdbTransaction::ptr2int(){
typedef NdbTransaction NdbConnection;
#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
#endif
......@@ -20,39 +20,42 @@
/**
* @class Ndb_cluster_connection
* @brief Represents a connection to a cluster of storage nodes
* @brief Represents a connection to a cluster of storage nodes.
*
* Always start your application program by creating a
* Ndb_cluster_connection object. Your application should contain
* only one Ndb_cluster_connection. Your application connects to
* a cluster management server when method connect() is called.
* With the method wait_until_ready() it is possible to wait
* for the connection to one or several storage nodes.
* Any NDB application program should begin with the creation of a
* single Ndb_cluster_connection object, and should make use of one
* and only one Ndb_cluster_connection. The application connects to
* a cluster management server when this object's connect() method is called.
* By using the wait_until_ready() method it is possible to wait
* for the connection to reach one or more storage nodes.
*/
class Ndb_cluster_connection {
public:
/**
* Create a connection to a cluster of storage nodes
*
* @param specify the connectstring for where to find the
* @param connectstring The connectstring for where to find the
* management server
*/
Ndb_cluster_connection(const char * connect_string = 0);
Ndb_cluster_connection(const char * connectstring = 0);
~Ndb_cluster_connection();
/**
* Connect to a cluster management server
*
* @param no_retries specifies the number of retries to perform
* if the connect fails, negative number results in infinite
* number of retries
* @param no_retries specifies the number of retries to attempt
* in the event of connection failure; a negative value
* will result in the attempt to connect being repeated
* indefinitely
*
* @param retry_delay_in_seconds specifies how often retries should
* be performed
* @param verbose specifies if the method should print progess
*
* @return 0 if success,
* 1 if retriable error,
* -1 if non-retriable error
* @param verbose specifies if the method should print a report of its progress
*
* @return 0 = success,
* 1 = recoverable error,
* -1 = non-recoverable error
*/
int connect(int no_retries=0, int retry_delay_in_seconds=1, int verbose=0);
......@@ -61,15 +64,15 @@ public:
#endif
/**
* Wait until one or several storage nodes are connected
* Wait until the requested connection with one or more storage nodes is successful
*
* @param time_out_for_first_alive number of seconds to wait until
* first alive node is detected
* @param timeout_after_first_alive number of seconds to wait after
* first alive node is detected
* @param timeout_for_first_alive Number of seconds to wait until
* first live node is detected
* @param timeout_after_first_alive Number of seconds to wait after
* first live node is detected
*
* @return 0 all nodes alive,
* > 0 at least one node alive,
* @return = 0 all nodes live,
* > 0 at least one node live,
* < 0 error
*/
int wait_until_ready(int timeout_for_first_alive,
......
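Putting connect() and wait_until_ready() together, a minimal start-up sketch; the connectstring, retry values and database name are placeholders, and ndb_init() is assumed to be required once per process in this version:

#include <NdbApi.hpp>

int connect_to_cluster()
{
  ndb_init();                                     // once per process
  Ndb_cluster_connection con("localhost:1186");   // hypothetical connectstring
  if (con.connect(4, 5, 1) != 0)                  // 4 retries, 5 s apart, verbose
    return -1;                                    // 1 = recoverable, -1 = non-recoverable error
  if (con.wait_until_ready(30, 0) < 0)
    return -1;                                    // no storage node became ready in time
  Ndb myNdb(&con, "TEST_DB");                     // hypothetical database name
  myNdb.init();
  return 0;
}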
......@@ -21,6 +21,8 @@
extern "C" {
#endif
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
typedef enum
{
ndberror_st_success = 0,
......@@ -93,6 +95,8 @@ const char *ndberror_classification_message(ndberror_classification);
void ndberror_update(ndberror_struct *);
int ndb_error_string(int err_no, char *str, unsigned int size);
#endif /* doxygen skip internal*/
#ifdef __cplusplus
}
#endif
......
......@@ -105,10 +105,6 @@ const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
{ EventReport::CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO },
{ EventReport::InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO },
//Global replication
{ EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, Logger::LL_INFO},
{ EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, Logger::LL_ALERT},
// Backup
{ EventReport::BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO },
{ EventReport::BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO },
......
......@@ -916,67 +916,68 @@ ndb_mgm_restart(NdbMgmHandle handle, int no_of_nodes, const int *node_list)
return ndb_mgm_restart2(handle, no_of_nodes, node_list, 0, 0, 0);
}
static const char *clusterlog_level_names[]=
static const char *clusterlog_severity_names[]=
{ "enabled", "debug", "info", "warning", "error", "critical", "alert" };
struct ndb_mgm_clusterlog_levels
struct ndb_mgm_event_severities
{
const char* name;
enum ndb_mgm_clusterlog_level level;
} clusterlog_levels[] = {
{ clusterlog_level_names[0], NDB_MGM_CLUSTERLOG_ON },
{ clusterlog_level_names[1], NDB_MGM_CLUSTERLOG_DEBUG },
{ clusterlog_level_names[2], NDB_MGM_CLUSTERLOG_INFO },
{ clusterlog_level_names[3], NDB_MGM_CLUSTERLOG_WARNING },
{ clusterlog_level_names[4], NDB_MGM_CLUSTERLOG_ERROR },
{ clusterlog_level_names[5], NDB_MGM_CLUSTERLOG_CRITICAL },
{ clusterlog_level_names[6], NDB_MGM_CLUSTERLOG_ALERT },
{ "all", NDB_MGM_CLUSTERLOG_ALL },
{ 0, NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL },
enum ndb_mgm_event_severity severity;
} clusterlog_severities[] = {
{ clusterlog_severity_names[0], NDB_MGM_EVENT_SEVERITY_ON },
{ clusterlog_severity_names[1], NDB_MGM_EVENT_SEVERITY_DEBUG },
{ clusterlog_severity_names[2], NDB_MGM_EVENT_SEVERITY_INFO },
{ clusterlog_severity_names[3], NDB_MGM_EVENT_SEVERITY_WARNING },
{ clusterlog_severity_names[4], NDB_MGM_EVENT_SEVERITY_ERROR },
{ clusterlog_severity_names[5], NDB_MGM_EVENT_SEVERITY_CRITICAL },
{ clusterlog_severity_names[6], NDB_MGM_EVENT_SEVERITY_ALERT },
{ "all", NDB_MGM_EVENT_SEVERITY_ALL },
{ 0, NDB_MGM_ILLEGAL_EVENT_SEVERITY },
};
extern "C"
ndb_mgm_clusterlog_level
ndb_mgm_match_clusterlog_level(const char * name)
ndb_mgm_event_severity
ndb_mgm_match_event_severity(const char * name)
{
if(name == 0)
return NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL;
return NDB_MGM_ILLEGAL_EVENT_SEVERITY;
for(int i = 0; clusterlog_levels[i].name !=0 ; i++)
if(strcasecmp(name, clusterlog_levels[i].name) == 0)
return clusterlog_levels[i].level;
for(int i = 0; clusterlog_severities[i].name !=0 ; i++)
if(strcasecmp(name, clusterlog_severities[i].name) == 0)
return clusterlog_severities[i].severity;
return NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL;
return NDB_MGM_ILLEGAL_EVENT_SEVERITY;
}
extern "C"
const char *
ndb_mgm_get_clusterlog_level_string(enum ndb_mgm_clusterlog_level level)
ndb_mgm_get_event_severity_string(enum ndb_mgm_event_severity severity)
{
int i= (int)level;
if (i >= 0 && i < (int)NDB_MGM_CLUSTERLOG_ALL)
return clusterlog_level_names[i];
for(i = (int)NDB_MGM_CLUSTERLOG_ALL; clusterlog_levels[i].name != 0; i++)
if(clusterlog_levels[i].level == level)
return clusterlog_levels[i].name;
int i= (int)severity;
if (i >= 0 && i < (int)NDB_MGM_EVENT_SEVERITY_ALL)
return clusterlog_severity_names[i];
for(i = (int)NDB_MGM_EVENT_SEVERITY_ALL; clusterlog_severities[i].name != 0; i++)
if(clusterlog_severities[i].severity == severity)
return clusterlog_severities[i].name;
return 0;
}
extern "C"
unsigned int *
ndb_mgm_get_logfilter(NdbMgmHandle handle)
const unsigned int *
ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_logfilter");
static Uint32 enabled[(int)NDB_MGM_CLUSTERLOG_ALL] = {0,0,0,0,0,0,0};
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_clusterlog_severity_filter");
static unsigned int enabled[(int)NDB_MGM_EVENT_SEVERITY_ALL]=
{0,0,0,0,0,0,0};
const ParserRow<ParserDummy> getinfo_reply[] = {
MGM_CMD("clusterlog", NULL, ""),
MGM_ARG(clusterlog_level_names[0], Int, Mandatory, ""),
MGM_ARG(clusterlog_level_names[1], Int, Mandatory, ""),
MGM_ARG(clusterlog_level_names[2], Int, Mandatory, ""),
MGM_ARG(clusterlog_level_names[3], Int, Mandatory, ""),
MGM_ARG(clusterlog_level_names[4], Int, Mandatory, ""),
MGM_ARG(clusterlog_level_names[5], Int, Mandatory, ""),
MGM_ARG(clusterlog_level_names[6], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[0], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[1], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[2], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[3], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[4], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[5], Int, Mandatory, ""),
MGM_ARG(clusterlog_severity_names[6], Int, Mandatory, ""),
};
CHECK_HANDLE(handle, NULL);
CHECK_CONNECTED(handle, NULL);
......@@ -986,20 +987,21 @@ ndb_mgm_get_logfilter(NdbMgmHandle handle)
reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args);
CHECK_REPLY(reply, NULL);
for(int i=0; i < (int)NDB_MGM_CLUSTERLOG_ALL; i++) {
reply->get(clusterlog_level_names[i], &enabled[i]);
for(int i=0; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) {
reply->get(clusterlog_severity_names[i], &enabled[i]);
}
return enabled;
}
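For orientation, a hedged sketch of driving the renamed severity-filter calls from a management client; the connectstring and the four-argument ndb_mgm_connect() are assumptions:

NdbMgmHandle h = ndb_mgm_create_handle();
ndb_mgm_set_connectstring(h, "localhost:1186");
if (ndb_mgm_connect(h, 0, 0, 0) == 0) {
  // enable DEBUG severity in the cluster log, then read back the whole filter
  ndb_mgm_set_clusterlog_severity_filter(h, NDB_MGM_EVENT_SEVERITY_DEBUG, 1, NULL);
  const unsigned int *f = ndb_mgm_get_clusterlog_severity_filter(h);
  for (int i = 0; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++)
    printf("%s: %u\n",
           ndb_mgm_get_event_severity_string((ndb_mgm_event_severity)i), f[i]);
}
ndb_mgm_destroy_handle(&h);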
extern "C"
int
ndb_mgm_filter_clusterlog(NdbMgmHandle handle,
enum ndb_mgm_clusterlog_level level,
ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle,
enum ndb_mgm_event_severity severity,
int enable,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_filter_clusterlog");
SET_ERROR(handle, NDB_MGM_NO_ERROR,
"Executing: ndb_mgm_set_clusterlog_severity_filter");
const ParserRow<ParserDummy> filter_reply[] = {
MGM_CMD("set logfilter reply", NULL, ""),
MGM_ARG("result", String, Mandatory, "Error message"),
......@@ -1010,7 +1012,7 @@ ndb_mgm_filter_clusterlog(NdbMgmHandle handle,
CHECK_CONNECTED(handle, -1);
Properties args;
args.put("level", level);
args.put("level", severity);
args.put("enable", enable);
const Properties *reply;
......@@ -1045,10 +1047,9 @@ struct ndb_mgm_event_categories
{ "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT },
{ "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG },
{ "INFO", NDB_MGM_EVENT_CATEGORY_INFO },
{ "WARNING", NDB_MGM_EVENT_CATEGORY_WARNING },
{ "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR },
{ "GREP", NDB_MGM_EVENT_CATEGORY_GREP },
{ "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP },
{ "CONGESTION", NDB_MGM_EVENT_CATEGORY_CONGESTION },
{ 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY }
};
......@@ -1080,13 +1081,13 @@ ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status)
extern "C"
int
ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle, int nodeId,
enum ndb_mgm_event_category cat,
int level,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR,
"Executing: ndb_mgm_set_loglevel_clusterlog");
"Executing: ndb_mgm_set_clusterlog_loglevel");
const ParserRow<ParserDummy> clusterlog_reply[] = {
MGM_CMD("set cluster loglevel reply", NULL, ""),
MGM_ARG("result", String, Mandatory, "Error message"),
......@@ -1105,7 +1106,7 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
"set cluster loglevel", &args);
CHECK_REPLY(reply, -1);
DBUG_ENTER("ndb_mgm_set_loglevel_clusterlog");
DBUG_ENTER("ndb_mgm_set_clusterlog_loglevel");
DBUG_PRINT("enter",("node=%d, category=%d, level=%d", nodeId, cat, level));
BaseString result;
......@@ -1157,7 +1158,7 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
extern "C"
int
ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[])
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event");
const ParserRow<ParserDummy> stat_reply[] = {
......@@ -1195,7 +1196,10 @@ ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
handle->socket = tmp;
if(reply == NULL) {
close(sockfd);
CHECK_REPLY(reply, -1);
}
return sockfd;
}
......
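As a usage note for the now-const filter argument, a hedged sketch of subscribing to events with the handle h from the previous sketch; the pair-wise { level, category } layout terminated by 0, and reading the returned descriptor as a plain socket, are assumptions:

int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
                 15, NDB_MGM_EVENT_CATEGORY_CONGESTION,
                 0 };
int fd = ndb_mgm_listen_event(h, filter);
if (fd >= 0) {
  char buf[512];
  int n = read(fd, buf, sizeof(buf) - 1);   // event reports arrive as text lines
  if (n > 0) { buf[n] = 0; printf("%s", buf); }
  close(fd);
}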
......@@ -1246,7 +1246,7 @@ CommandInterpreter::executeClusterLog(char* parameters)
DBUG_VOID_RETURN;
}
enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL;
enum ndb_mgm_event_severity severity = NDB_MGM_EVENT_SEVERITY_ALL;
char * tmpString = my_strdup(parameters,MYF(MY_WME));
My_auto_ptr<char> ap1(tmpString);
......@@ -1254,7 +1254,7 @@ CommandInterpreter::executeClusterLog(char* parameters)
char * item = strtok_r(tmpString, " ", &tmpPtr);
int enable;
Uint32 *enabled = ndb_mgm_get_logfilter(m_mgmsrv);
const unsigned int *enabled= ndb_mgm_get_logfilter(m_mgmsrv);
if(enabled == NULL) {
ndbout << "Couldn't get status" << endl;
printError();
......@@ -1276,8 +1276,8 @@ CommandInterpreter::executeClusterLog(char* parameters)
printf("enabled[%d] = %d\n", i, enabled[i]);
#endif
ndbout << "Severities enabled: ";
for(i = 1; i < (int)NDB_MGM_CLUSTERLOG_ALL; i++) {
const char *str= ndb_mgm_get_clusterlog_level_string((ndb_mgm_clusterlog_level)i);
for(i = 1; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) {
const char *str= ndb_mgm_get_event_severity_string((ndb_mgm_event_severity)i);
if (str == 0)
{
DBUG_ASSERT(false);
......@@ -1311,8 +1311,10 @@ CommandInterpreter::executeClusterLog(char* parameters)
int res_enable;
item = strtok_r(NULL, " ", &tmpPtr);
if (item == NULL) {
res_enable= ndb_mgm_filter_clusterlog(m_mgmsrv,
NDB_MGM_CLUSTERLOG_ON, enable, NULL);
res_enable=
ndb_mgm_set_clusterlog_severity_filter(m_mgmsrv,
NDB_MGM_EVENT_SEVERITY_ON,
enable, NULL);
if (res_enable < 0)
{
ndbout << "Couldn't set filter" << endl;
......@@ -1324,32 +1326,33 @@ CommandInterpreter::executeClusterLog(char* parameters)
}
do {
severity= NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL;
severity= NDB_MGM_ILLEGAL_EVENT_SEVERITY;
if (strcasecmp(item, "ALL") == 0) {
severity = NDB_MGM_CLUSTERLOG_ALL;
severity = NDB_MGM_EVENT_SEVERITY_ALL;
} else if (strcasecmp(item, "ALERT") == 0) {
severity = NDB_MGM_CLUSTERLOG_ALERT;
severity = NDB_MGM_EVENT_SEVERITY_ALERT;
} else if (strcasecmp(item, "CRITICAL") == 0) {
severity = NDB_MGM_CLUSTERLOG_CRITICAL;
severity = NDB_MGM_EVENT_SEVERITY_CRITICAL;
} else if (strcasecmp(item, "ERROR") == 0) {
severity = NDB_MGM_CLUSTERLOG_ERROR;
severity = NDB_MGM_EVENT_SEVERITY_ERROR;
} else if (strcasecmp(item, "WARNING") == 0) {
severity = NDB_MGM_CLUSTERLOG_WARNING;
severity = NDB_MGM_EVENT_SEVERITY_WARNING;
} else if (strcasecmp(item, "INFO") == 0) {
severity = NDB_MGM_CLUSTERLOG_INFO;
severity = NDB_MGM_EVENT_SEVERITY_INFO;
} else if (strcasecmp(item, "DEBUG") == 0) {
severity = NDB_MGM_CLUSTERLOG_DEBUG;
severity = NDB_MGM_EVENT_SEVERITY_DEBUG;
} else if (strcasecmp(item, "OFF") == 0 ||
strcasecmp(item, "ON") == 0) {
if (enable < 0) // only makes sense with toggle
severity = NDB_MGM_CLUSTERLOG_ON;
severity = NDB_MGM_EVENT_SEVERITY_ON;
}
if (severity == NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL) {
if (severity == NDB_MGM_ILLEGAL_EVENT_SEVERITY) {
ndbout << "Invalid severity level: " << item << endl;
DBUG_VOID_RETURN;
}
res_enable = ndb_mgm_filter_clusterlog(m_mgmsrv, severity, enable, NULL);
res_enable= ndb_mgm_set_clusterlog_severity_filter(m_mgmsrv, severity,
enable, NULL);
if (res_enable < 0)
{
ndbout << "Couldn't set filter" << endl;
......
......@@ -1105,6 +1105,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"0",
"15" },
{
CFG_LOGLEVEL_CONGESTION,
"LogLevelCongestion",
DB_TOKEN,
"Congestion info printed on stdout",
ConfigInfo::CI_USED,
false,
ConfigInfo::CI_INT,
"0",
"0",
"15" },
{
CFG_LOGLEVEL_ERROR,
"LogLevelError",
......
......@@ -1184,13 +1184,13 @@ MgmApiSession::startAll(Parser<MgmApiSession>::Context &,
void
MgmApiSession::setLogFilter(Parser_t::Context &ctx,
const class Properties &args) {
Uint32 level;
Uint32 severity;
Uint32 enable;
args.get("level", &level);
args.get("level", &severity);
args.get("enable", &enable);
int result = m_mgmsrv.setEventLogFilter(level, enable);
int result = m_mgmsrv.setEventLogFilter(severity, enable);
m_output->println("set logfilter reply");
m_output->println("result: %d", result);
......