Commit 7e6ab3a0 authored by unknown

Ndb.hpp, Ndb.cpp, ha_ndbcluster.cc:

  Add a check of whether setting an auto_increment field will change its next value before retrieving the tuple_id_range lock. This avoids lock contention when an auto_increment value is updated to a value lower than the current maximum. It is useful when loading a table with an auto_increment column by inserting the highest-numbered primary keys first and then proceeding backwards to the first: such a load can now be done with the same performance as a normal insert without auto_increment.
ndb_restore.result:
  Updated result file


mysql-test/suite/ndb/r/ndb_restore.result:
  Updated result file
sql/ha_ndbcluster.cc:
  Add a check of whether setting an auto_increment field will change its next value before retrieving the tuple_id_range lock. This avoids lock contention when an auto_increment value is updated to a value lower than the current maximum. It is useful when loading a table with an auto_increment column by inserting the highest-numbered primary keys first and then proceeding backwards to the first: such a load can now be done with the same performance as a normal insert without auto_increment.
storage/ndb/include/ndbapi/Ndb.hpp:
  Add a check of whether setting an auto_increment field will change its next value before retrieving the tuple_id_range lock. This avoids lock contention when an auto_increment value is updated to a value lower than the current maximum. It is useful when loading a table with an auto_increment column by inserting the highest-numbered primary keys first and then proceeding backwards to the first: such a load can now be done with the same performance as a normal insert without auto_increment.
storage/ndb/src/ndbapi/Ndb.cpp:
  Add a check of whether setting an auto_increment field will change its next value before retrieving the tuple_id_range lock. This avoids lock contention when an auto_increment value is updated to a value lower than the current maximum. It is useful when loading a table with an auto_increment column by inserting the highest-numbered primary keys first and then proceeding backwards to the first: such a load can now be done with the same performance as a normal insert without auto_increment.
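
To make the intent concrete, here is a minimal standalone C++ sketch of the fast-path check. It is illustrative only: the struct mirrors TupleIdRange and the helper function is an invented stand-in assuming the semantics of Ndb::checkTupleIdInNdb / Ndb::checkUpdateAutoIncrementValue shown in the Ndb.cpp hunk further down; the real check is a member of the Ndb class and is not reproduced here.

// Illustrative sketch only -- mirrors the decision in Ndb::checkTupleIdInNdb
// (see the Ndb.cpp hunk below); names outside that hunk are invented here.
#include <cstdint>
#include <cstdio>

typedef std::uint64_t Uint64;

struct TupleIdRange {
  Uint64 m_first_tuple_id;  // start of the locally cached id range
  Uint64 m_last_tuple_id;   // end of the locally cached id range
  Uint64 m_highest_seen;    // highest value seen when reading NEXTID
  void reset() {
    m_first_tuple_id = ~(Uint64)0;
    m_last_tuple_id  = ~(Uint64)0;
    m_highest_seen   = 0;
  }
};

// Returns true only when writing autoValue could have to advance the
// auto_increment counter, i.e. only then is it worth taking the
// tuple_id_range lock and touching SYSTAB_0.
static bool mightNeedAutoIncrementUpdate(const TupleIdRange& range,
                                         Uint64 autoValue) {
  // A cached range that already starts above autoValue proves the
  // counter is ahead of this value; nothing to update.
  if (range.m_first_tuple_id != ~(Uint64)0 &&
      range.m_first_tuple_id > autoValue)
    return false;
  // Likewise if a higher value has already been read from the kernel.
  if (range.m_highest_seen > autoValue)
    return false;
  return true;  // value could move the counter; caller must lock and verify
}

int main() {
  TupleIdRange range;
  range.reset();
  range.m_highest_seen = 10001;  // assume NEXTID was read as 10001 earlier

  // A value below the already-seen maximum skips the lock entirely:
  std::printf("set 3000  -> lock needed: %s\n",
              mightNeedAutoIncrementUpdate(range, 3000) ? "yes" : "no");
  // A value beyond the seen maximum still takes the locked path:
  std::printf("set 20000 -> lock needed: %s\n",
              mightNeedAutoIncrementUpdate(range, 20000) ? "yes" : "no");
  return 0;
}

This is the pattern the ha_ndbcluster.cc hunk below follows: only when the check says the counter could move does it construct Ndb_tuple_id_range_guard and call setAutoIncrementValue.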
parent 57cea8f6
@@ -18,12 +18,12 @@ CREATE TABLE `t2_c` (
PRIMARY KEY (`capgotod`),
KEY `i quadaddsvr` (`gotod`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,'');
INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST');
CREATE TABLE `t3_c` (
`CapGoaledatta` smallint(5) unsigned NOT NULL default '0',
`capgotod` smallint(5) unsigned NOT NULL default '0',
PRIMARY KEY (`capgotod`,`CapGoaledatta`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED;
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t3_c` VALUES (5,3),(2,4),(5,4),(1,3);
CREATE TABLE `t4_c` (
`capfa` bigint(20) unsigned NOT NULL auto_increment,
@@ -116,8 +116,8 @@ CREATE TABLE `t9_c` (
PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3);
CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t10_c VALUES (1),(2),(3);
create table t10_c (a int auto_increment key) ENGINE=ndbcluster;
insert into t10_c values (1),(2),(3);
insert into t10_c values (10000),(2000),(3000);
create table t1 engine=myisam as select * from t1_c;
create table t2 engine=myisam as select * from t2_c;
@@ -129,8 +129,6 @@ create table t7 engine=myisam as select * from t7_c;
create table t8 engine=myisam as select * from t8_c;
create table t9 engine=myisam as select * from t9_c;
create table t10 engine=myisam as select * from t10_c;
ForceVarPart: 0
ForceVarPart: 1
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
@@ -138,14 +136,29 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
ForceVarPart: 0
ForceVarPart: 1
select * from information_schema.columns where table_name = "t1_c";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT
NULL test t1_c capgoaledatta 1 NULL NO mediumint NULL NULL 7 0 NULL NULL mediumint(5) unsigned PRI auto_increment select,insert,update,references
NULL test t1_c goaledatta 2 NO char 2 2 NULL NULL latin1 latin1_swedish_ci char(2) PRI select,insert,update,references
NULL test t1_c maturegarbagefa 3 NO varchar 32 32 NULL NULL latin1 latin1_swedish_ci varchar(32) PRI select,insert,update,references
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c;
show tables;
Tables_in_test
t1
t10
t2
t3
t4
t5
t6
t7
t8
t9
t4_c
t3_c
t2_c
t5_c
t6_c
t7_c
t8_c
t9_c
t10_c
t1_c
select count(*) from t1;
count(*)
5
@@ -159,15 +172,15 @@ count(*)
5
select count(*) from t2;
count(*)
7
6
select count(*) from t2_c;
count(*)
7
6
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
7
6
select count(*) from t3;
count(*)
4
@@ -253,238 +266,41 @@ a
2000
3000
10000
show table status like 't1_c';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
X X X X X X X X X X 3001 X X X X X X X
show table status like 't2_c';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
X X X X X X X X X X 501 X X X X X X X
show table status like 't4_c';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
X X X X X X X X X X 290000001 X X X X X X X
show table status like 't7_c';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
X X X X X X X X X X 29 X X X X X X X
show table status like 't10_c';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
X X X X X X X X X X 10001 X X X X X X X
ALTER TABLE t7_c
PARTITION BY LINEAR KEY (`dardtestard`);
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
select count(*) from t1;
count(*)
5
select count(*) from t1_c;
count(*)
5
select count(*)
from (select * from t1 union
select * from t1_c) a;
count(*)
5
select count(*) from t2;
count(*)
7
select count(*) from t2_c;
count(*)
7
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
7
select count(*) from t3;
count(*)
4
select count(*) from t3_c;
count(*)
4
select count(*)
from (select * from t3 union
select * from t3_c) a;
count(*)
4
select count(*) from t4;
count(*)
22
select count(*) from t4_c;
count(*)
22
select count(*)
from (select * from t4 union
select * from t4_c) a;
count(*)
22
select count(*) from t5;
count(*)
3
select count(*) from t5_c;
count(*)
3
select count(*)
from (select * from t5 union
select * from t5_c) a;
count(*)
3
select count(*) from t6;
count(*)
8
select count(*) from t6_c;
count(*)
8
select count(*)
from (select * from t6 union
select * from t6_c) a;
count(*)
8
select count(*) from t7;
count(*)
5
select count(*) from t7_c;
count(*)
5
select count(*)
from (select * from t7 union
select * from t7_c) a;
count(*)
5
select count(*) from t8;
count(*)
3
select count(*) from t8_c;
count(*)
3
select count(*)
from (select * from t8 union
select * from t8_c) a;
count(*)
3
select count(*) from t9;
count(*)
3
select count(*) from t9_c;
count(*)
3
select count(*)
from (select * from t9 union
select * from t9_c) a;
count(*)
3
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
select count(*) from t1;
count(*)
5
select count(*) from t1_c;
count(*)
5
select count(*)
from (select * from t1 union
select * from t1_c) a;
count(*)
5
select count(*) from t2;
count(*)
7
select count(*) from t2_c;
count(*)
7
select count(*)
from (select * from t2 union
select * from t2_c) a;
count(*)
7
select count(*) from t3;
count(*)
4
select count(*) from t3_c;
count(*)
4
select count(*)
from (select * from t3 union
select * from t3_c) a;
count(*)
4
select count(*) from t4;
count(*)
22
select count(*) from t4_c;
count(*)
22
select count(*)
from (select * from t4 union
select * from t4_c) a;
count(*)
22
select count(*) from t5;
count(*)
3
select count(*) from t5_c;
count(*)
3
select count(*)
from (select * from t5 union
select * from t5_c) a;
count(*)
3
select count(*) from t6;
count(*)
8
select count(*) from t6_c;
count(*)
8
select count(*)
from (select * from t6 union
select * from t6_c) a;
count(*)
8
select count(*) from t7;
count(*)
5
select count(*) from t7_c;
count(*)
5
select count(*)
from (select * from t7 union
select * from t7_c) a;
count(*)
5
select count(*) from t8;
count(*)
3
select count(*) from t8_c;
count(*)
3
select count(*)
from (select * from t8 union
select * from t8_c) a;
count(*)
3
select count(*) from t9;
count(*)
3
select count(*) from t9_c;
count(*)
3
select count(*)
from (select * from t9 union
select * from t9_c) a;
count(*)
3
drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
drop table if exists t2_c;
select max(capgoaledatta) from t1_c;
max(capgoaledatta)
3000
select auto_increment from information_schema.tables
where table_name = 't1_c';
auto_increment
3001
select max(capgotod) from t2_c;
max(capgotod)
500
select auto_increment from information_schema.tables
where table_name = 't2_c';
auto_increment
501
select max(capfa) from t4_c;
max(capfa)
290000000
select auto_increment from information_schema.tables
where table_name = 't4_c';
auto_increment
290000001
select max(dardtestard) from t7_c;
max(dardtestard)
28
select auto_increment from information_schema.tables
where table_name = 't7_c';
auto_increment
29
select max(a) from t10_c;
max(a)
10000
select auto_increment from information_schema.tables
where table_name = 't10_c';
auto_increment
10001
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9, t10;
drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c;
520093696,<the_backup_id>
@@ -2738,10 +2738,13 @@ ha_ndbcluster::set_auto_inc(Field *field)
("Trying to set next auto increment value to %s",
llstr(next_val, buff)));
#endif
Ndb_tuple_id_range_guard g(m_share);
if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
ERR_RETURN(ndb->getNdbError());
if (ndb->checkUpdateAutoIncrementValue(m_share->tuple_id_range, next_val))
{
Ndb_tuple_id_range_guard g(m_share);
if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
ERR_RETURN(ndb->getNdbError());
}
DBUG_RETURN(0);
}
......
@@ -1515,37 +1515,40 @@ public:
TupleIdRange() {}
Uint64 m_first_tuple_id;
Uint64 m_last_tuple_id;
Uint64 m_highest_seen;
void reset() {
m_first_tuple_id = ~(Uint64)0;
m_last_tuple_id = ~(Uint64)0;
m_highest_seen = 0;
};
};
int initAutoIncrement();
int getAutoIncrementValue(const char* aTableName,
Uint64 & tupleId, Uint32 cacheSize,
Uint64 & autoValue, Uint32 cacheSize,
Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 & tupleId, Uint32 cacheSize,
Uint64 & autoValue, Uint32 cacheSize,
Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 & tupleId,
TupleIdRange & range, Uint64 & autoValue,
Uint32 cacheSize,
Uint64 step = 1, Uint64 start = 1);
int readAutoIncrementValue(const char* aTableName,
Uint64 & tupleId);
Uint64 & autoValue);
int readAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 & tupleId);
Uint64 & autoValue);
int readAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 & tupleId);
TupleIdRange & range, Uint64 & autoValue);
int setAutoIncrementValue(const char* aTableName,
Uint64 tupleId, bool modify);
Uint64 autoValue, bool modify);
int setAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 tupleId, bool modify);
Uint64 autoValue, bool modify);
int setAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 tupleId,
TupleIdRange & range, Uint64 autoValue,
bool modify);
bool checkUpdateAutoIncrementValue(TupleIdRange & range, Uint64 autoValue);
private:
int getTupleIdFromNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & tupleId,
@@ -1554,6 +1557,8 @@ private:
TupleIdRange & range, Uint64 & tupleId);
int setTupleIdInNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 tupleId, bool modify);
int checkTupleIdInNdb(TupleIdRange & range,
Uint64 tupleId);
int opTupleIdOnNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & opValue, Uint32 op);
public:
......
@@ -942,6 +942,7 @@ Parameters: aTableName (IN) : The table name.
step (IN) : Specifies the step between the
autoincrement values.
start (IN) : Start value for first value
Returns: 0 if successful, -1 if error encountered
Remark: Returns a new autoincrement value to the application.
The autoincrement values can be increased by steps
(default 1) and a number of values can be prefetched
@@ -1072,9 +1073,18 @@ Ndb::getTupleIdFromNdb(const NdbTableImpl* table,
DBUG_RETURN(0);
}
/****************************************************************************
int readAutoIncrementValue( const char* aTableName,
Uint64 & autoValue);
Parameters: aTableName (IN) : The table name.
autoValue (OUT) : The current autoincrement value
Returns: 0 if successful, -1 if error encountered
Remark: Returns the current autoincrement value to the application.
****************************************************************************/
int
Ndb::readAutoIncrementValue(const char* aTableName,
Uint64 & tupleId)
Uint64 & autoValue)
{
DBUG_ENTER("Ndb::readAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@@ -1088,15 +1098,15 @@ Ndb::readAutoIncrementValue(const char* aTableName,
}
const NdbTableImpl* table = info->m_table_impl;
TupleIdRange & range = info->m_tuple_id_range;
if (readTupleIdFromNdb(table, range, tupleId) == -1)
if (readTupleIdFromNdb(table, range, autoValue) == -1)
DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
int
Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 & tupleId)
Uint64 & autoValue)
{
DBUG_ENTER("Ndb::readAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@@ -1111,23 +1121,23 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
DBUG_RETURN(-1);
}
TupleIdRange & range = info->m_tuple_id_range;
if (readTupleIdFromNdb(table, range, tupleId) == -1)
if (readTupleIdFromNdb(table, range, autoValue) == -1)
DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
int
Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 & tupleId)
TupleIdRange & range, Uint64 & autoValue)
{
DBUG_ENTER("Ndb::readAutoIncrementValue");
assert(aTable != 0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
if (readTupleIdFromNdb(table, range, tupleId) == -1)
if (readTupleIdFromNdb(table, range, autoValue) == -1)
DBUG_RETURN(-1);
DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
@@ -1155,9 +1165,20 @@ Ndb::readTupleIdFromNdb(const NdbTableImpl* table,
DBUG_RETURN(0);
}
/****************************************************************************
int setAutoIncrementValue( const char* aTableName,
Uint64 autoValue,
bool modify);
Parameters: aTableName (IN) : The table name.
autoValue (IN) : The new autoincrement value
modify (IN) : Modify existing value (not initialization)
Returns: 0 if successful, -1 if error encountered
Remark: Sets a new autoincrement value for the application.
****************************************************************************/
int
Ndb::setAutoIncrementValue(const char* aTableName,
Uint64 tupleId, bool increase)
Uint64 autoValue, bool modify)
{
DBUG_ENTER("Ndb::setAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@@ -1171,14 +1192,14 @@ Ndb::setAutoIncrementValue(const char* aTableName,
}
const NdbTableImpl* table = info->m_table_impl;
TupleIdRange & range = info->m_tuple_id_range;
if (setTupleIdInNdb(table, range, tupleId, increase) == -1)
if (setTupleIdInNdb(table, range, autoValue, modify) == -1)
DBUG_RETURN(-1);
DBUG_RETURN(0);
}
int
Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable,
Uint64 tupleId, bool increase)
Uint64 autoValue, bool modify)
{
DBUG_ENTER("Ndb::setAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@@ -1193,52 +1214,55 @@ Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable,
DBUG_RETURN(-1);
}
TupleIdRange & range = info->m_tuple_id_range;
if (setTupleIdInNdb(table, range, tupleId, increase) == -1)
if (setTupleIdInNdb(table, range, autoValue, modify) == -1)
DBUG_RETURN(-1);
DBUG_RETURN(0);
}
int
Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 tupleId,
bool increase)
TupleIdRange & range, Uint64 autoValue,
bool modify)
{
DBUG_ENTER("Ndb::setAutoIncrementValue");
assert(aTable != 0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
if (setTupleIdInNdb(table, range, tupleId, increase) == -1)
if (setTupleIdInNdb(table, range, autoValue, modify) == -1)
DBUG_RETURN(-1);
DBUG_RETURN(0);
}
int
Ndb::setTupleIdInNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 tupleId, bool increase)
TupleIdRange & range, Uint64 tupleId, bool modify)
{
DBUG_ENTER("Ndb::setTupleIdInNdb");
if (increase)
if (modify)
{
if (range.m_first_tuple_id != range.m_last_tuple_id)
if (checkTupleIdInNdb(range, tupleId))
{
assert(range.m_first_tuple_id < range.m_last_tuple_id);
if (tupleId <= range.m_first_tuple_id + 1)
DBUG_RETURN(0);
if (tupleId <= range.m_last_tuple_id)
if (range.m_first_tuple_id != range.m_last_tuple_id)
{
range.m_first_tuple_id = tupleId - 1;
DBUG_PRINT("info",
("Setting next auto increment cached value to %lu",
(ulong)tupleId));
DBUG_RETURN(0);
assert(range.m_first_tuple_id < range.m_last_tuple_id);
if (tupleId <= range.m_first_tuple_id + 1)
DBUG_RETURN(0);
if (tupleId <= range.m_last_tuple_id)
{
range.m_first_tuple_id = tupleId - 1;
DBUG_PRINT("info",
("Setting next auto increment cached value to %lu",
(ulong)tupleId));
DBUG_RETURN(0);
}
}
/*
* if tupleId <= NEXTID, do nothing. otherwise update NEXTID to
* tupleId and set cached range to first = last = tupleId - 1.
*/
if (opTupleIdOnNdb(table, range, tupleId, 2) == -1)
DBUG_RETURN(-1);
}
/*
* if tupleId <= NEXTID, do nothing. otherwise update NEXTID to
* tupleId and set cached range to first = last = tupleId - 1.
*/
if (opTupleIdOnNdb(table, range, tupleId, 2) == -1)
DBUG_RETURN(-1);
}
else
{
@@ -1277,6 +1301,39 @@ int Ndb::initAutoIncrement()
return 0;
}
bool
Ndb::checkUpdateAutoIncrementValue(TupleIdRange & range, Uint64 autoValue)
{
return(checkTupleIdInNdb(range, autoValue) != 0);
}
int
Ndb::checkTupleIdInNdb(TupleIdRange & range, Uint64 tupleId)
{
DBUG_ENTER("Ndb::checkTupleIdIndNdb");
if ((range.m_first_tuple_id != ~(Uint64)0) &&
(range.m_first_tuple_id > tupleId))
{
/*
* If we have ever cached a value in this object and this cached
* value is larger than the value we're trying to set then we
* need not check with the real value in the SYSTAB_0 table.
*/
DBUG_RETURN(0);
}
if (range.m_highest_seen > tupleId)
{
/*
* Although we've never cached any higher value we have read
* a higher value and again it isn't necessary to change the
* auto increment value.
*/
DBUG_RETURN(0);
}
DBUG_RETURN(1);
}
int
Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & opValue, Uint32 op)
......