Commit c0a46fc4 authored by mskold@mysql.com

Merge mskold@bk-internal.mysql.com:/home/bk/mysql-5.0-ndb

into mysql.com:/usr/local/home/marty/MySQL/mysql-5.0-ndb
parents 9ab13311 c9502b55
@@ -47,7 +47,7 @@ my_bool _hash_init(HASH *hash, CHARSET_INFO *charset,
uint key_length, hash_get_key get_key,
void (*free_element)(void*), uint flags CALLER_INFO_PROTO);
void hash_free(HASH *tree);
-void hash_reset(HASH *hash);
+void my_hash_reset(HASH *hash);
byte *hash_element(HASH *hash,uint idx);
gptr hash_search(HASH *info,const byte *key,uint length);
gptr hash_next(HASH *info,const byte *key,uint length);
@@ -1300,7 +1300,7 @@ insert into t1 values('+ ', '+ ', '+ ');
set @a=repeat(' ',20);
insert into t1 values (concat('+',@a),concat('+',@a),concat('+',@a));
Warnings:
-Warning 1265 Data truncated for column 'v' at row 1
+Note 1265 Data truncated for column 'v' at row 1
select concat('*',v,'*',c,'*',t,'*') from t1;
concat('*',v,'*',c,'*',t,'*')
*+ *+*+ *
@@ -1346,7 +1346,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1
alter table t1 modify t varchar(10);
Warnings:
-Warning 1265 Data truncated for column 't' at row 2
+Note 1265 Data truncated for column 't' at row 2
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -255,7 +255,7 @@ insert into t1 values('+ ', '+ ', '+ ');
set @a=repeat(' ',20);
insert into t1 values (concat('+',@a),concat('+',@a),concat('+',@a));
Warnings:
-Warning 1265 Data truncated for column 'v' at row 1
+Note 1265 Data truncated for column 'v' at row 1
select concat('*',v,'*',c,'*',t,'*') from t1;
concat('*',v,'*',c,'*',t,'*')
*+ *+*+ *
@@ -571,7 +571,7 @@ insert into t1 values('+ ', '+ ', '+ ');
set @a=repeat(' ',20);
insert into t1 values (concat('+',@a),concat('+',@a),concat('+',@a));
Warnings:
-Warning 1265 Data truncated for column 'v' at row 1
+Note 1265 Data truncated for column 'v' at row 1
select concat('*',v,'*',c,'*',t,'*') from t1;
concat('*',v,'*',c,'*',t,'*')
*+ *+*+ *
@@ -617,7 +617,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
alter table t1 modify t varchar(10);
Warnings:
-Warning 1265 Data truncated for column 't' at row 2
+Note 1265 Data truncated for column 't' at row 2
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -529,3 +529,19 @@ count(*)-8
select count(*)-9 from t1 use index (ti) where ti <= '23:59:59';
count(*)-9
0
+drop table t1;
+create table t1(a int primary key, b int not null, index(b));
+insert into t1 values (1,1), (2,2);
+set autocommit=0;
+begin;
+select count(*) from t1;
+count(*)
+2
+ALTER TABLE t1 ADD COLUMN c int;
+select a from t1 where b = 2;
+a
+2
+show tables;
+Tables_in_test
+t1
+drop table t1;
@@ -70,7 +70,7 @@ def test t9 t9 c18 c18 1 4 1 Y 32768 0 63
def test t9 t9 c19 c19 1 1 1 Y 32768 0 63
def test t9 t9 c20 c20 254 1 1 Y 0 0 8
def test t9 t9 c21 c21 254 10 10 Y 0 0 8
-def test t9 t9 c22 c22 254 30 30 Y 0 0 8
+def test t9 t9 c22 c22 253 30 30 Y 0 0 8
def test t9 t9 c23 c23 252 255 8 Y 144 0 63
def test t9 t9 c24 c24 252 255 8 Y 16 0 8
def test t9 t9 c25 c25 252 65535 4 Y 144 0 63
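In these metadata rows the columns after the name are the wire-protocol type code, length, max length, nullability, flags, decimals, and charset number: 254 is MYSQL_TYPE_STRING and 253 is MYSQL_TYPE_VAR_STRING, so c22 (a VARCHAR) is now reported with the VAR_STRING code; the @arg32 rows below likewise drop the BINARY flag (128) and the binary charset number (63) in favour of latin1 (8). A hedged client-side sketch for inspecting this metadata (assumes an open connection mysql and an already executed query):

/* Sketch: print each result column's protocol type code and flags. */
MYSQL_RES *res= mysql_store_result(mysql);
MYSQL_FIELD *fld;
while ((fld= mysql_fetch_field(res)))
  printf("%s type=%u flags=%u charsetnr=%u\n",
         fld->name, (uint) fld->type, fld->flags, fld->charsetnr);
mysql_free_result(res);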
@@ -1926,7 +1926,7 @@ def @arg28 253 8192 10 Y 0 31 8
def @arg29 253 8192 8 Y 128 31 63
def @arg30 253 8192 8 Y 0 31 8
def @arg31 253 8192 3 Y 0 31 8
-def @arg32 253 8192 6 Y 128 31 63
+def @arg32 253 8192 6 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday
select @arg01:= c1, @arg02:= c2, @arg03:= c3, @arg04:= c4,
@@ -2023,7 +2023,7 @@ def @arg28 253 8192 10 Y 0 31 8
def @arg29 253 8192 8 Y 128 31 63
def @arg30 253 8192 8 Y 0 31 8
def @arg31 253 8192 3 Y 0 31 8
-def @arg32 253 8192 6 Y 128 31 63
+def @arg32 253 8192 6 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday
set @my_key= 0 ;
@@ -2111,7 +2111,7 @@ def @arg28 253 8192 10 Y 0 31 8
def @arg29 253 8192 8 Y 128 31 63
def @arg30 253 8192 8 Y 0 31 8
def @arg31 253 8192 3 Y 0 31 8
-def @arg32 253 8192 6 Y 128 31 63
+def @arg32 253 8192 6 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday
select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
@@ -2201,7 +2201,7 @@ def @arg28 253 8192 10 Y 0 31 8
def @arg29 253 8192 8 Y 128 31 63
def @arg30 253 8192 8 Y 0 31 8
def @arg31 253 8192 3 Y 0 31 8
-def @arg32 253 8192 6 Y 128 31 63
+def @arg32 253 8192 6 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
1 1 1 1 1 1 1 1 1 1 1 1 2004-02-29 2004-02-29 11:11:11 2004-02-29 11:11:11 11:11:11 2004 1 1 a 123456789a 123456789a123456789b123456789c tinyblob tinytext blob text mediumblob mediumtext longblob longtext one monday
set @my_key= 0 ;
@@ -818,7 +818,6 @@ ERROR 01000: Data truncated for column 'col1' at row 1
INSERT INTO t1 (col2) VALUES ('hellobob');
ERROR 01000: Data truncated for column 'col2' at row 1
INSERT INTO t1 (col2) VALUES ('hello ');
-ERROR 01000: Data truncated for column 'col2' at row 1
UPDATE t1 SET col1 ='hellobob' WHERE col1 ='he';
ERROR 01000: Data truncated for column 'col1' at row 2
UPDATE t1 SET col2 ='hellobob' WHERE col2 ='he';
@@ -835,6 +834,7 @@ col1 col2
hello hello
he hellot
hello hello
+NULL hello
hello hellob
DROP TABLE t1;
CREATE TABLE t1 (col1 enum('red','blue','green'));
@@ -267,3 +267,21 @@ select count(*)-5 from t1 use index (ti) where ti < '10:11:11';
select count(*)-6 from t1 use index (ti) where ti <= '10:11:11';
select count(*)-8 from t1 use index (ti) where ti < '23:59:59';
select count(*)-9 from t1 use index (ti) where ti <= '23:59:59';
+drop table t1;
+# bug#7798
+create table t1(a int primary key, b int not null, index(b));
+insert into t1 values (1,1), (2,2);
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+connection con1;
+set autocommit=0;
+begin;
+select count(*) from t1;
+connection con2;
+ALTER TABLE t1 ADD COLUMN c int;
+connection con1;
+select a from t1 where b = 2;
+show tables;
+drop table t1;
@@ -122,13 +122,13 @@ void hash_free(HASH *hash)
Delete all elements from the hash (the hash itself is to be reused).
SYNOPSIS
-hash_reset()
+my_hash_reset()
hash the hash to delete elements of
*/
-void hash_reset(HASH *hash)
+void my_hash_reset(HASH *hash)
{
-DBUG_ENTER("hash_reset");
+DBUG_ENTER("my_hash_reset");
DBUG_PRINT("enter",("hash: 0x%lxd",hash));
hash_free_elements(hash);
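The hash_reset() to my_hash_reset() rename keeps the symbol in the my_ namespace used elsewhere in mysys (compare my_hash_insert), avoiding clashes with identically named symbols in other code. As the SYNOPSIS above says, the hash itself stays initialized for reuse; a minimal usage sketch against the declarations in the first hunk (get_key and free_entry are hypothetical callbacks, and hash_init is assumed to be the usual macro front-end for _hash_init):

HASH h;
if (hash_init(&h, &my_charset_bin, 32, 0, 0, get_key, free_entry, 0))
  return 1;                      /* allocation failure */
/* ... fill with my_hash_insert(&h, (byte*) entry) ... */
my_hash_reset(&h);               /* drop all elements, keep h initialized */
/* ... fill again for the next batch ... */
hash_free(&h);                   /* final teardown releases the hash itself */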
@@ -1419,6 +1419,6 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){
Transporter*
TransporterRegistry::get_transporter(NodeId nodeId) {
return theTransporters[nodeId];
-};
+}
template class Vector<TransporterRegistry::Transporter_interface>;
@@ -1920,7 +1920,6 @@ void Dbtc::packKeyData000Lab(Signal* signal,
Uint32 totalLen)
{
CacheRecord * const regCachePtr = cachePtr.p;
-UintR Tmp;
jam();
Uint32 len = 0;
@@ -8646,14 +8645,16 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
apiConnectptr.i = scanTabReq->apiConnectPtr;
tabptr.i = scanTabReq->tableId;
-if (apiConnectptr.i >= capiConnectFilesize ||
-tabptr.i >= ctabrecFilesize) {
+if (apiConnectptr.i >= capiConnectFilesize)
+{
jam();
warningHandlerLab(signal);
return;
}//if
ptrAss(apiConnectptr, apiConnectRecord);
+ApiConnectRecord * transP = apiConnectptr.p;
if (transP->apiConnectstate != CS_CONNECTED) {
jam();
// could be left over from TCKEYREQ rollback
@@ -8667,9 +8668,16 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
} else {
jam();
errCode = ZSTATE_ERROR;
-goto SCAN_TAB_error;
+goto SCAN_TAB_error_no_state_change;
}
}
+if(tabptr.i >= ctabrecFilesize)
+{
+errCode = ZUNKNOWN_TABLE_ERROR;
+goto SCAN_TAB_error;
+}
+ptrAss(tabptr, tableRecord);
if ((aiLength == 0) ||
(!tabptr.p->checkTable(schemaVersion)) ||
@@ -8766,8 +8774,18 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
errCode = ZNO_SCANREC_ERROR;
goto SCAN_TAB_error;
-SCAN_TAB_error:
+SCAN_TAB_error:
jam();
+/**
+* Prepare for upcoming ATTRINFO/KEYINFO
+*/
+transP->apiConnectstate = CS_ABORTING;
+transP->abortState = AS_IDLE;
+transP->transid[0] = transid1;
+transP->transid[1] = transid2;
+SCAN_TAB_error_no_state_change:
ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
ref->apiConnectPtr = transP->ndbapiConnect;
ref->transId1 = transid1;
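Taken together, the three Dbtc hunks reorder validation in execSCAN_TABREQ: the api-connect record is range-checked before transP is bound and dereferenced, an out-of-range tableId is now reported as ZUNKNOWN_TABLE_ERROR through the state-changing exit, and only the not-connected case bypasses the state change. A condensed control-flow sketch (names as in the hunks; not the full handler):

if (apiConnectptr.i >= capiConnectFilesize) {
  warningHandlerLab(signal);              // no record to attach an error to
  return;
}
ptrAss(apiConnectptr, apiConnectRecord);
ApiConnectRecord *transP = apiConnectptr.p;
if (transP->apiConnectstate != CS_CONNECTED) {
  errCode = ZSTATE_ERROR;
  goto SCAN_TAB_error_no_state_change;    // connection keeps its own state
}
if (tabptr.i >= ctabrecFilesize) {
  errCode = ZUNKNOWN_TABLE_ERROR;
  goto SCAN_TAB_error;                    // sets CS_ABORTING before replying
}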
@@ -4672,6 +4672,7 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
uint32 not_used, copy_length;
char buff[80];
String tmpstr(buff,sizeof(buff), &my_charset_bin);
+enum MYSQL_ERROR::enum_warning_level level= MYSQL_ERROR::WARN_LEVEL_WARN;
/* Convert character set if necessary */
if (String::needs_conversion(length, cs, field_charset, &not_used))
@@ -4697,10 +4698,23 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
else
int2store(ptr, copy_length);
-if (copy_length < length)
+// Check if we lost something other than just trailing spaces
+if ((copy_length < length) && table->in_use->count_cuted_fields)
+{
+const char *end= from + length;
+from+= copy_length;
+from+= field_charset->cset->scan(field_charset, from, end, MY_SEQ_SPACES);
+/*
+If we lost only spaces then produce a NOTE, not a WARNING.
+But if we have already had errors (e.g. with charset conversion),
+then don't reset level to NOTE.
+*/
+if (from == end && !error)
+level= MYSQL_ERROR::WARN_LEVEL_NOTE;
error= 1;
+}
if (error)
-set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1);
+set_warning(level, ER_WARN_DATA_TRUNCATED, 1);
return error;
}
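The effect of this Field_varstring::store() change is that truncation is downgraded to a NOTE exactly when everything cut off was trailing spaces and no earlier error (such as a failed charset conversion) was seen; this is what flips the Warning 1265 lines to Note 1265 in the result files above. In isolation the decision looks roughly like this (a sketch assuming a single-byte charset; the real code stays charset-aware through field_charset->cset->scan with MY_SEQ_SPACES):

/* Sketch: was anything other than trailing spaces lost to truncation? */
static bool lost_only_spaces(const char *from, uint copy_length, uint length)
{
  const char *p= from + copy_length;   /* first byte that was cut off */
  const char *end= from + length;
  while (p < end && *p == ' ')
    p++;
  return p == end;                     /* true: only spaces were dropped */
}
/* level= (lost_only_spaces(from, copy_length, length) && !error)
          ? WARN_LEVEL_NOTE : WARN_LEVEL_WARN; */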
@@ -156,8 +156,8 @@ static int ndb_to_mysql_error(const NdbError *err)
inline
int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans)
{
-int m_batch_execute= 0;
#ifdef NOT_USED
+int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
@@ -169,8 +169,8 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans)
inline
int execute_commit(ha_ndbcluster *h, NdbTransaction *trans)
{
-int m_batch_execute= 0;
#ifdef NOT_USED
+int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
@@ -182,8 +182,8 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans)
inline
int execute_commit(THD *thd, NdbTransaction *trans)
{
-int m_batch_execute= 0;
#ifdef NOT_USED
+int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
@@ -195,8 +195,8 @@ int execute_commit(THD *thd, NdbTransaction *trans)
inline
int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans)
{
-int m_batch_execute= 0;
#ifdef NOT_USED
+int m_batch_execute= 0;
if (m_batch_execute)
return 0;
#endif
@@ -810,7 +810,7 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
{
uint i;
int error= 0;
-const char *name, *index_name;
+const char *index_name;
char unique_index_name[FN_LEN];
static const char* unique_suffix= "$unique";
KEY* key_info= tab->key_info;
@@ -1118,10 ha_ndbcluster::set_index_key(NdbOperation *op,
int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
-uint no_fields= table->s->fields, i;
+uint no_fields= table->s->fields;
NdbConnection *trans= m_active_trans;
NdbOperation *op;
-THD *thd= current_thd;
int res;
DBUG_ENTER("pk_read");
DBUG_PRINT("enter", ("key_len: %u", key_len));
@@ -1238,7 +1238,6 @@ int ha_ndbcluster::peek_row()
{
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
-THD *thd= current_thd;
DBUG_ENTER("peek_row");
NdbOperation::LockMode lm=
@@ -1345,8 +1344,11 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
{
if (execute_commit(this,trans) != 0)
DBUG_RETURN(-1);
-int res= trans->restart();
-DBUG_ASSERT(res == 0);
+if(trans->restart() != 0)
+{
+DBUG_ASSERT(0);
+DBUG_RETURN(-1);
+}
}
m_ops_pending= 0;
}
@@ -1441,7 +1443,9 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
{
KEY_PART_INFO *key_part= &key_info->key_part[i];
Field *field= key_part->field;
+#ifndef DBUG_OFF
uint part_len= key_part->length;
+#endif
uint part_store_len= key_part->store_length;
// Info about each key part
struct part_st {
@@ -1586,7 +1590,6 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
{
uint i;
-THD *thd= current_thd;
NdbTransaction *trans= m_active_trans;
DBUG_ENTER("define_read_attrs");
@@ -1780,7 +1783,6 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len,
int ha_ndbcluster::full_table_scan(byte *buf)
{
uint i;
-int res;
NdbScanOperation *op;
NdbTransaction *trans= m_active_trans;
@@ -1893,14 +1895,12 @@ int ha_ndbcluster::write_row(byte *record)
((m_rows_inserted % m_bulk_insert_rows) == 0) ||
set_blob_value)
{
-THD *thd= current_thd;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
(int)m_rows_inserted, (int)m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
-// if (thd->transaction.on)
if (m_transaction_on)
{
if (execute_no_commit(this,trans) != 0)
@@ -1918,8 +1918,11 @@ int ha_ndbcluster::write_row(byte *record)
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
-int res= trans->restart();
-DBUG_ASSERT(res == 0);
+if(trans->restart() != 0)
+{
+DBUG_ASSERT(0);
+DBUG_RETURN(-1);
+}
}
}
if ((has_auto_increment) && (m_skip_auto_increment))
@@ -2220,7 +2223,10 @@ void ha_ndbcluster::unpack_record(byte* buf)
{
NdbBlob* ndb_blob= (*value).blob;
bool isNull= TRUE;
-int ret= ndb_blob->getNull(isNull);
+#ifndef DBUG_OFF
+int ret=
+#endif
+ndb_blob->getNull(isNull);
DBUG_ASSERT(ret == 0);
if (isNull)
(*field)->set_null(row_offset);
@@ -2252,32 +2258,35 @@ void ha_ndbcluster::unpack_record(byte* buf)
void ha_ndbcluster::print_results()
{
-const NDBTAB *tab= (const NDBTAB*) m_table;
DBUG_ENTER("print_results");
#ifndef DBUG_OFF
+const NDBTAB *tab= (const NDBTAB*) m_table;
if (!_db_on_)
DBUG_VOID_RETURN;
+char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
+String type(buf_type, sizeof(buf_type), &my_charset_bin);
+String val(buf_val, sizeof(buf_val), &my_charset_bin);
for (uint f=0; f<table->s->fields;f++)
{
// Use DBUG_PRINT since DBUG_FILE cannot be filtered out
char buf[2000];
Field *field;
void* ptr;
-const NDBCOL *col;
+const NDBCOL *col= NULL;
NdbValue value;
NdbBlob *ndb_blob;
-buf[0] = 0;
+buf[0]= 0;
+field= table->field[f];
if (!(value= m_value[f]).ptr)
{
my_snprintf(buf, sizeof(buf), "not read");
goto print_value;
}
-field= table->field[f];
ptr= field->ptr;
DBUG_DUMP("field->ptr", (char*)ptr, field->pack_length());
col= tab->getColumn(f);
@@ -2290,6 +2299,11 @@ void ha_ndbcluster::print_results()
my_snprintf(buf, sizeof(buf), "NULL");
goto print_value;
}
+type.length(0);
+val.length(0);
+field->sql_type(type);
+field->val_str(&val);
+my_snprintf(buf, sizeof(buf), "%s %s", type.c_ptr(), val.c_ptr());
}
else
{
@@ -2302,142 +2316,6 @@ void ha_ndbcluster::print_results()
}
}
-switch (col->getType()) {
-case NdbDictionary::Column::Tinyint: {
-Int8 value= *(Int8*)ptr;
-my_snprintf(buf, sizeof(buf), "Tinyint %d", value);
-break;
-}
-case NdbDictionary::Column::Tinyunsigned: {
-Uint8 value= *(Uint8*)ptr;
-my_snprintf(buf, sizeof(buf), "Tinyunsigned %u", value);
-break;
-}
-case NdbDictionary::Column::Smallint: {
-Int16 value= *(Int16*)ptr;
-my_snprintf(buf, sizeof(buf), "Smallint %d", value);
-break;
-}
-case NdbDictionary::Column::Smallunsigned: {
-Uint16 value= *(Uint16*)ptr;
-my_snprintf(buf, sizeof(buf), "Smallunsigned %u", value);
-break;
-}
-case NdbDictionary::Column::Mediumint: {
-byte value[3];
-memcpy(value, ptr, 3);
-my_snprintf(buf, sizeof(buf), "Mediumint %d,%d,%d", value[0], value[1], value[2]);
-break;
-}
-case NdbDictionary::Column::Mediumunsigned: {
-byte value[3];
-memcpy(value, ptr, 3);
-my_snprintf(buf, sizeof(buf), "Mediumunsigned %u,%u,%u", value[0], value[1], value[2]);
-break;
-}
-case NdbDictionary::Column::Int: {
-Int32 value= *(Int32*)ptr;
-my_snprintf(buf, sizeof(buf), "Int %d", value);
-break;
-}
-case NdbDictionary::Column::Unsigned: {
-Uint32 value= *(Uint32*)ptr;
-my_snprintf(buf, sizeof(buf), "Unsigned %u", value);
-break;
-}
-case NdbDictionary::Column::Bigint: {
-Int64 value= *(Int64*)ptr;
-my_snprintf(buf, sizeof(buf), "Bigint %d", (int)value);
-break;
-}
-case NdbDictionary::Column::Bigunsigned: {
-Uint64 value= *(Uint64*)ptr;
-my_snprintf(buf, sizeof(buf), "Bigunsigned %u", (unsigned)value);
-break;
-}
-case NdbDictionary::Column::Float: {
-float value= *(float*)ptr;
-my_snprintf(buf, sizeof(buf), "Float %f", (double)value);
-break;
-}
-case NdbDictionary::Column::Double: {
-double value= *(double*)ptr;
-my_snprintf(buf, sizeof(buf), "Double %f", value);
-break;
-}
-case NdbDictionary::Column::Decimal: {
-const char *value= (char*)ptr;
-my_snprintf(buf, sizeof(buf), "Decimal '%-*s'", field->pack_length(), value);
-break;
-}
-case NdbDictionary::Column::Char: {
-const char *value= (char*)ptr;
-my_snprintf(buf, sizeof(buf), "Char '%.*s'", field->pack_length(), value);
-break;
-}
-case NdbDictionary::Column::Varchar: {
-uint len= *(uchar*)ptr;
-const char *value= (char*)ptr + 1;
-my_snprintf(buf, sizeof(buf), "Varchar (%u)'%.*s'", len, len, value);
-break;
-}
-case NdbDictionary::Column::Binary: {
-const char *value= (char*)ptr;
-my_snprintf(buf, sizeof(buf), "Binary '%.*s'", field->pack_length(), value);
-break;
-}
-case NdbDictionary::Column::Varbinary: {
-uint len= *(uchar*)ptr;
-const char *value= (char*)ptr + 1;
-my_snprintf(buf, sizeof(buf), "Varbinary (%u)'%.*s'", len, len, value);
-break;
-}
-case NdbDictionary::Column::Datetime: {
-my_snprintf(buf, sizeof(buf), "Datetime ?"); // fix-me
-break;
-}
-case NdbDictionary::Column::Date: {
-my_snprintf(buf, sizeof(buf), "Date ?"); // fix-me
-break;
-}
-case NdbDictionary::Column::Time: {
-my_snprintf(buf, sizeof(buf), "Time ?"); // fix-me
-break;
-}
-case NdbDictionary::Column::Blob: {
-Uint64 len= 0;
-ndb_blob->getLength(len);
-my_snprintf(buf, sizeof(buf), "Blob [len=%u]", (unsigned)len);
-break;
-}
-case NdbDictionary::Column::Text: {
-Uint64 len= 0;
-ndb_blob->getLength(len);
-my_snprintf(buf, sizeof(buf), "Text [len=%u]", (unsigned)len);
-break;
-}
-case NdbDictionary::Column::Bit: {
-const char *value= (char*)ptr;
-my_snprintf(buf, sizeof(buf), "Bit '%.*s'", field->pack_length(), value);
-break;
-}
-case NdbDictionary::Column::Longvarchar: {
-uint len= uint2korr(ptr);
-const char *value= (char*)ptr + 2;
-my_snprintf(buf, sizeof(buf), "Longvarchar (%u)'%.*s'", len, len, value);
-break;
-}
-case NdbDictionary::Column::Longvarbinary: {
-uint len= uint2korr(ptr);
-const char *value= (char*)ptr + 2;
-my_snprintf(buf, sizeof(buf), "Longvarbinary (%u)'%.*s'", len, len, value);
-break;
-}
-case NdbDictionary::Column::Undefined:
-my_snprintf(buf, sizeof(buf), "Unknown type: %d", col->getType());
-break;
-}
print_value:
DBUG_PRINT("value", ("%u,%s: %s", f, col->getName(), buf));
}
@@ -2685,8 +2563,11 @@ int ha_ndbcluster::rnd_init(bool scan)
{
if (!scan)
DBUG_RETURN(1);
-int res= cursor->restart(m_force_send);
-DBUG_ASSERT(res == 0);
+if(cursor->restart(m_force_send) != 0)
+{
+DBUG_ASSERT(0);
+DBUG_RETURN(-1);
+}
}
index_init(table->s->primary_key);
DBUG_RETURN(0);
@@ -2801,13 +2682,15 @@ void ha_ndbcluster::position(const byte *record)
DBUG_PRINT("info", ("Getting hidden key"));
int hidden_no= table->s->fields;
const NdbRecAttr* rec= m_value[hidden_no].rec;
+memcpy(ref, (const void*)rec->aRef(), ref_length);
+#ifndef DBUG_OFF
const NDBTAB *tab= (const NDBTAB *) m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
DBUG_ASSERT(hidden_col->getPrimaryKey() &&
hidden_col->getAutoIncrement() &&
rec != NULL &&
ref_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH);
-memcpy(ref, (const void*)rec->aRef(), ref_length);
+#endif
}
DBUG_DUMP("ref", (char*)ref, ref_length);
@@ -3336,11 +3219,13 @@ int ha_ndbcluster::start_stmt(THD *thd)
Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb;
DBUG_PRINT("trans",("Starting transaction stmt"));
+#if 0
NdbTransaction *tablock_trans=
(NdbTransaction*)thd->transaction.all.ndb_tid;
DBUG_PRINT("info", ("tablock_trans: %x", (uint)tablock_trans));
DBUG_ASSERT(tablock_trans);
// trans= ndb->hupp(tablock_trans);
+#endif
trans= ndb->startTransaction();
if (trans == NULL)
ERR_RETURN(ndb->getNdbError());
@@ -3675,7 +3560,6 @@ int ha_ndbcluster::create(const char *name,
NDBCOL col;
uint pack_length, length, i, pk_length= 0;
const void *data, *pack_data;
-const char **key_names= form->s->keynames.type_names;
char name2[FN_HEADLEN];
bool create_from_engine= (info->table_options & HA_CREATE_FROM_ENGINE);
@@ -3908,7 +3792,6 @@ int ha_ndbcluster::alter_table_name(const char *to)
Ndb *ndb= get_ndb();
NDBDICT *dict= ndb->getDictionary();
const NDBTAB *orig_tab= (const NDBTAB *) m_table;
-int ret;
DBUG_ENTER("alter_table_name_table");
NdbDictionary::Table new_tab= *orig_tab;
@@ -4008,7 +3891,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
handler(table_arg),
m_active_trans(NULL),
m_active_cursor(NULL),
-m_multi_cursor(NULL),
m_table(NULL),
m_table_info(NULL),
m_table_flags(HA_REC_NOT_IN_SEQ |
@@ -4037,7 +3919,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_force_send(TRUE),
m_autoincrement_prefetch(32),
m_transaction_on(TRUE),
-m_use_local_query_cache(FALSE)
+m_use_local_query_cache(FALSE),
+m_multi_cursor(NULL)
{
int i;
@@ -4512,15 +4395,21 @@ bool ndbcluster_init()
}
else if(res == 1)
{
-if (g_ndb_cluster_connection->start_connect_thread()) {
+if (g_ndb_cluster_connection->start_connect_thread())
+{
DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()"));
goto ndbcluster_init_error;
}
+#ifndef DBUG_OFF
{
char buf[1024];
-DBUG_PRINT("info",("NDBCLUSTER storage engine not started, will connect using %s",
-g_ndb_cluster_connection->get_connectstring(buf,sizeof(buf))));
+DBUG_PRINT("info",
+("NDBCLUSTER storage engine not started, "
+"will connect using %s",
+g_ndb_cluster_connection->
+get_connectstring(buf,sizeof(buf))));
}
+#endif
}
else
{
@@ -5059,6 +4948,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
* pk-op 4 pk-op 4
* range 5
* pk-op 6 pk-ok 6
+*/
/**
* Variables for loop
@@ -5117,7 +5007,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
multi_range_curr->start_key.length))
goto sk;
goto range;
-case ORDERED_INDEX:
+case ORDERED_INDEX: {
range:
multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE;
if (scanOp == 0)
@@ -5152,6 +5042,11 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
DBUG_RETURN(res);
break;
}
+case(UNDEFINED_INDEX):
+DBUG_ASSERT(FALSE);
+DBUG_RETURN(1);
+break;
+}
}
if (multi_range_curr != multi_range_end)
@@ -5238,7 +5133,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
range_no= m_multi_cursor->get_range_no();
uint current_range_no= multi_range_curr - m_multi_ranges;
-if (range_no == current_range_no)
+if ((uint) range_no == current_range_no)
{
DBUG_MULTI_RANGE(4);
// return current row
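Two warning-hygiene idioms recur in the ha_ndbcluster.cc hunks above: values consumed only by DBUG_ASSERT are compiled out of release builds with #ifndef DBUG_OFF so they do not become unused variables, and results that used to be merely asserted, such as trans->restart(), now also take a real error path so release builds cannot continue silently after a failure. A generic sketch of both idioms (do_call is a hypothetical function):

#ifndef DBUG_OFF
int rc=                    /* the variable exists only when the assert does */
#endif
  do_call();               /* the call itself runs in every build */
DBUG_ASSERT(rc == 0);

if (do_call() != 0)        /* checked variant, as used for restart() above */
{
  DBUG_ASSERT(0);          /* trap in debug builds */
  DBUG_RETURN(-1);         /* propagate the failure in release builds */
}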
@@ -255,7 +255,7 @@ my_bool
my_net_write(NET *net,const char *packet,ulong len)
{
uchar buff[NET_HEADER_SIZE];
-if (unlikely(!net->vio)) // nowhere to write
+if (unlikely(!net->vio)) /* nowhere to write */
return 0;
/*
Big packets are handled by splitting them in packets of MAX_PACKET_LENGTH
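The comment truncated above refers to the client/server framing: every packet starts with a NET_HEADER_SIZE header of three length bytes plus one sequence byte, so payloads of MAX_PACKET_LENGTH or more go out as a chain of maximal packets followed by a shorter, possibly empty, tail packet. A sketch of that splitting loop, close to but not guaranteed verbatim from my_net_write:

/* Split oversized payloads into MAX_PACKET_LENGTH frames. */
while (len >= MAX_PACKET_LENGTH)
{
  const ulong z_size= MAX_PACKET_LENGTH;
  int3store(buff, z_size);             /* 3-byte little-endian length */
  buff[3]= (uchar) net->pkt_nr++;      /* packet sequence number */
  if (net_write_buff(net, (char*) buff, NET_HEADER_SIZE) ||
      net_write_buff(net, packet, z_size))
    return 1;
  packet+= z_size;
  len-= z_size;
}
/* the final packet carrying the remaining len follows */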
@@ -742,8 +742,8 @@ public:
/* Erase all statements (calls Statement destructor) */
void reset()
{
-hash_reset(&names_hash);
-hash_reset(&st_hash);
+my_hash_reset(&names_hash);
+my_hash_reset(&st_hash);
last_found_statement= 0;
}
@@ -292,7 +292,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error=0;
id=0;
thd->proc_info="update";
-if (duplic != DUP_ERROR)
+if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
/*
let's *try* to start bulk inserts. It won't necessary
@@ -471,7 +471,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
table->next_number_field=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
thd->next_insert_id=0; // Reset this if wrongly used
-if (duplic != DUP_ERROR)
+if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
/* Reset value of LAST_INSERT_ID if no rows where inserted */
@@ -1606,7 +1606,7 @@ bool delayed_insert::handle_inserts(void)
info.ignore= row->ignore;
info.handle_duplicates= row->dup;
if (info.ignore ||
-info.handle_duplicates == DUP_REPLACE)
+info.handle_duplicates != DUP_ERROR)
{
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
using_ignore=1;
@@ -1806,7 +1806,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
restore_record(table,s->default_values); // Get empty record
table->next_number_field=table->found_next_number_field;
thd->cuted_fields=0;
-if (info.ignore || info.handle_duplicates == DUP_REPLACE)
+if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
table->file->start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
@@ -2008,7 +2008,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
restore_record(table,s->default_values); // Get empty record
thd->cuted_fields=0;
-if (info.ignore || info.handle_duplicates == DUP_REPLACE)
+if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
table->file->start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
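All five sql_insert.cc hunks widen the same predicate: the handler should be told to ignore duplicate-key errors in every mode where a duplicate will not surface as an error, i.e. REPLACE, INSERT ... ON DUPLICATE KEY UPDATE, and now INSERT IGNORE as well. As a standalone helper the condition would read (a sketch; the enum values are those used in these hunks):

/* May the storage engine suppress duplicate-key errors? */
static bool ignore_dup_key(enum enum_duplicates duplic, bool ignore)
{
  /* DUP_REPLACE and DUP_UPDATE always qualify; DUP_ERROR only with IGNORE */
  return duplic != DUP_ERROR || ignore;
}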
@@ -172,7 +172,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->proc_list.first= 0;
if (lex->spfuns.records)
-hash_reset(&lex->spfuns);
+my_hash_reset(&lex->spfuns);
}
void lex_end(LEX *lex)