Commit ddff854b authored by joreland@mysql.com

Merge mysql.com:/home/jonas/src/kalle
into mysql.com:/home/jonas/src/new-fix
parents 1c929732 3821a8df
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush status;
create table t1(
id int not null primary key,
@@ -363,3 +363,8 @@ a int NOT NULL PRIMARY KEY,
b int
) engine=ndb;
insert t9 values(1, 2), (2,3), (3, 4), (4, 5);
create table t10 (
a int not null primary key,
b blob
) engine=ndb;
insert into t10 values (1, 'kalle');
@@ -8,3 +8,6 @@ show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
drop table t9;
select * from t10;
ERROR HY000: Got error 4263 'Invalid blob attributes or invalid blob parts table' from ndbcluster
drop table t10;
drop table if exists t1;
drop table if exists t1, test1, test2;
CREATE TABLE t1 (
a int unsigned NOT NULL PRIMARY KEY,
b int unsigned not null,
@@ -275,3 +275,38 @@ a b c
1 1 1
4 4 NULL
drop table t1;
CREATE TABLE test1 (
SubscrID int(11) NOT NULL auto_increment,
UsrID int(11) NOT NULL default '0',
PRIMARY KEY (SubscrID),
KEY idx_usrid (UsrID)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO test1 VALUES (2,224),(3,224),(1,224);
CREATE TABLE test2 (
SbclID int(11) NOT NULL auto_increment,
SbcrID int(11) NOT NULL default '0',
PRIMARY KEY (SbclID),
KEY idx_sbcrid (SbcrID)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO test2 VALUES (3,2),(1,1),(2,1),(4,2);
select * from test1 order by 1;
SubscrID UsrID
1 224
2 224
3 224
select * from test2 order by 1;
SbclID SbcrID
1 1
2 1
3 2
4 2
SELECT s.SubscrID,l.SbclID FROM test1 s left JOIN test2 l ON
l.SbcrID=s.SubscrID WHERE s.UsrID=224 order by 1, 2;
SubscrID SbclID
1 1
1 2
2 3
2 4
3 NULL
drop table test1;
drop table test2;
@@ -561,3 +561,19 @@ select count(*) from t1 where x = 18446744073709551601;
count(*)
1
drop table t1;
set names latin1;
create table t1 (a char(10), b text, key (a)) character set latin1;
INSERT INTO t1 (a) VALUES
('111'),('222'),('222'),('222'),('222'),('444'),('aaa'),('AAA'),('bbb');
explain select * from t1 where a='aaa';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a a 11 const 2 Using where
explain select * from t1 where a=binary 'aaa';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 11 NULL 2 Using where
explain select * from t1 where a='aaa' collate latin1_bin;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 11 NULL 2 Using where
explain select * from t1 where a='aaa' collate latin1_german1_ci;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a NULL NULL NULL 9 Using where
-- source include/have_ndb.inc
--disable_warnings
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
--enable_warnings
################################################
@@ -472,5 +472,11 @@ system rm var/master-data/test/t9.frm ;
# MySQL Server will have been restarted because it has a
# ndb_autodiscover2-master.opt file.
create table t10 (
a int not null primary key,
b blob
) engine=ndb;
insert into t10 values (1, 'kalle');
--exec $NDB_TOOLS_DIR/ndb_drop_table -d test `$NDB_TOOLS_DIR/ndb_show_tables | grep BLOB` > /dev/null 2>&1 || true
@@ -13,4 +13,7 @@ show status like 'handler_discover%';
drop table t9;
--error 1296
select * from t10;
drop table t10;
-- source include/have_ndb.inc
--disable_warnings
drop table if exists t1;
drop table if exists t1, test1, test2;
--enable_warnings
#
@@ -146,3 +146,29 @@ select * from t1 use index (bc) where b IS NULL and c = 2 order by a;
select * from t1 use index (bc) where b < 4 order by a;
select * from t1 use index (bc) where b IS NOT NULL order by a;
drop table t1;
#
# Bug #6435
CREATE TABLE test1 (
SubscrID int(11) NOT NULL auto_increment,
UsrID int(11) NOT NULL default '0',
PRIMARY KEY (SubscrID),
KEY idx_usrid (UsrID)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO test1 VALUES (2,224),(3,224),(1,224);
CREATE TABLE test2 (
SbclID int(11) NOT NULL auto_increment,
SbcrID int(11) NOT NULL default '0',
PRIMARY KEY (SbclID),
KEY idx_sbcrid (SbcrID)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO test2 VALUES (3,2),(1,1),(2,1),(4,2);
select * from test1 order by 1;
select * from test2 order by 1;
SELECT s.SubscrID,l.SbclID FROM test1 s left JOIN test2 l ON
l.SbcrID=s.SubscrID WHERE s.UsrID=224 order by 1, 2;
drop table test1;
drop table test2;
@@ -431,3 +431,17 @@ select count(*) from t1 where x = 18446744073709551601;
drop table t1;
#
# Bug #6045: Binary Comparison regression in MySQL 4.1
# Binary searches didn't use a case insensitive index.
#
set names latin1;
create table t1 (a char(10), b text, key (a)) character set latin1;
INSERT INTO t1 (a) VALUES
('111'),('222'),('222'),('222'),('222'),('444'),('aaa'),('AAA'),('bbb');
# all three of these can be optimized
explain select * from t1 where a='aaa';
explain select * from t1 where a=binary 'aaa';
explain select * from t1 where a='aaa' collate latin1_bin;
# this one cannot:
explain select * from t1 where a='aaa' collate latin1_german1_ci;
@@ -369,7 +369,7 @@ public:
*/
bool getDistributionKey() const;
/** @} *******************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
void setTupleKey(bool);
bool getTupleKey() const;
@@ -486,6 +486,18 @@ public:
*/
const Column* getColumn(const char * name) const;
/**
* Get column definition via index in table.
* @return null if the column does not exist
*/
Column* getColumn(const int attributeId);
/**
* Get column definition via name.
* @return null if no column with that name exists
*/
Column* getColumn(const char * name);
/**
* Get column definition via index in table.
* @return null if the column does not exist
......
@@ -343,6 +343,18 @@ NdbDictionary::Table::getColumn(const int attrId) const {
return m_impl.getColumn(attrId);
}
NdbDictionary::Column*
NdbDictionary::Table::getColumn(const char * name)
{
return m_impl.getColumn(name);
}
NdbDictionary::Column*
NdbDictionary::Table::getColumn(const int attrId)
{
return m_impl.getColumn(attrId);
}
void
NdbDictionary::Table::setLogging(bool val){
m_impl.m_logging = val;
......
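The non-const getColumn() overloads above mirror the existing const accessors; they let a caller modify a column of a table definition in place, which the ha_ndbcluster::create() change further down relies on to shrink a blob part size. A minimal usage sketch, assuming the NDB API headers are on the include path; the column name, attribute id and part size below are purely illustrative:
#include <NdbDictionary.hpp>
// Sketch only: demonstrates the new non-const lookups by name and by
// attribute id. Both return 0 when the column does not exist.
void tweak_blob_column(NdbDictionary::Table &tab)
{
  NdbDictionary::Column *by_name = tab.getColumn("b"); // lookup by name
  NdbDictionary::Column *by_id   = tab.getColumn(1);   // lookup by attribute id
  if (by_name != 0 && by_name->getType() == NdbDictionary::Column::Blob)
    by_name->setPartSize(2000);  // adjust the blob parts row size before create
  (void)by_id;
}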
@@ -637,11 +637,9 @@ NdbDictionaryImpl::get_local_table_info(const char * internalTableName,
return 0;
}
}
if (do_add_blob_tables &&
info->m_table_impl->m_noOfBlobs &&
addBlobTables(*(info->m_table_impl))) {
return 0;
}
if (do_add_blob_tables && info->m_table_impl->m_noOfBlobs)
addBlobTables(*(info->m_table_impl));
return info; // autoincrement already initialized
}
......
@@ -850,6 +850,14 @@ NdbScanOperation::doSendScan(int aProcessorId)
tSignal = tSignal->next();
}
theStatus = WaitResponse;
m_sent_receivers_count = theParallelism;
if(m_ordered)
{
m_current_api_receiver = theParallelism;
m_api_receivers_count = theParallelism;
}
return tSignalCount;
}//NdbScanOperation::doSendScan()
@@ -1507,13 +1515,8 @@ NdbScanOperation::reset_receivers(Uint32 parallell, Uint32 ordered){
m_api_receivers_count = 0;
m_current_api_receiver = 0;
m_sent_receivers_count = parallell;
m_sent_receivers_count = 0;
m_conf_receivers_count = 0;
if(ordered){
m_current_api_receiver = parallell;
m_api_receivers_count = parallell;
}
}
int
......
@@ -1290,7 +1290,6 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
Field *field= key_part->field;
uint part_len= key_part->length;
uint part_store_len= key_part->store_length;
bool part_nullable= (bool) key_part->null_bit;
// Info about each key part
struct part_st {
bool part_last;
@@ -1312,9 +1311,9 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
p.part_last= (tot_len + part_store_len >= key_tot_len[j]);
p.key= keys[j];
p.part_ptr= &p.key->key[tot_len];
p.part_null= (field->maybe_null() && *p.part_ptr);
p.part_null= key_part->null_bit && *p.part_ptr;
p.bound_ptr= (const char *)
p.part_null ? 0 : part_nullable ? p.part_ptr + 1 : p.part_ptr;
p.part_null ? 0 : key_part->null_bit ? p.part_ptr + 1 : p.part_ptr;
if (j == 0)
{
@@ -3307,7 +3306,7 @@ int ha_ndbcluster::create(const char *name,
{
NDBTAB tab;
NDBCOL col;
uint pack_length, length, i;
uint pack_length, length, i, pk_length= 0;
const void *data, *pack_data;
const char **key_names= form->keynames.type_names;
char name2[FN_HEADLEN];
@@ -3354,6 +3353,8 @@ if ((my_errno= create_ndb_column(col, field, info)))
if ((my_errno= create_ndb_column(col, field, info)))
DBUG_RETURN(my_errno);
tab.addColumn(col);
if(col.getPrimaryKey())
pk_length += (field->pack_length() + 3) / 4;
}
// No primary key, create shadow key as 64 bit, auto increment
@@ -3367,6 +3368,39 @@ col.setPrimaryKey(TRUE);
col.setPrimaryKey(TRUE);
col.setAutoIncrement(TRUE);
tab.addColumn(col);
pk_length += 2;
}
// Make sure that blob tables don't have too big a part size
for (i= 0; i < form->fields; i++)
{
/**
* The extra +7 consists of:
* 2 - words from the pk in the blob parts table
* 5 - extra words added by tup/dict??
*/
switch (form->field[i]->real_type()) {
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_MEDIUM_BLOB:
case MYSQL_TYPE_LONG_BLOB:
{
NdbDictionary::Column * col = tab.getColumn(i);
int size = pk_length + (col->getPartSize()+3)/4 + 7;
if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS &&
(pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS)
{
size = NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7;
col->setPartSize(4*size);
}
/**
* If size > NDB_MAX and pk_length+7 >= NDB_MAX
* then the table can't be created anyway, so skip
* changing the part size and report the error later
*/
}
default:
break;
}
}
if ((my_errno= check_ndb_connection()))
......
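To make the row-size arithmetic of the create() hunk above concrete, the sketch below restates the clamping rule on its own: primary-key words plus blob-part words plus a fixed 7-word overhead must fit into NDB_MAX_TUPLE_SIZE_IN_WORDS, otherwise the part size is reduced. The helper name, the main() driver and the numeric stand-in for the limit are assumptions for illustration, not taken from the patch.
#include <stdio.h>
// Placeholder for NDB_MAX_TUPLE_SIZE_IN_WORDS; the real value comes from
// the NDB headers.
static const int MAX_TUPLE_WORDS = 2013;
// pk_length is in 32-bit words, part_size in bytes, as in the patch.
static int clamp_blob_part_size(int pk_length, int part_size)
{
  // +7 = 2 words for the pk stored in the blob parts table
  //      + 5 extra words added by tup/dict (per the comment in the patch).
  int size = pk_length + (part_size + 3) / 4 + 7;
  if (size > MAX_TUPLE_WORDS && (pk_length + 7) < MAX_TUPLE_WORDS)
  {
    size = MAX_TUPLE_WORDS - pk_length - 7;
    return 4 * size;              // reduced part size in bytes
  }
  return part_size;               // already fits, or cannot be fixed here
}
int main()
{
  // Example: a 16-word primary key and an 8000-byte blob part size
  // get clamped so the blob parts table row still fits in one tuple.
  printf("part size: %d\n", clamp_blob_part_size(16, 8000));
  return 0;
}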
@@ -1013,13 +1013,22 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
}
/*
We can't use an index when comparing strings of
different collations
1. Usually we can't use an index if the column collation
differs from the operation collation.
2. However, we can reuse a case-insensitive index for
binary searches:
WHERE latin1_swedish_ci_column = 'a' COLLATE latin1_bin;
WHERE latin1_swedish_ci_column = BINARY 'a '
*/
if (field->result_type() == STRING_RESULT &&
value->result_type() == STRING_RESULT &&
key_part->image_type == Field::itRAW &&
((Field_str*)field)->charset() != conf_func->compare_collation())
((Field_str*)field)->charset() != conf_func->compare_collation() &&
!(conf_func->compare_collation()->state & MY_CS_BINSORT))
DBUG_RETURN(0);
if (type == Item_func::LIKE_FUNC)
......
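The updated comment above describes the relaxed rule this hunk implements: a comparison collation that differs from the column collation no longer disqualifies the key, provided that collation sorts binary (BINARY or a *_bin collation). Below is a simplified, self-contained model of that decision using stand-in types instead of the server's CHARSET_INFO and MY_CS_BINSORT flag:
#include <string.h>
// Stand-in for CHARSET_INFO: just a name and a "binary sort order" flag.
struct collation_model
{
  const char *name;
  bool binsort;   // models the MY_CS_BINSORT state bit
};
// Returns true when a range/ref access over the key part is still allowed.
static bool range_access_allowed(const collation_model &column_cs,
                                 const collation_model &compare_cs)
{
  if (strcmp(column_cs.name, compare_cs.name) == 0)
    return true;               // same collation: always usable
  // Differing collations are only acceptable when the comparison
  // collation is binary, e.g. "= BINARY 'a'" or "COLLATE latin1_bin".
  return compare_cs.binsort;
}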