Commit 24c4877e authored by Michael Widenius

Temporary commit to try to find a crash in Windows builds.

This commit adds support for more than one key in temporary tables.
parent 07232ac0
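For readers skimming the diff: create_internal_tmp_table() previously assumed a single KEY and a single MARIA_KEYDEF; it now loops over all keys of the temporary table and turns each one either into a regular engine key or, when it exceeds the engine's key limits, into a unique hash constraint (HA_UNIQUE_HASH / MARIA_UNIQUEDEF). Below is a minimal, self-contained C++ sketch of that classification loop only; Key, KeyDef, UniqueDef and the MAX_KEY_LENGTH / MAX_KEY_PARTS limits are simplified stand-ins for illustration, not the real MariaDB types or values.

// Minimal sketch only: Key, KeyDef and UniqueDef are simplified stand-ins
// for the real KEY, MARIA_KEYDEF and MARIA_UNIQUEDEF structures, and the
// engine limits are assumed values.
#include <cstdio>
#include <vector>

struct Key {
  unsigned key_length;              // total length of the key
  unsigned user_defined_key_parts;  // number of key parts
};

struct KeyDef    { unsigned keysegs; };  // ordinary engine key
struct UniqueDef { unsigned keysegs; };  // unique hash constraint

static const unsigned MAX_KEY_LENGTH = 1000;  // assumed engine limit
static const unsigned MAX_KEY_PARTS  = 32;    // assumed engine limit

int main()
{
  // Two keys on the temporary table: one small, one too long for the engine
  std::vector<Key> keys = { {64, 1}, {3000, 2} };
  std::vector<KeyDef> keydefs;
  std::vector<UniqueDef> uniquedefs;

  // Core change: iterate over every key instead of assuming exactly one
  for (const Key &key : keys)
  {
    if (key.key_length > MAX_KEY_LENGTH ||
        key.user_defined_key_parts > MAX_KEY_PARTS)
      uniquedefs.push_back({ key.user_defined_key_parts });  // too big: hash
    else
      keydefs.push_back({ key.user_defined_key_parts });     // regular key
  }
  std::printf("regular keys: %zu  unique constraints: %zu\n",
              keydefs.size(), uniquedefs.size());
  return 0;
}

In the actual patch the unique-constraint branch additionally reserves MARIA_UNIQUE_HASH_LENGTH bytes in the record for the hash value and decrements share->keys and share->key_parts for the key that was converted.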
@@ -104,7 +104,8 @@ enum ha_key_alg {
HA_KEY_ALG_RTREE= 2, /* R-tree, for spatial searches */
HA_KEY_ALG_HASH= 3, /* HASH keys (HEAP tables) */
HA_KEY_ALG_FULLTEXT= 4, /* FULLTEXT (MyISAM tables) */
HA_KEY_ALG_LONG_HASH= 5 /* long BLOB keys */
HA_KEY_ALG_LONG_HASH= 5, /* long BLOB keys */
HA_KEY_ALG_UNIQUE_HASH= 6 /* Internal UNIQUE hash (Aria) */
};
/* Storage media types */
@@ -276,11 +277,13 @@ enum ha_base_keytype {
#define HA_SPATIAL 1024U /* For spatial search */
#define HA_NULL_ARE_EQUAL 2048U /* NULL in key are cmp as equal */
#define HA_GENERATED_KEY 8192U /* Automatically generated key */
#define HA_UNIQUE_HASH (1U << 14) /* Part of unique hash key */
/* The combination of the above can be used for key type comparison. */
#define HA_KEYFLAG_MASK (HA_NOSAME | HA_AUTO_KEY | \
HA_FULLTEXT | HA_UNIQUE_CHECK | \
HA_SPATIAL | HA_NULL_ARE_EQUAL | HA_GENERATED_KEY)
HA_SPATIAL | HA_NULL_ARE_EQUAL | HA_GENERATED_KEY | \
HA_UNIQUE_HASH)
/*
Key contains partial segments.
@@ -4899,8 +4899,11 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
share->uniques= MY_TEST(using_unique_constraint);
table->key_info= share->key_info= keyinfo;
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME;
keyinfo->flags= HA_NOSAME | (using_unique_constraint ? HA_UNIQUE_HASH : 0);
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= 1;
keyinfo->ext_key_parts= 1;
share->key_parts= 1;
keyinfo->key_length=0;
keyinfo->rec_per_key=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
@@ -8928,7 +8928,7 @@ best_access_path(JOIN *join,
best.use_join_buffer= TRUE;
best.filter= 0;
best.type= JT_HASH;
best.refills= refills;
best.refills= (ulonglong) ceil(refills);
Json_writer_object trace_access_hash(thd);
if (unlikely(trace_access_hash.trace_started()))
trace_access_hash.
@@ -21183,6 +21183,8 @@ bool Create_tmp_table::finalize(THD *thd,
share->keys_in_use.set_bit(0);
keyinfo->key_part= m_key_part_info;
keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
if (share->uniques)
keyinfo->flags|= HA_UNIQUE_HASH;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->usable_key_parts=keyinfo->user_defined_key_parts=
param->group_parts;
@@ -21281,6 +21283,7 @@ bool Create_tmp_table::finalize(THD *thd,
*/
DBUG_PRINT("info",("hidden_field_count: %d", param->hidden_field_count));
keyinfo->flags= 0;
if (m_blobs_count[distinct])
{
/*
@@ -21289,6 +21292,7 @@ bool Create_tmp_table::finalize(THD *thd,
used for lookups.
*/
share->uniques= 1;
keyinfo->flags|= HA_UNIQUE_HASH;
}
keyinfo->user_defined_key_parts= m_field_count[distinct] +
(share->uniques ? MY_TEST(null_pack_length[distinct]) : 0);
@@ -21306,7 +21310,8 @@ bool Create_tmp_table::finalize(THD *thd,
share->keys_in_use.set_bit(0);
table->key_info= table->s->key_info= keyinfo;
keyinfo->key_part= m_key_part_info;
keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL | HA_BINARY_PACK_KEY | HA_PACK_KEY;
keyinfo->flags|= (HA_NOSAME | HA_NULL_ARE_EQUAL | HA_BINARY_PACK_KEY |
HA_PACK_KEY);
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->key_length= 0; // Will compute the sum of the parts below.
keyinfo->name= distinct_key;
@@ -21753,13 +21758,13 @@ bool open_tmp_table(TABLE *table)
*/
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
bool create_internal_tmp_table(TABLE *table, KEY *org_keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options)
{
int error;
MARIA_KEYDEF keydef;
MARIA_KEYDEF *keydefs= 0, *keydef;
MARIA_UNIQUEDEF uniquedef;
TABLE_SHARE *share= table->s;
MARIA_CREATE_INFO create_info;
@@ -21767,13 +21772,25 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
if (share->keys)
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
sizeof(*seg) * keyinfo->user_defined_key_parts);
if (!seg)
HA_KEYSEG *seg;
DBUG_ASSERT(share->key_parts);
if (!(multi_alloc_root(&table->mem_root,
&seg, sizeof(*seg) * share->key_parts,
&keydefs, sizeof(*keydefs) * share->keys,
NullS)))
goto err;
keydef= keydefs;
bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts);
DBUG_ASSERT(share->keys == 1);
share->uniques= 0; // Will be adjusted below
bzero(seg, sizeof(*seg) * share->key_parts);
/* Note that share->keys may change in the loop ! */
for (KEY *keyinfo= org_keyinfo, *end_keyinfo= keyinfo + share->keys;
keyinfo < end_keyinfo ;
keyinfo++)
{
/*
Note that a similar check is performed during
subquery_types_allow_materialization. See MDEV-7122 for more details as
@@ -21782,24 +21799,25 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
*/
if (keyinfo->key_length > table->file->max_key_length() ||
keyinfo->user_defined_key_parts > table->file->max_key_parts() ||
share->uniques)
(keyinfo->flags & HA_UNIQUE_HASH))
{
if (!share->uniques && !(keyinfo->flags & HA_NOSAME))
if (!(keyinfo->flags & (HA_NOSAME | HA_UNIQUE_HASH)))
{
my_error(ER_INTERNAL_ERROR, MYF(0),
"Using too big key for internal temp tables");
DBUG_RETURN(1);
}
/* Can't create a key; Make a unique constraint instead of a key */
share->keys= 0;
share->key_parts= share->ext_key_parts= 0;
share->keys--;
share->key_parts-= keyinfo->user_defined_key_parts;
share->ext_key_parts-= keyinfo->ext_key_parts;
share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
uniquedef.keysegs=keyinfo->user_defined_key_parts;
uniquedef.keysegs= keyinfo->user_defined_key_parts;
uniquedef.seg=seg;
uniquedef.null_are_equal=1;
keyinfo->flags|= HA_UNIQUE_HASH;
keyinfo->algorithm= HA_KEY_ALG_UNIQUE_HASH;
/* Create extra column for hash value */
bzero((uchar*) *recinfo,sizeof(**recinfo));
@@ -21809,16 +21827,23 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
/* Avoid warnings from valgrind */
bzero(table->record[0]+ share->reclength, MARIA_UNIQUE_HASH_LENGTH);
bzero(share->default_values+ share->reclength, MARIA_UNIQUE_HASH_LENGTH);
bzero(share->default_values+ share->reclength,
MARIA_UNIQUE_HASH_LENGTH);
share->reclength+= MARIA_UNIQUE_HASH_LENGTH;
}
else
{
/* Create a key */
bzero((char*) &keydef,sizeof(keydef));
keydef.flag= keyinfo->flags & HA_NOSAME;
keydef.keysegs= keyinfo->user_defined_key_parts;
keydef.seg= seg;
bzero((char*) keydef,sizeof(*keydef));
/*
We are using a GROUP BY on something that contains NULL.
In this case we have to tell Aria that two NULLs should
on INSERT be regarded as the same value.
*/
keydef->flag= (keyinfo->flags & HA_NOSAME) | HA_NULL_ARE_EQUAL;
keydef->keysegs= keyinfo->user_defined_key_parts;
keydef->seg= seg;
keydef++;
}
for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++)
{
@@ -21849,18 +21874,11 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{
seg->null_bit= field->null_bit;
seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
/*
We are using a GROUP BY on something that contains NULL
In this case we have to tell Aria that two NULL should
on INSERT be regarded at the same value
*/
if (!using_unique_constraint)
keydef.flag|= HA_NULL_ARE_EQUAL;
}
}
if (share->keys)
keyinfo->index_flags= table->file->index_flags(0, 0, 1);
}
}
bzero((char*) &create_info,sizeof(create_info));
create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
@@ -21905,7 +21923,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
if (unlikely((error= maria_create(share->path.str, file_type, share->keys,
&keydef, (uint) (*recinfo-start_recinfo),
keydefs, (uint) (*recinfo-start_recinfo),
start_recinfo, share->uniques, &uniquedef,
&create_info, create_flags))))
{
@@ -21958,7 +21976,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
/* Create internal MyISAM temporary table */
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
bool create_internal_tmp_table(TABLE *table, KEY *org_keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options)
@@ -21973,11 +21991,12 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
sizeof(*seg) * keyinfo->user_defined_key_parts);
sizeof(*seg) *
share->user_defined_key_parts);
if (!seg)
goto err;
bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts);
bzero(seg, sizeof(*seg) * share->user_defined_key_parts);
/*
Note that a similar check is performed during
subquery_types_allow_materialization. See MDEV-7122 for more details as
@@ -22623,6 +22642,8 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options)
{
DBUG_ASSERT(table->s->keys == 0 || table->key_info == keyinfo);
DBUG_ASSERT(table->s->keys <= 1);
if (table->s->db_type() == TMP_ENGINE_HTON)
{
/*
@@ -8473,9 +8473,14 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
void TABLE::use_index(int key_to_save, key_map *map_to_update)
{
#ifndef NEW
DBUG_ASSERT(!created && key_to_save < (int)s->keys);
uint saved_keys= 0, key_parts= 0;
key_map new_bitmap;
DBUG_ENTER("TABLE::use_index");
DBUG_PRINT("enter", ("key_to_save: %d keys: %u uniques: %u",
key_to_save, s->keys, s->uniques));
new_bitmap.clear_all();
/*
@@ -8485,11 +8490,14 @@ void TABLE::use_index(int key_to_save, key_map *map_to_update)
{
new_bitmap.set_bit(saved_keys);
if (key_to_save != 0) // Avoid not needed copy
{
KEY tmp_buff= key_info[saved_keys];
key_info[saved_keys]= key_info[key_to_save];
key_info[key_to_save]= tmp_buff;
saved_keys++;
}
key_parts= key_info[saved_keys].user_defined_key_parts;
saved_keys++;
}
/*
@@ -8497,7 +8505,7 @@ void TABLE::use_index(int key_to_save, key_map *map_to_update)
*/
for (uint i= saved_keys; i < s->keys; i++)
{
if (key_info[i].flags & HA_NOSAME)
if (key_info[i].flags & (HA_NOSAME | HA_UNIQUE_HASH))
{
if (map_to_update->is_set(i))
new_bitmap.set_bit(saved_keys);
@@ -8510,6 +8518,26 @@ void TABLE::use_index(int key_to_save, key_map *map_to_update)
*map_to_update= new_bitmap;
s->keys= saved_keys;
s->key_parts= s->ext_key_parts= key_parts;
#else
uint i= 1;
DBUG_ASSERT(!created && key_to_save < (int)s->keys);
if (key_to_save >= 0)
{
/* Save the given key. */
memmove(key_info, key_info + key_to_save, sizeof(KEY));
map_to_update->clear_all();
map_to_update->set_bit(0);
}
else
{
/* Drop all keys; */
i= 0;
map_to_update->clear_all();
}
s->keys= i;
s->uniques= 0;
#endif
DBUG_VOID_RETURN;
}
/*
@@ -1082,7 +1082,8 @@ const char *ha_maria::index_type(uint key_number)
ulong ha_maria::index_flags(uint inx, uint part, bool all_parts) const
{
ulong flags;
if (table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT)
if (table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT ||
table_share->key_info[inx].algorithm == HA_KEY_ALG_UNIQUE_HASH)
flags= 0;
else
if ((table_share->key_info[inx].flags & HA_SPATIAL ||
@@ -2733,12 +2734,15 @@ int ha_maria::info(uint flag)
key < key_end ; key++)
{
ulong *to= key->rec_per_key;
if (to)
{
for (ulong *end= to+ key->user_defined_key_parts ;
to < end ;
to++, from++)
*to= (ulong) (*from + 0.5);
}
}
}
/*
Set data_file_name and index_file_name to point at the symlink value
if table is symlinked (Ie; Real name is not same as generated name)