Commit 2239766d authored by Mattias Jonsson

merge

parents 19301684 f7b98c25
......@@ -43,6 +43,10 @@
*.vcxproj
*.vcxproj.filters
*/*.dir/*
*.dir
Debug
MySql.sdf
Win32
*/*_pure_*warnings
*/.deps
*/.libs/*
......@@ -615,6 +619,7 @@ include/mysql_h.ic
include/mysql_version.h
include/mysqld_ername.h
include/mysqld_error.h
include/mysqld_error.h.rule
include/openssl
include/probes_mysql_dtrace.h
include/readline
......@@ -1897,7 +1902,9 @@ scripts/mysql_find_rows
scripts/mysql_fix_extensions
scripts/mysql_fix_privilege_tables
scripts/mysql_fix_privilege_tables.sql
scripts/mysql_fix_privilege_tables.sql.rule
scripts/mysql_fix_privilege_tables_sql.c
scripts/mysql_fix_privilege_tables_sql.c.rule
scripts/mysql_install_db
scripts/mysql_secure_installation
scripts/mysql_setpermission
......@@ -2137,6 +2144,7 @@ sql/handlerton.cc
sql/html
sql/latex
sql/lex_hash.h
sql/lex_hash.h.rule
sql/link_sources
sql/max/*
sql/message.h
......@@ -2168,6 +2176,7 @@ sql/sql_builtin.cc
sql/sql_select.cc.orig
sql/sql_yacc.cc
sql/sql_yacc.h
sql/sql_yacc.h.rule
sql/sql_yacc.output
sql/sql_yacc.yy.orig
sql/test_time
......
......@@ -162,8 +162,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF;
*/
ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
:handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
m_is_sub_partitioned(0)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
init_handler_variables();
......@@ -183,15 +182,44 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
*/
ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
:handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE),
m_is_sub_partitioned(m_part_info->is_sub_partitioned())
:handler(hton, NULL)
{
DBUG_ENTER("ha_partition::ha_partition(part_info)");
DBUG_ASSERT(part_info);
init_handler_variables();
DBUG_ASSERT(m_part_info);
m_part_info= part_info;
m_create_handler= TRUE;
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
DBUG_VOID_RETURN;
}
/**
ha_partition constructor method used by ha_partition::clone()
@param hton Handlerton (partition_hton)
@param share Table share object
@param part_info_arg partition_info to use
@param clone_arg ha_partition to clone
@param clone_mem_root_arg MEM_ROOT to use
@return New partition handler
*/
ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
partition_info *part_info_arg,
ha_partition *clone_arg,
MEM_ROOT *clone_mem_root_arg)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(clone)");
init_handler_variables();
m_part_info= part_info_arg;
m_create_handler= TRUE;
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
m_is_clone_of= clone_arg;
m_clone_mem_root= clone_mem_root_arg;
DBUG_VOID_RETURN;
}
/*
Initialize handler object
......@@ -243,7 +271,6 @@ void ha_partition::init_handler_variables()
m_rec0= 0;
m_curr_key_info[0]= NULL;
m_curr_key_info[1]= NULL;
is_clone= FALSE,
m_part_func_monotonicity_info= NON_MONOTONIC;
auto_increment_lock= FALSE;
auto_increment_safe_stmt_log_lock= FALSE;
......@@ -251,6 +278,11 @@ void ha_partition::init_handler_variables()
this allows blackhole to work properly
*/
m_num_locks= 0;
m_part_info= NULL;
m_create_handler= FALSE;
m_is_sub_partitioned= 0;
m_is_clone_of= NULL;
m_clone_mem_root= NULL;
#ifdef DONT_HAVE_TO_BE_INITALIZED
m_start_key.flag= 0;
......@@ -358,7 +390,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
*/
DBUG_RETURN(0);
}
else if (get_from_handler_file(table_share->normalized_path.str, mem_root))
else if (get_from_handler_file(table_share->normalized_path.str,
mem_root, false))
{
my_error(ER_FAILED_READ_FROM_PAR_FILE, MYF(0));
DBUG_RETURN(1);
......@@ -1890,7 +1923,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
DBUG_RETURN(TRUE);
}
if (get_from_handler_file(from, ha_thd()->mem_root))
if (get_from_handler_file(from, ha_thd()->mem_root, false))
DBUG_RETURN(TRUE);
DBUG_ASSERT(m_file_buffer);
DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to ? to : "(nil)"));
......@@ -2105,18 +2138,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name)
}
/*
/**
Create the special .par file
SYNOPSIS
create_handler_file()
name Full path of table name
@param name Full path of table name
RETURN VALUE
>0 Error code
0 Success
@return Operation status
@retval TRUE Error code
@retval FALSE Success
DESCRIPTION
@note
Method used to create handler file with names of partitions, their
engine types and the number of partitions.
*/
......@@ -2180,19 +2211,22 @@ bool ha_partition::create_handler_file(const char *name)
Array of engine types n * 4 bytes where
n = (m_tot_parts + 3)/4
Length of name part in bytes 4 bytes
(Names in filename format)
Name part m * 4 bytes where
m = ((length_name_part + 3)/4)*4
All padding bytes are zeroed
*/
tot_partition_words= (tot_parts + 3) / 4;
tot_name_words= (tot_name_len + 3) / 4;
tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;
tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;
/* 4 static words (tot words, checksum, tot partitions, name length) */
tot_len_words= 4 + tot_partition_words + tot_name_words;
tot_len_byte= 4 * tot_len_words;
tot_len_byte= PAR_WORD_SIZE * tot_len_words;
if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL))))
DBUG_RETURN(TRUE);
engine_array= (file_buffer + 12);
name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
engine_array= (file_buffer + PAR_ENGINES_OFFSET);
name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE
+ PAR_WORD_SIZE);
part_it.rewind();
for (i= 0; i < num_parts; i++)
{
......@@ -2230,13 +2264,15 @@ bool ha_partition::create_handler_file(const char *name)
}
chksum= 0;
int4store(file_buffer, tot_len_words);
int4store(file_buffer + 8, tot_parts);
int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts);
int4store(file_buffer + PAR_ENGINES_OFFSET +
(tot_partition_words * PAR_WORD_SIZE),
tot_name_len);
for (i= 0; i < tot_len_words; i++)
chksum^= uint4korr(file_buffer + 4 * i);
int4store(file_buffer + 4, chksum);
chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i);
int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum);
/*
Remove .frm extension and replace with .par
Add .par extension to the file name.
Create and write and close file
to be used at open, delete_table and rename_table
*/
......@@ -2255,14 +2291,9 @@ bool ha_partition::create_handler_file(const char *name)
DBUG_RETURN(result);
}
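The hunks above replace the bare offsets 4, 8 and 12 with PAR_WORD_SIZE, PAR_CHECKSUM_OFFSET, PAR_NUM_PARTS_OFFSET and PAR_ENGINES_OFFSET, and the comment spells out the .par layout: a total-length word, an XOR checksum word, the partition count, one engine byte per partition padded to whole words, a name-length word, and the NUL-separated partition names, zero-padded to whole words. As a reading aid only, here is a minimal standalone sketch of that layout and checksum; it is not MySQL code, and store4(), load4(), build_par() and par_checksum_ok() are hypothetical helpers standing in for int4store()/uint4korr() and the logic in create_handler_file()/read_par_file().

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

static const size_t WORD= 4;            /* PAR_WORD_SIZE */
static const size_t CHECKSUM_OFS= 4;    /* PAR_CHECKSUM_OFFSET */
static const size_t NUM_PARTS_OFS= 8;   /* PAR_NUM_PARTS_OFFSET */
static const size_t ENGINES_OFS= 12;    /* PAR_ENGINES_OFFSET */

/* Little-endian helpers, stand-ins for int4store()/uint4korr(). */
static void store4(unsigned char *p, uint32_t v)
{
  p[0]= (unsigned char) v;         p[1]= (unsigned char) (v >> 8);
  p[2]= (unsigned char) (v >> 16); p[3]= (unsigned char) (v >> 24);
}
static uint32_t load4(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

/*
  Build a .par-style image: four static words (total length, checksum,
  partition count, name length), one engine byte per partition and the
  NUL-separated names, both padded to whole words with zero bytes.
*/
std::vector<unsigned char> build_par(const std::vector<unsigned char> &engines,
                                     const std::vector<std::string> &names)
{
  uint32_t tot_parts= (uint32_t) engines.size();
  uint32_t name_len= 0;
  for (size_t i= 0; i < names.size(); i++)
    name_len+= (uint32_t) names[i].size() + 1;        /* include the NUL */
  uint32_t part_words= (tot_parts + WORD - 1) / WORD;
  uint32_t name_words= (name_len + WORD - 1) / WORD;
  uint32_t tot_words= 4 + part_words + name_words;

  std::vector<unsigned char> buf(tot_words * WORD, 0); /* zero-filled */
  store4(&buf[0], tot_words);
  store4(&buf[NUM_PARTS_OFS], tot_parts);
  if (tot_parts)
    memcpy(&buf[ENGINES_OFS], &engines[0], tot_parts);
  store4(&buf[ENGINES_OFS + part_words * WORD], name_len);
  unsigned char *p= buf.data() + ENGINES_OFS + part_words * WORD + WORD;
  for (size_t i= 0; i < names.size(); i++)
  {
    memcpy(p, names[i].c_str(), names[i].size() + 1);
    p+= names[i].size() + 1;
  }
  /* XOR every word while the checksum word is still zero, then store it. */
  uint32_t chksum= 0;
  for (uint32_t i= 0; i < tot_words; i++)
    chksum^= load4(&buf[WORD * i]);
  store4(&buf[CHECKSUM_OFS], chksum);
  return buf;
}

/* Reader-side check, as in read_par_file(): XOR of all words must be zero. */
bool par_checksum_ok(const std::vector<unsigned char> &buf)
{
  if (buf.size() < WORD)
    return false;
  uint32_t words= load4(&buf[0]), chksum= 0;
  if (buf.size() < (size_t) words * WORD)
    return false;
  for (uint32_t i= 0; i < words; i++)
    chksum^= load4(&buf[WORD * i]);
  return chksum == 0;
}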
/*
Clear handler variables and free some memory
SYNOPSIS
clear_handler_file()
RETURN VALUE
NONE
/**
Clear handler variables and free some memory
*/
void ha_partition::clear_handler_file()
......@@ -2275,16 +2306,15 @@ void ha_partition::clear_handler_file()
m_engine_array= NULL;
}
/*
/**
Create underlying handler objects
SYNOPSIS
create_handlers()
mem_root Allocate memory through this
@param mem_root Allocate memory through this
RETURN VALUE
TRUE Error
FALSE Success
@return Operation status
@retval TRUE Error
@retval FALSE Success
*/
bool ha_partition::create_handlers(MEM_ROOT *mem_root)
......@@ -2322,6 +2352,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
DBUG_RETURN(FALSE);
}
/*
Create underlying handler objects from partition info
......@@ -2393,101 +2424,165 @@ error_end:
}
/*
Get info about partition engines and their names from the .par file
/**
Read the .par file to get the partitions engines and names
SYNOPSIS
get_from_handler_file()
name Full path of table name
mem_root Allocate memory through this
@param name Name of table file (without extension)
RETURN VALUE
TRUE Error
FALSE Success
@return Operation status
@retval true Failure
@retval false Success
DESCRIPTION
Open handler file to get partition names, engine types and number of
partitions.
@note On success, m_file_buffer is allocated and must be
freed by the caller. m_name_buffer_ptr and m_tot_parts are also set.
*/
bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
bool ha_partition::read_par_file(const char *name)
{
char buff[FN_REFLEN], *address_tot_name_len;
char buff[FN_REFLEN], *tot_name_len_offset;
File file;
char *file_buffer, *name_buffer_ptr;
handlerton **engine_array;
char *file_buffer;
uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum;
DBUG_ENTER("ha_partition::get_from_handler_file");
DBUG_ENTER("ha_partition::read_par_file");
DBUG_PRINT("enter", ("table name: '%s'", name));
if (m_file_buffer)
DBUG_RETURN(FALSE);
DBUG_RETURN(false);
fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT);
/* Following could be done with mysql_file_stat to read in whole file */
if ((file= mysql_file_open(key_file_partition,
buff, O_RDONLY | O_SHARE, MYF(0))) < 0)
DBUG_RETURN(TRUE);
if (mysql_file_read(file, (uchar *) &buff[0], 8, MYF(MY_NABP)))
if (mysql_file_read(file, (uchar *) &buff[0], PAR_WORD_SIZE, MYF(MY_NABP)))
goto err1;
len_words= uint4korr(buff);
len_bytes= 4 * len_words;
len_bytes= PAR_WORD_SIZE * len_words;
if (mysql_file_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
goto err1;
if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0))))
goto err1;
mysql_file_seek(file, 0, MY_SEEK_SET, MYF(0));
if (mysql_file_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP)))
goto err2;
chksum= 0;
for (i= 0; i < len_words; i++)
chksum ^= uint4korr((file_buffer) + 4 * i);
chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i);
if (chksum)
goto err2;
m_tot_parts= uint4korr((file_buffer) + 8);
m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET);
DBUG_PRINT("info", ("No of parts = %u", m_tot_parts));
tot_partition_words= (m_tot_parts + 3) / 4;
tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;
tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET +
PAR_WORD_SIZE * tot_partition_words;
tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) /
PAR_WORD_SIZE;
/*
Verify the total length = tot size word, checksum word, num parts word +
engines array + name length word + name array.
*/
if (len_words != (tot_partition_words + tot_name_words + 4))
goto err2;
(void) mysql_file_close(file, MYF(0));
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE;
DBUG_RETURN(false);
err2:
my_free(file_buffer);
err1:
(void) mysql_file_close(file, MYF(0));
DBUG_RETURN(true);
}
/**
Setup m_engine_array
@param mem_root MEM_ROOT to use for allocating new handlers
@return Operation status
@retval false Success
@retval true Failure
*/
bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
{
uint i;
uchar *buff;
handlerton **engine_array;
DBUG_ASSERT(!m_file);
DBUG_ENTER("ha_partition::setup_engine_array");
engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*));
if (!engine_array)
DBUG_RETURN(true);
buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET);
for (i= 0; i < m_tot_parts; i++)
{
engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
(enum legacy_db_type)
*(uchar *) ((file_buffer) +
12 + i));
*(buff + i));
if (!engine_array[i])
goto err3;
goto err;
}
address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
if (len_words != (tot_partition_words + tot_name_words + 4))
goto err3;
name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words;
(void) mysql_file_close(file, MYF(0));
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
m_name_buffer_ptr= name_buffer_ptr;
if (!(m_engine_array= (plugin_ref*)
my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME))))
goto err3;
goto err;
for (i= 0; i < m_tot_parts; i++)
m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]);
my_afree((gptr) engine_array);
if (!m_file && create_handlers(mem_root))
if (create_handlers(mem_root))
{
clear_handler_file();
DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}
DBUG_RETURN(FALSE);
err3:
DBUG_RETURN(false);
err:
my_afree((gptr) engine_array);
err2:
my_free(file_buffer);
err1:
(void) mysql_file_close(file, MYF(0));
DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}
/**
Get info about partition engines and their names from the .par file
@param name Full path of table name
@param mem_root Allocate memory through this
@param is_clone If it is a clone, don't create new handlers
@return Operation status
@retval true Error
@retval false Success
@note Open handler file to get partition names, engine types and number of
partitions.
*/
bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root,
bool is_clone)
{
DBUG_ENTER("ha_partition::get_from_handler_file");
DBUG_PRINT("enter", ("table name: '%s'", name));
if (m_file_buffer)
DBUG_RETURN(false);
if (read_par_file(name))
DBUG_RETURN(true);
if (!is_clone && setup_engine_array(mem_root))
DBUG_RETURN(true);
DBUG_RETURN(false);
}
......@@ -2533,13 +2628,13 @@ void ha_data_partition_destroy(HA_DATA_PARTITION* ha_part_data)
int ha_partition::open(const char *name, int mode, uint test_if_locked)
{
char *name_buffer_ptr= m_name_buffer_ptr;
char *name_buffer_ptr;
int error;
uint alloc_len;
handler **file;
char name_buff[FN_REFLEN];
bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
ulonglong check_table_flags= 0;
ulonglong check_table_flags;
DBUG_ENTER("ha_partition::open");
DBUG_ASSERT(table->s == table_share);
......@@ -2547,8 +2642,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_mode= mode;
m_open_test_lock= test_if_locked;
m_part_field_array= m_part_info->full_part_field_array;
if (get_from_handler_file(name, &table->mem_root))
if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of)))
DBUG_RETURN(1);
name_buffer_ptr= m_name_buffer_ptr;
m_start_key.length= 0;
m_rec0= table->record[0];
m_rec_length= table_share->reclength;
......@@ -2584,8 +2680,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(1);
bitmap_clear_all(&m_bulk_insert_started);
/* Initialize the bitmap we use to determine what partitions are used */
if (!is_clone)
if (!m_is_clone_of)
{
DBUG_ASSERT(!m_clone_mem_root);
if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
{
bitmap_free(&m_bulk_insert_started);
......@@ -2594,35 +2691,70 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
bitmap_set_all(&(m_part_info->used_partitions));
}
if (m_is_clone_of)
{
uint i;
DBUG_ASSERT(m_clone_mem_root);
/* Allocate an array of handler pointers for the partition handlers. */
alloc_len= (m_tot_parts + 1) * sizeof(handler*);
if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len)))
goto err_alloc;
memset(m_file, 0, alloc_len);
/*
Populate them by cloning the original partitions. This also opens them.
Note that file->ref is allocated too.
*/
file= m_is_clone_of->m_file;
for (i= 0; i < m_tot_parts; i++)
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root)))
{
error= HA_ERR_INITIALIZATION;
file= &m_file[i];
goto err_handler;
}
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
}
}
else
{
file= m_file;
do
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked)))
goto err_handler;
m_num_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
} while (*(++file));
}
file= m_file;
do
ref_length= (*file)->ref_length;
check_table_flags= (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS));
while (*(++file))
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
test_if_locked)))
goto err_handler;
m_num_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
DBUG_ASSERT(ref_length >= (*file)->ref_length);
set_if_bigger(ref_length, ((*file)->ref_length));
/*
Verify that all partitions have the same set of table flags.
Mask all flags that partitioning enables/disables.
*/
if (!check_table_flags)
{
check_table_flags= (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS));
}
else if (check_table_flags != (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS)))
if (check_table_flags != (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS)))
{
error= HA_ERR_INITIALIZATION;
/* set file to last handler, so all of them are closed */
file = &m_file[m_tot_parts - 1];
goto err_handler;
}
} while (*(++file));
}
key_used_on_scan= m_file[0]->key_used_on_scan;
implicit_emptied= m_file[0]->implicit_emptied;
/*
......@@ -2631,6 +2763,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
*/
ref_length+= PARTITION_BYTES_IN_POS;
m_ref_length= ref_length;
/*
Release buffer read from .par file. It will not be reused again after
being opened once.
......@@ -2690,25 +2823,54 @@ err_handler:
DEBUG_SYNC(ha_thd(), "partition_open_error");
while (file-- != m_file)
(*file)->close();
err_alloc:
bitmap_free(&m_bulk_insert_started);
if (!is_clone)
if (!m_is_clone_of)
bitmap_free(&(m_part_info->used_partitions));
DBUG_RETURN(error);
}
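In both branches of the rewritten ha_partition::open() above (the new clone path and the ordinary ha_open() loop), the per-partition names come from m_name_buffer_ptr, where the .par file keeps them back to back as NUL-terminated strings; every iteration advances by strlen(name) + 1. A minimal sketch of that traversal, with no MySQL dependencies and a hypothetical visit_partition_names() helper:

#include <cstdio>
#include <cstring>

/*
  Walk 'count' NUL-separated names packed into one buffer, the way open()
  advances name_buffer_ptr once per partition.  The printf stands in for
  create_partition_name() plus the per-partition clone()/ha_open() call.
*/
void visit_partition_names(const char *name_buffer, unsigned count)
{
  const char *p= name_buffer;
  for (unsigned i= 0; i < count; i++)
  {
    printf("partition %u: %s\n", i, p);
    p+= strlen(p) + 1;               /* skip the name and its NUL byte */
  }
}

/* Example: the buffer "p0\0p1\0p2\0" with count 3 yields p0, p1, p2. */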
handler *ha_partition::clone(MEM_ROOT *mem_root)
/**
Clone the open and locked partitioning handler.
@param mem_root MEM_ROOT to use.
@return Pointer to the successfully created clone or NULL
@details
This function creates a new ha_partition handler as a clone/copy. The
original (this) must already be opened and locked. The clone will use
the original's m_part_info.
It also allocates memory for ref + ref_dup.
In ha_partition::open() it will clone the original handler's partitions,
which will allocate them on the correct MEM_ROOT and also open them.
*/
handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root,
table->s->db_type());
((ha_partition*)new_handler)->m_part_info= m_part_info;
((ha_partition*)new_handler)->is_clone= TRUE;
if (new_handler && !new_handler->ha_open(table,
table->s->normalized_path.str,
table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
return new_handler;
return NULL;
ha_partition *new_handler;
DBUG_ENTER("ha_partition::clone");
new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info,
this, mem_root);
/*
Allocate new_handler->ref here because otherwise ha_open will allocate it
on this->table->mem_root and we will not be able to reclaim that memory
when the clone handler object is destroyed.
*/
if (new_handler &&
!(new_handler->ref= (uchar*) alloc_root(mem_root,
ALIGN_SIZE(m_ref_length)*2)))
new_handler= NULL;
if (new_handler &&
new_handler->ha_open(table, name,
table->db_stat, HA_OPEN_IGNORE_IF_LOCKED))
new_handler= NULL;
DBUG_RETURN((handler*) new_handler);
}
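The comment block in ha_partition::clone() above, like the matching one in handler::clone() further down, carries the ownership rule this patch introduces: the clone's ref buffer must be carved out of the MEM_ROOT passed to clone(), not out of table->mem_root, so that it is reclaimed when the clone is thrown away. Below is a minimal standalone sketch of that idea with a toy arena; Arena, CloneState and make_clone() are hypothetical names, not server APIs.

#include <cstddef>
#include <memory>
#include <new>
#include <vector>

/*
  Toy stand-in for a MEM_ROOT: every allocation lives exactly as long as
  the arena object itself and is released in one go with it.
*/
struct Arena
{
  std::vector<std::unique_ptr<unsigned char[]> > blocks;
  void *alloc(std::size_t n)
  {
    blocks.emplace_back(new unsigned char[n]);
    return blocks.back().get();
  }
};

/* Only the piece the patch cares about: the 2 * ref_length 'ref' buffer. */
struct CloneState
{
  unsigned char *ref;
  std::size_t ref_length;
};

/*
  Same shape as the patch: reserve the clone and its ref buffer on the
  caller-supplied arena *before* any "open", so destroying the arena
  reclaims both.  (Real code also checks alloc_root() for NULL and maps
  failure to returning NULL from clone().)
*/
CloneState *make_clone(Arena *arena, std::size_t ref_length)
{
  CloneState *c= new (arena->alloc(sizeof(CloneState))) CloneState();
  c->ref_length= ref_length;
  c->ref= static_cast<unsigned char*>(arena->alloc(2 * ref_length));
  return c;
}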
......@@ -2739,7 +2901,7 @@ int ha_partition::close(void)
DBUG_ASSERT(table->s == table_share);
delete_queue(&m_queue);
bitmap_free(&m_bulk_insert_started);
if (!is_clone)
if (!m_is_clone_of)
bitmap_free(&(m_part_info->used_partitions));
file= m_file;
......
......@@ -37,6 +37,16 @@ enum partition_keywords
HA_DUPLICATE_POS | \
HA_CAN_SQL_HANDLER | \
HA_CAN_INSERT_DELAYED)
/* The first 4 bytes in the .par file are the number of 32-bit words in the file */
#define PAR_WORD_SIZE 4
/* offset to the .par file checksum */
#define PAR_CHECKSUM_OFFSET 4
/* offset to the total number of partitions */
#define PAR_NUM_PARTS_OFFSET 8
/* offset to the engines array */
#define PAR_ENGINES_OFFSET 12
class ha_partition :public handler
{
private:
......@@ -53,7 +63,7 @@ private:
/* Data for the partition handler */
int m_mode; // Open mode
uint m_open_test_lock; // Open test_if_locked
char *m_file_buffer; // Buffer with names
char *m_file_buffer; // Content of the .par file
char *m_name_buffer_ptr; // Pointer to first partition name
plugin_ref *m_engine_array; // Array of types of the handlers
handler **m_file; // Array of references to handler inst.
......@@ -115,6 +125,13 @@ private:
bool m_is_sub_partitioned; // Is subpartitioned
bool m_ordered_scan_ongoing;
/*
If set, this object was created with ha_partition::clone and doesn't
"own" the m_part_info structure.
*/
ha_partition *m_is_clone_of;
MEM_ROOT *m_clone_mem_root;
/*
We keep track if all underlying handlers are MyISAM since MyISAM has a
great number of extra flags not needed by other handlers.
......@@ -148,11 +165,6 @@ private:
*/
THR_LOCK_DATA lock; /* MySQL lock */
/*
TRUE <=> this object was created with ha_partition::clone and doesn't
"own" the m_part_info structure.
*/
bool is_clone;
bool auto_increment_lock; /**< lock reading/updating auto_inc */
/**
Flag to keep the auto_increment lock through out the statement.
......@@ -165,7 +177,7 @@ private:
/** used for prediction of start_bulk_insert rows */
enum_monotonicity_info m_part_func_monotonicity_info;
public:
handler *clone(MEM_ROOT *mem_root);
handler *clone(const char *name, MEM_ROOT *mem_root);
virtual void set_part_info(partition_info *part_info)
{
m_part_info= part_info;
......@@ -184,6 +196,10 @@ public:
*/
ha_partition(handlerton *hton, TABLE_SHARE * table);
ha_partition(handlerton *hton, partition_info * part_info);
ha_partition(handlerton *hton, TABLE_SHARE *share,
partition_info *part_info_arg,
ha_partition *clone_arg,
MEM_ROOT *clone_mem_root_arg);
~ha_partition();
/*
A partition handler has no characteristics in itself. It only inherits
......@@ -254,7 +270,10 @@ private:
And one method to read it in.
*/
bool create_handler_file(const char *name);
bool get_from_handler_file(const char *name, MEM_ROOT *mem_root);
bool setup_engine_array(MEM_ROOT *mem_root);
bool read_par_file(const char *name);
bool get_from_handler_file(const char *name, MEM_ROOT *mem_root,
bool is_clone);
bool new_handlers_from_part_info(MEM_ROOT *mem_root);
bool create_handlers(MEM_ROOT *mem_root);
void clear_handler_file();
......
......@@ -2076,22 +2076,29 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
/****************************************************************************
** General handler functions
****************************************************************************/
handler *handler::clone(MEM_ROOT *mem_root)
handler *handler::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
handler *new_handler= get_new_handler(table->s, mem_root, ht);
/*
Allocate handler->ref here because otherwise ha_open will allocate it
on this->table->mem_root and we will not be able to reclaim that memory
when the clone handler object is destroyed.
*/
if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2)))
return NULL;
if (new_handler && !new_handler->ha_open(table,
table->s->normalized_path.str,
table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
return new_handler;
return NULL;
if (new_handler &&
!(new_handler->ref= (uchar*) alloc_root(mem_root,
ALIGN_SIZE(ref_length)*2)))
new_handler= NULL;
/*
TODO: Implement a more efficient way to have more than one index open for
the same table instance. The ha_open call is not cachable for clone.
*/
if (new_handler && new_handler->ha_open(table,
name,
table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
new_handler= NULL;
return new_handler;
}
......
......@@ -1262,7 +1262,7 @@ public:
DBUG_ASSERT(locked == FALSE);
DBUG_ASSERT(inited == NONE);
}
virtual handler *clone(MEM_ROOT *mem_root);
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
/** This is called after create to allow us to set up cached variables */
void init()
{
......
......@@ -1368,7 +1368,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
}
thd= head->in_use;
if (!(file= head->file->clone(thd->mem_root)))
if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root)))
{
/*
Manually set the error flag. Note: there seems to be quite a few
......
......@@ -157,11 +157,11 @@ int ha_heap::close(void)
DESCRIPTION
Do same as default implementation but use file->s->name instead of
table->s->path. This is needed by Windows where the clone() call sees
'/'-delimited path in table->s->path, while ha_peap::open() was called
'/'-delimited path in table->s->path, while ha_heap::open() was called
with '\'-delimited path.
*/
handler *ha_heap::clone(MEM_ROOT *mem_root)
handler *ha_heap::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat,
......
......@@ -35,7 +35,7 @@ class ha_heap: public handler
public:
ha_heap(handlerton *hton, TABLE_SHARE *table);
~ha_heap() {}
handler *clone(MEM_ROOT *mem_root);
handler *clone(const char *name, MEM_ROOT *mem_root);
const char *table_type() const
{
return (table->in_use->variables.sql_mode & MODE_MYSQL323) ?
......
......@@ -643,9 +643,10 @@ ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
can_enable_indexes(1)
{}
handler *ha_myisam::clone(MEM_ROOT *mem_root)
handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root)
{
ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(mem_root));
ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(name,
mem_root));
if (new_handler)
new_handler->file->state= file->state;
return new_handler;
......
......@@ -50,7 +50,7 @@ class ha_myisam: public handler
public:
ha_myisam(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisam() {}
handler *clone(MEM_ROOT *mem_root);
handler *clone(const char *name, MEM_ROOT *mem_root);
const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
const char **bas_ext() const;
......
......@@ -681,7 +681,7 @@ CPP_UNNAMED_NS_END
@return A cloned handler instance.
*/
handler *ha_myisammrg::clone(MEM_ROOT *mem_root)
handler *ha_myisammrg::clone(const char *name, MEM_ROOT *mem_root)
{
MYRG_TABLE *u_table,*newu_table;
ha_myisammrg *new_handler=
......@@ -702,8 +702,8 @@ handler *ha_myisammrg::clone(MEM_ROOT *mem_root)
return NULL;
}
if (new_handler->ha_open(table, table->s->normalized_path.str, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
if (new_handler->ha_open(table, name, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
{
delete new_handler;
return NULL;
......
......@@ -110,7 +110,7 @@ public:
int add_children_list(void);
int attach_children(void);
int detach_children(void);
virtual handler *clone(MEM_ROOT *mem_root);
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
int close(void);
int write_row(uchar * buf);
int update_row(const uchar * old_data, uchar * new_data);
......