diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 1b1b16f4ae4040f6abc14c84f996bec1734a75ad..16fa0052214bbbff937df40541ab09c9354b4f78 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2198,6 +2198,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
   int res;
   NdbScanOperation *op;
   NdbTransaction *trans= m_active_trans;
+  part_id_range part_spec;
 
   DBUG_ENTER("full_table_scan");
   DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
@@ -2209,6 +2210,35 @@ int ha_ndbcluster::full_table_scan(byte *buf)
       op->readTuples(lm, 0, parallelism))
     ERR_RETURN(trans->getNdbError());
   m_active_cursor= op;
+
+  if (m_use_partition_function)
+  {
+    part_spec.start_part= 0;
+    part_spec.end_part= get_tot_partitions(m_part_info) - 1;
+    prune_partition_set(table, &part_spec);
+    DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+                        part_spec.start_part, part_spec.end_part));
+    /*
+      If partition pruning has found no partition in set
+      we can return HA_ERR_END_OF_FILE
+      If partition pruning has found exactly one partition in set
+      we can optimize scan to run towards that partition only.
+    */
+    if (part_spec.start_part > part_spec.end_part)
+    {
+      DBUG_RETURN(HA_ERR_END_OF_FILE);
+    }
+    else if (part_spec.start_part == part_spec.end_part)
+    {
+      /*
+        Only one partition is required to scan, if sorted is required we
+        don't need it any more since output from one ordered partitioned
+        index is always sorted.
+      */
+      m_active_cursor->setPartitionId(part_spec.start_part);
+    }
+  }
+
   if (generate_scan_filter(m_cond_stack, op))
     DBUG_RETURN(ndb_err(trans));
   if ((res= define_read_attrs(buf, op)))
@@ -3012,6 +3042,14 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
   if (m_use_partition_function)
   {
     get_partition_set(table, buf, active_index, start_key, &part_spec);
+    DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+                        part_spec.start_part, part_spec.end_part));
+    /*
+      If partition pruning has found no partition in set
+      we can return HA_ERR_END_OF_FILE
+      If partition pruning has found exactly one partition in set
+      we can optimize scan to run towards that partition only.
+    */
     if (part_spec.start_part > part_spec.end_part)
     {
       DBUG_RETURN(HA_ERR_END_OF_FILE);
@@ -3026,6 +3064,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
       sorted= FALSE;
     }
   }
 
+  m_write_op= FALSE;
   switch (type){
   case PRIMARY_KEY_ORDERED_INDEX:
@@ -7003,6 +7042,12 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
       get_partition_set(table, curr, active_index,
                         &multi_range_curr->start_key,
                         &part_spec);
+      DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+                          part_spec.start_part, part_spec.end_part));
+      /*
+        If partition pruning has found no partition in set
+        we can skip this scan
+      */
       if (part_spec.start_part > part_spec.end_part)
       {
         /*
diff --git a/sql/handler.h b/sql/handler.h
index 55af6cf4da6ad6695ab84c7c6755982d082cc67e..b8852d35e66d0adc6c718dccb674f4785e70d0f0 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1126,6 +1126,7 @@ char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length, bool use_sql_alloc,
                                 bool write_all);
 bool partition_key_modified(TABLE *table, List<Item> &fields);
+void prune_partition_set(const TABLE *table, part_id_range *part_spec);
 void get_partition_set(const TABLE *table, byte *buf, const uint index,
                        const key_range *key_spec,
                        part_id_range *part_spec);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 290d512198fe6219f1cb1c09a1ce87b5ea9cd939..f058e22d6938400794eb17357af03c7fc455eab1 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -3605,7 +3605,55 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
   part_spec->start_part++;
   DBUG_VOID_RETURN;
 }
-
+
+/*
+  Prune the set of partitions to use in query
+
+  SYNOPSIS
+    prune_partition_set()
+    table         The table object
+    out:part_spec Contains start part, end part
+
+  DESCRIPTION
+    This function is called to prune the range of partitions to scan by
+    checking the used_partitions bitmap.
+    If start_part > end_part at return it means no partition needs to be
+    scanned. If start_part == end_part it always means a single partition
+    needs to be scanned.
+
+  RETURN VALUE
+    part_spec
+*/
+void prune_partition_set(const TABLE *table, part_id_range *part_spec)
+{
+  int last_partition= -1;
+  uint i;
+  partition_info *part_info= table->part_info;
+
+  DBUG_ENTER("prune_partition_set");
+  for (i= part_spec->start_part; i <= part_spec->end_part; i++)
+  {
+    if (bitmap_is_set(&(part_info->used_partitions), i))
+    {
+      DBUG_PRINT("info", ("Partition %d is set", i));
+      if (last_partition == -1)
+        /* First partition found in set and pruned bitmap */
+        part_spec->start_part= i;
+      last_partition= i;
+    }
+    else
+    {
+      if (last_partition == -1)
+        /* No partition found in pruned bitmap yet */
+        part_spec->start_part= i + 1;
+    }
+  }
+  if (last_partition != -1)
+    part_spec->end_part= last_partition;
+
+  DBUG_VOID_RETURN;
+}
+
 /*
   Get the set of partitions to use in query.
 
@@ -3669,6 +3717,10 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
         is needed.
       */
       get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
+      /*
+        Check if range can be adjusted by looking in used_partitions
+      */
+      prune_partition_set(table, part_spec);
       DBUG_VOID_RETURN;
     }
     else if (is_sub_partitioned(part_info))
@@ -3711,6 +3763,10 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
       */
       get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
       clear_indicator_in_key_fields(key_info);
+      /*
+        Check if range can be adjusted by looking in used_partitions
+      */
+      prune_partition_set(table, part_spec);
       DBUG_VOID_RETURN;
     }
     else if (is_sub_partitioned(part_info))
@@ -3770,10 +3826,13 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
   }
   if (found_part_field)
     clear_indicator_in_key_fields(key_info);
+  /*
+    Check if range can be adjusted by looking in used_partitions
+  */
+  prune_partition_set(table, part_spec);
   DBUG_VOID_RETURN;
 }
-
 
 /*
   If the table is partitioned we will read the partition info into the
   .frm file here.
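
Reviewer note: a minimal standalone sketch of the narrowing that prune_partition_set() performs on a [start_part, end_part] range. It is not part of the patch; it substitutes a plain std::vector<bool> for the server's MY_BITMAP in partition_info::used_partitions, and the names part_range and prune_range are illustrative only.

#include <cstdio>
#include <vector>

struct part_range { unsigned start_part, end_part; };

/*
  Shrink [spec->start_part, spec->end_part] to the first and last used
  partitions; leaves start_part > end_part when no partition is used,
  mirroring the pruning contract described in the patch.
*/
static void prune_range(const std::vector<bool> &used, part_range *spec)
{
  int last_partition= -1;
  for (unsigned i= spec->start_part; i <= spec->end_part; i++)
  {
    if (used[i])
    {
      if (last_partition == -1)
        spec->start_part= i;            /* first used partition in range */
      last_partition= (int)i;
    }
    else if (last_partition == -1)
      spec->start_part= i + 1;          /* nothing used yet, advance start */
  }
  if (last_partition != -1)
    spec->end_part= (unsigned)last_partition;
  /* otherwise start_part has run past end_part: the pruned set is empty */
}

int main()
{
  /* Partitions 0..5 with only 2 and 3 used: range collapses to [2, 3]. */
  std::vector<bool> used= {false, false, true, true, false, false};
  part_range spec= {0, 5};
  prune_range(used, &spec);
  printf("start=%u end=%u\n", spec.start_part, spec.end_part);
  return 0;
}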