Commit e1dc86b0 authored by unknown

type conversions fixed to avoid warnings on Windows


myisam/mi_write.c:
  type conversion fixed
myisam/sort.c:
  type conversion fixed
sql/ha_federated.cc:
  type conversion fixed
sql/ha_heap.cc:
  type conversion fixed
sql/ha_innodb.cc:
  type conversion fixed
sql/ha_myisam.cc:
  type conversion fixed
sql/opt_range.cc:
  type conversion fixed
sql/sql_map.cc:
  type conversion fixed
sql/sql_select.cc:
  type conversion fixed
sql/sql_update.cc:
  type conversion fixed
parent 578c2386
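
All of the hunks below address the same class of diagnostic: ha_rows and my_off_t are typically 64-bit, so assigning them to a 32-bit ulong/uint (or passing them where a narrower type is expected) makes the Windows compiler warn about possible loss of data (e.g. MSVC C4244/C4267). A minimal sketch of the pattern, using stand-in typedefs rather than the real MySQL headers:

    /* Stand-in typedefs; only meant to reproduce the warning class. */
    #include <stdio.h>

    typedef unsigned long long row_count_t;  /* plays the role of ha_rows (64-bit) */
    typedef unsigned long      buf_size_t;   /* plays the role of ulong (32-bit on Win64) */

    int main(void)
    {
      row_count_t rows= 42;
      buf_size_t  cache_size;

      /* cache_size= rows;  -- would warn: conversion from a 64-bit to a 32-bit
                               type, possible loss of data */
      cache_size= (buf_size_t) rows;   /* explicit cast states the narrowing is intended */

      printf("%lu\n", cache_size);
      return 0;
    }

Each hunk either adds such a cast at the point of use or widens the receiving variable so that no conversion happens.
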
myisam/mi_write.c
@@ -975,7 +975,7 @@ int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
     DBUG_RETURN(0);
   if (rows && rows*total_keylength < cache_size)
-    cache_size=rows;
+    cache_size= (ulong)rows;
   else
     cache_size/=total_keylength*16;
myisam/sort.c
@@ -141,7 +141,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
   if ((records < UINT_MAX32) &&
       ((my_off_t) (records + 1) *
        (sort_length + sizeof(char*)) <= (my_off_t) memavl))
-    keys= records+1;
+    keys= (uint)records+1;
   else
     do
     {
@@ -349,7 +349,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
   sort_keys= (uchar **) NULL;
   memavl= max(sort_param->sortbuff_size, MIN_SORT_MEMORY);
-  idx= sort_param->sort_info->max_records;
+  idx= (uint)sort_param->sort_info->max_records;
   sort_length= sort_param->key_length;
   maxbuffer= 1;
sql/ha_federated.cc
@@ -2562,9 +2562,9 @@ int ha_federated::info(uint flag)
       data_file_length= records * mean_rec_length;
       if (row[12] != NULL)
-        update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
+        update_time= (time_t) my_strtoll10(row[12], (char**) 0, &error);
       if (row[13] != NULL)
-        check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
+        check_time= (time_t) my_strtoll10(row[13], (char**) 0, &error);
     }
   /*
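
The two ha_federated.cc lines are also a small correctness cleanup, not only a warning fix: update_time and check_time are time_t members, so the old (ha_rows) cast was simply the wrong type for a parsed timestamp. A sketch of the intended pattern, with standard strtoll standing in for my_strtoll10 (an assumption made only for the sketch):

    #include <stdlib.h>
    #include <time.h>

    /* Parse a textual seconds-since-epoch value and narrow it explicitly to
       the member's real type. */
    static time_t parse_epoch_seconds(const char *s)
    {
      long long v= strtoll(s, NULL, 10);   /* 64-bit parse, like my_strtoll10 */
      return (time_t) v;                   /* cast to the destination type */
    }
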
sql/ha_heap.cc
@@ -175,7 +175,7 @@ void ha_heap::update_key_stats()
     else
     {
       ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
-      uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
+      uint no_records= hash_buckets ? (uint) file->s->records/hash_buckets : 2;
       if (no_records < 2)
         no_records= 2;
       key->rec_per_key[key->key_parts-1]= no_records;
sql/ha_innodb.cc
@@ -5474,7 +5474,7 @@ ha_innobase::info(
       table->key_info[i].rec_per_key[j]=
         rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
-        rec_per_key;
+        (ulong) rec_per_key;
     }
     index = dict_table_get_next_index_noninline(index);
sql/ha_myisam.cc
@@ -1412,7 +1412,7 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
   DBUG_ENTER("ha_myisam::start_bulk_insert");
   THD *thd= current_thd;
   ulong size= min(thd->variables.read_buff_size,
-                  table->s->avg_row_length*rows);
+                  (ulong) (table->s->avg_row_length*rows));
   DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
                      (ulong) rows, size));
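
The ha_myisam.cc change casts the whole 64-bit product rather than one operand because min() here is a plain macro: when its two arguments have different widths the result is promoted to the wider type, and the assignment to ulong would still warn. A stand-in sketch (my_min and the typedef are hypothetical names, not the server's):

    #define my_min(a,b) ((a) < (b) ? (a) : (b))

    typedef unsigned long long row_count_t;   /* stands in for ha_rows */

    static unsigned long pick_buffer_size(unsigned long read_buff_size,
                                          unsigned long avg_row_length,
                                          row_count_t rows)
    {
      /* cast the 64-bit product so both operands, and the result, are ulong */
      return my_min(read_buff_size, (unsigned long) (avg_row_length * rows));
    }
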
sql/opt_range.cc
@@ -2206,7 +2206,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
   if (param->table->file->primary_key_is_clustered())
   {
     result= param->table->file->read_time(param->table->s->primary_key,
-                                          records, records);
+                                          (uint)records, records);
   }
   else
   {
@@ -2414,7 +2414,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
     /* Add Unique operations cost */
     unique_calc_buff_size=
-      Unique::get_cost_calc_buff_size(non_cpk_scan_records,
+      Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
                                       param->table->file->ref_length,
                                       param->thd->variables.sortbuff_size);
     if (param->imerge_cost_buff_size < unique_calc_buff_size)
@@ -2426,7 +2426,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
     }
     imerge_cost +=
-      Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records,
+      Unique::get_use_cost(param->imerge_cost_buff, (uint)non_cpk_scan_records,
                            param->table->file->ref_length,
                            param->thd->variables.sortbuff_size);
   DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)",
@@ -2765,7 +2765,7 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
   info->is_covering= FALSE;
   info->index_scan_costs= 0.0;
   info->index_records= 0;
-  info->out_rows= param->table->file->records;
+  info->out_rows= (double) param->table->file->records;
   bitmap_clear_all(&info->covered_fields);
   return info;
 }
@@ -6757,7 +6757,7 @@ int QUICK_RANGE_SELECT::reset()
   if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
   {
     mrange_bufsiz= min(multi_range_bufsiz,
-                       (QUICK_SELECT_I::records + 1)* head->s->reclength);
+                       ((uint)QUICK_SELECT_I::records + 1)* head->s->reclength);
     while (mrange_bufsiz &&
            ! my_multi_malloc(MYF(MY_WME),
@@ -8359,7 +8359,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
                         bool have_min, bool have_max,
                         double *read_cost, ha_rows *records)
 {
-  uint table_records;
+  ha_rows table_records;
   uint num_groups;
   uint num_blocks;
   uint keys_per_block;
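
The last opt_range.cc hunk takes the opposite approach to most of this commit: rather than casting at every use of table_records, the local is widened from uint to ha_rows so file->records can be stored with no conversion at all. Both options, sketched with a stand-in typedef:

    typedef unsigned long long row_count_t;   /* stands in for ha_rows */

    static void use_record_count(row_count_t file_records)
    {
      unsigned int narrowed= (unsigned int) file_records;  /* cast at the use site */
      row_count_t  widened=  file_records;                 /* or widen the local */
      (void) narrowed;
      (void) widened;
    }
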
sql/sql_map.cc
@@ -41,7 +41,7 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt
   struct stat stat_buf;
   if (!fstat(file,&stat_buf))
   {
-    if (!(map=(byte*) my_mmap(0,(size=(ulong) stat_buf.st_size),PROT_READ,
+    if (!(map=(byte*) my_mmap(0,(size_t)(size=(ulong) stat_buf.st_size),PROT_READ,
                               MAP_SHARED | MAP_NORESERVE,file,
                               0L)))
     {
@@ -52,7 +52,7 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt
   if (map && memcmp(map,magic,magic_length))
   {
     my_error(ER_WRONG_MAGIC, MYF(0), name);
-    VOID(my_munmap(map,size));
+    VOID(my_munmap(map,(size_t)size));
     map=0;
   }
   if (!map)
@@ -70,7 +70,7 @@ mapped_files::~mapped_files()
 #ifdef HAVE_MMAP
   if (file >= 0)
   {
-    VOID(my_munmap(map,size));
+    VOID(my_munmap(map,(size_t)size));
     VOID(my_close(file,MYF(0)));
     file= -1; map=0;
   }
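
The sql_map.cc casts follow the mapping wrappers' signatures: my_mmap/my_munmap evidently take a size_t length, while mapped_files stores the size as ulong, hence the explicit (size_t) casts at each call. A minimal POSIX sketch of the same call pattern (plain mmap/munmap stand in for the my_* wrappers):

    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/stat.h>

    /* Map a whole file read-only; the size is kept as unsigned long, mirroring
       the mapped_files member, and cast to size_t at the call site. */
    static void *map_whole_file(int fd, unsigned long *size_out)
    {
      struct stat st;
      void *map;

      if (fstat(fd, &st))
        return NULL;
      *size_out= (unsigned long) st.st_size;
      map= mmap(NULL, (size_t) *size_out, PROT_READ, MAP_SHARED, fd, 0);
      return map == MAP_FAILED ? NULL : map;
    }

    static void unmap_file(void *map, unsigned long size)
    {
      if (map)
        munmap(map, (size_t) size);
    }
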
sql/sql_select.cc
@@ -5915,7 +5915,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
       /* Fix for EXPLAIN */
       if (sel->quick)
-        join->best_positions[i].records_read= sel->quick->records;
+        join->best_positions[i].records_read= (double)sel->quick->records;
     }
     else
     {
sql/sql_update.cc
@@ -362,7 +362,7 @@ int mysql_update(THD *thd,
     init_read_record_idx(&info, thd, table, 1, used_index);
     thd->proc_info="Searching rows for update";
-    uint tmp_limit= limit;
+    ha_rows tmp_limit= limit;
     while (!(error=info.read_record(&info)) && !thd->killed)
     {