nexedi / MariaDB

Commit 92771001 authored Sep 21, 2007 by unknown
fixed type conversion warnings revealed by bug 30639
parent c9f5a087
Showing 10 changed files with 23 additions and 23 deletions (+23 -23)
sql/opt_range.cc                        +9 -9
sql/sql_map.cc                          +3 -3
sql/sql_select.cc                       +1 -1
sql/sql_update.cc                       +1 -1
storage/federated/ha_federated.cc       +3 -3
storage/heap/ha_heap.cc                 +1 -1
storage/innobase/handler/ha_innodb.cc   +1 -1
storage/myisam/ha_myisam.cc             +1 -1
storage/myisam/mi_write.c               +1 -1
storage/myisam/sort.c                   +2 -2
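Every change in this commit is the same kind of fix: a value of a wide unsigned type (most often ha_rows, which this era's my_global.h typedefs to a 64-bit unsigned integer when BIG_TABLES is set) lands in a narrower or differently-typed destination (uint, ulong, size_t, time_t, double), and the implicit conversion drew a compiler warning. The fix either spells out the narrowing with an explicit cast or, in two places (cost_group_min_max() and mysql_update() below), widens the local variable instead. A minimal sketch of the warning and its fix, assuming ha_rows is unsigned long long:

    typedef unsigned long long ha_rows;  /* assumption: the BIG_TABLES typedef */

    void narrow(ha_rows records, unsigned int *out)
    {
      /* *out= records;              -- implicit 64->32-bit narrowing: warns */
      *out= (unsigned int) records;  /* explicit cast: intent is documented */
    }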
sql/opt_range.cc

@@ -3506,7 +3506,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
   if (param->table->file->primary_key_is_clustered())
   {
     result= param->table->file->read_time(param->table->s->primary_key,
-                                          records, records);
+                                          (uint) records, records);
   }
   else
   {
@@ -3715,7 +3715,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
   /* Add Unique operations cost */
   unique_calc_buff_size=
-    Unique::get_cost_calc_buff_size(non_cpk_scan_records,
+    Unique::get_cost_calc_buff_size((ulong) non_cpk_scan_records,
                                     param->table->file->ref_length,
                                     param->thd->variables.sortbuff_size);
   if (param->imerge_cost_buff_size < unique_calc_buff_size)
@@ -3727,7 +3727,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
   }
   imerge_cost +=
-    Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records,
+    Unique::get_use_cost(param->imerge_cost_buff, (uint) non_cpk_scan_records,
                          param->table->file->ref_length,
                          param->thd->variables.sortbuff_size);
   DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)",
@@ -4067,7 +4067,7 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
   info->is_covering= FALSE;
   info->index_scan_costs= 0.0;
   info->index_records= 0;
-  info->out_rows= param->table->file->stats.records;
+  info->out_rows= (double) param->table->file->stats.records;
   bitmap_clear_all(&info->covered_fields);
   return info;
 }
@@ -8196,7 +8196,7 @@ int QUICK_RANGE_SELECT::reset()
   if (file->ha_table_flags() & HA_NEED_READ_RANGE_BUFFER)
   {
     mrange_bufsiz= min(multi_range_bufsiz,
-                       (QUICK_SELECT_I::records + 1) * head->s->reclength);
+                       ((uint) QUICK_SELECT_I::records + 1) * head->s->reclength);
     while (mrange_bufsiz &&
            ! my_multi_malloc(MYF(MY_WME),
@@ -9822,7 +9822,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
                         bool have_min, bool have_max,
                         double *read_cost, ha_rows *records)
 {
-  uint    table_records;
+  ha_rows table_records;
   uint    num_groups;
   uint    num_blocks;
   uint    keys_per_block;
@@ -9839,14 +9839,14 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
   keys_per_block= (table->file->stats.block_size / 2 /
                    (index_info->key_length + table->file->ref_length)
                    + 1);
-  num_blocks= (table_records / keys_per_block) + 1;
+  num_blocks= (uint) (table_records / keys_per_block) + 1;

   /* Compute the number of keys in a group. */
   keys_per_group= index_info->rec_per_key[group_key_parts - 1];
   if (keys_per_group == 0) /* If there is no statistics try to guess */
     /* each group contains 10% of all records */
-    keys_per_group= (table_records / 10) + 1;
-  num_groups= (table_records / keys_per_group) + 1;
+    keys_per_group= (uint) (table_records / 10) + 1;
+  num_groups= (uint) (table_records / keys_per_group) + 1;

   /* Apply the selectivity of the quick select for group prefixes. */
   if (range_tree && (quick_prefix_records != HA_POS_ERROR))
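Two of the changes above ((double) on out_rows, and the matching cast on records_read in sql_select.cc below) narrow a row count into a double used for cost arithmetic. The narrowing is real: a double carries a 53-bit mantissa, so counts above 2^53 round, which is presumably acceptable for cost estimates; the explicit cast documents it. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
      unsigned long long rows= (1ULL << 53) + 1; /* first count a double cannot hold */
      double d= (double) rows;                   /* same cast as the commit adds */
      printf("%llu -> %.0f\n", rows, d);         /* 9007199254740993 -> 9007199254740992 */
      return 0;
    }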
sql/sql_map.cc

@@ -37,7 +37,7 @@ mapped_files::mapped_files(const char * filename,uchar *magic,uint magic_length)
   struct stat stat_buf;
   if (!fstat(file,&stat_buf))
   {
-    if (!(map=(uchar*) my_mmap(0,(size= (ulong) stat_buf.st_size),PROT_READ,
+    if (!(map=(uchar*) my_mmap(0,(size_t)(size= stat_buf.st_size),PROT_READ,
                                MAP_SHARED | MAP_NORESERVE,file, 0L)))
     {
@@ -48,7 +48,7 @@ mapped_files::mapped_files(const char * filename,uchar *magic,uint magic_length)
   if (map && memcmp(map,magic,magic_length))
   {
     my_error(ER_WRONG_MAGIC, MYF(0), name);
-    VOID(my_munmap((char*) map,size));
+    VOID(my_munmap((char*) map,(size_t) size));
     map=0;
   }
   if (!map)
@@ -66,7 +66,7 @@ mapped_files::~mapped_files()
 #ifdef HAVE_MMAP
   if (file >= 0)
   {
-    VOID(my_munmap((char*) map,size));
+    VOID(my_munmap((char*) map,(size_t) size));
     VOID(my_close(file,MYF(0)));
     file= -1; map=0;
   }
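These call sites pass a cached length into my_mmap()/my_munmap(), whose length parameter (like the underlying mmap(2)/munmap(2)) is a size_t, while stat's st_size is a signed off_t; the commit spells out both conversions. A sketch of the same shape using the plain POSIX calls (hypothetical helper, not the MySQL wrapper):

    #include <sys/mman.h>
    #include <sys/stat.h>

    /* map a whole file read-only; caller keeps *size for the later munmap() */
    static void *map_file(int fd, unsigned long *size, const struct stat *st)
    {
      *size= (unsigned long) st->st_size;        /* off_t (signed) -> ulong, explicit */
      return mmap(0, (size_t) *size, PROT_READ,  /* length parameter is size_t */
                  MAP_SHARED | MAP_NORESERVE, fd, 0L);
    }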
sql/sql_select.cc

@@ -6126,7 +6126,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
 	  /* Fix for EXPLAIN */
 	  if (sel->quick)
-	    join->best_positions[i].records_read= sel->quick->records;
+	    join->best_positions[i].records_read= (double) sel->quick->records;
 	}
 	else
 	{
sql/sql_update.cc

@@ -461,7 +461,7 @@ int mysql_update(THD *thd,
       init_read_record_idx(&info, thd, table, 1, used_index);

       thd->proc_info="Searching rows for update";
-      uint tmp_limit= limit;
+      ha_rows tmp_limit= limit;

       while (!(error=info.read_record(&info)) && !thd->killed)
       {
storage/federated/ha_federated.cc

@@ -2795,15 +2795,15 @@ int ha_federated::info(uint flag)
       stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
       if (row[5] != NULL)
-        stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+        stats.mean_rec_length= (ulong) my_strtoll10(row[5], (char**) 0, &error);

       stats.data_file_length= stats.records * stats.mean_rec_length;

       if (row[12] != NULL)
-        stats.update_time= (ha_rows) my_strtoll10(row[12], (char**) 0,
+        stats.update_time= (time_t) my_strtoll10(row[12], (char**) 0,
                                                   &error);
       if (row[13] != NULL)
-        stats.check_time= (ha_rows) my_strtoll10(row[13], (char**) 0,
+        stats.check_time= (time_t) my_strtoll10(row[13], (char**) 0,
                                                  &error);
     }

     /*
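These three are the only changes that replace a wrong cast rather than add a missing one: my_strtoll10() returns a longlong, and the old code cast every result to ha_rows even though mean_rec_length is an ulong and update_time/check_time are time_t. A minimal sketch of the corrected shape (hypothetical struct; strtoll() stands in for my_strtoll10()):

    #include <stdlib.h>
    #include <time.h>

    struct stats_t {
      unsigned long mean_rec_length;
      time_t        update_time;
    };

    static void fill_stats(struct stats_t *stats, const char *len_col, const char *upd_col)
    {
      /* cast each parsed longlong to the destination field's real type */
      stats->mean_rec_length= (unsigned long) strtoll(len_col, 0, 10); /* was (ha_rows) */
      stats->update_time=     (time_t)        strtoll(upd_col, 0, 10); /* was (ha_rows) */
    }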
storage/heap/ha_heap.cc

@@ -197,7 +197,7 @@ void ha_heap::update_key_stats()
       else
       {
         ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
-        uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
+        uint no_records= hash_buckets ? (uint) (file->s->records/hash_buckets) : 2;
         if (no_records < 2)
           no_records= 2;
         key->rec_per_key[key->key_parts-1]= no_records;
storage/innobase/handler/ha_innodb.cc

@@ -5765,7 +5765,7 @@ ha_innobase::info(
 			table->key_info[i].rec_per_key[j]=
 			  rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
-			  rec_per_key;
+			  (ulong) rec_per_key;
 		}

 	index = dict_table_get_next_index_noninline(index);
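The InnoDB change is a clamp-then-cast: the statistic is first saturated at ~(ulong)0, so the trailing (ulong) cast can never silently wrap. The idiom in isolation, assuming the source value is a 64-bit unsigned integer:

    /* saturate a wide per-key statistic into an ulong slot */
    static unsigned long clamp_to_ulong(unsigned long long rec_per_key)
    {
      return rec_per_key >= ~(unsigned long) 0 ? ~(unsigned long) 0
                                               : (unsigned long) rec_per_key;
    }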
storage/myisam/ha_myisam.cc

@@ -1461,7 +1461,7 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
   DBUG_ENTER("ha_myisam::start_bulk_insert");
   THD *thd= current_thd;
   ulong size= min(thd->variables.read_buff_size,
-                  table->s->avg_row_length*rows);
+                  (ulong) (table->s->avg_row_length*rows));
   DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
                      (ulong) rows, size));
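In start_bulk_insert() the product avg_row_length*rows is 64-bit because rows is ha_rows, so assigning min()'s result to an ulong warned; casting the operand keeps the whole min() expression at ulong width. A toy version with the classic macro (hypothetical names):

    #define my_min(a, b) ((a) < (b) ? (a) : (b))
    typedef unsigned long long ha_rows;

    static unsigned long bulk_size(unsigned long read_buff_size,
                                   unsigned long avg_row_length, ha_rows rows)
    {
      /* the cast narrows the 64-bit product before my_min() picks a value */
      return my_min(read_buff_size, (unsigned long) (avg_row_length * rows));
    }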
storage/myisam/mi_write.c

@@ -987,7 +987,7 @@ int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
     DBUG_RETURN(0);

   if (rows && rows*total_keylength < cache_size)
-    cache_size=rows;
+    cache_size= (ulong) rows;
   else
     cache_size/=total_keylength*16;
storage/myisam/sort.c

@@ -141,7 +141,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
   if ((records < UINT_MAX32) &&
      ((my_off_t) (records + 1) *
       (sort_length + sizeof(char*)) <= (my_off_t) memavl))
-    keys= records+1;
+    keys= (uint) records+1;
   else
     do
     {
@@ -349,7 +349,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
     sort_keys= (uchar **) NULL;

     memavl= max(sort_param->sortbuff_size, MIN_SORT_MEMORY);
-    idx= sort_param->sort_info->max_records;
+    idx= (uint) sort_param->sort_info->max_records;
     sort_length= sort_param->key_length;
     maxbuffer= 1;
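The first sort.c cast is the safest of the lot: records < UINT_MAX32 is checked immediately before keys= (uint) records+1, so the narrowing is provably lossless on that path; the thr_find_all_keys() change has no such guard. The guard-then-narrow pattern on its own (standalone types):

    #include <stdint.h>

    /* narrow a 64-bit record count only when the guard proves it fits */
    static int narrow_records(uint64_t records, uint32_t *keys)
    {
      if (records < UINT32_MAX)
      {
        *keys= (uint32_t) records + 1;  /* same shape as keys= (uint) records+1 */
        return 0;
      }
      return -1;                        /* caller falls back to the chunked path */
    }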