nexedi / MariaDB / Commits

Commit 23dc8abe
Authored Sep 12, 2009 by Mattias Jonsson

merge

Parents: b6c16b32, 35d6911b
Showing 2 changed files with 91 additions and 14 deletions (+91 -14):

  sql/ha_partition.cc   +82 -13
  sql/ha_partition.h    +9  -1
sql/ha_partition.cc
@@ -239,6 +239,7 @@ void ha_partition::init_handler_variables()
   m_curr_key_info[0]= NULL;
   m_curr_key_info[1]= NULL;
   is_clone= FALSE,
+  m_part_func_monotonicity_info= NON_MONOTONIC;
   auto_increment_lock= FALSE;
   auto_increment_safe_stmt_log_lock= FALSE;
   /*
@@ -2465,11 +2466,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     }
   }
+  /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
+  if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
+    DBUG_RETURN(1);
+  bitmap_clear_all(&m_bulk_insert_started);
   /* Initialize the bitmap we use to determine what partitions are used */
   if (!is_clone)
   {
     if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
+    {
+      bitmap_free(&m_bulk_insert_started);
       DBUG_RETURN(1);
+    }
     bitmap_set_all(&(m_part_info->used_partitions));
   }
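The bitmap above is allocated with m_tot_parts + 1 bits on purpose: bits 0..m_tot_parts-1 track which partitions have had ha_start_bulk_insert called, and the extra last bit records whether a bulk insert is active at all (it is armed in start_bulk_insert further down). A minimal standalone sketch of this sentinel-bit pattern, using std::vector<bool> in place of MY_BITMAP; BulkState and its members are illustrative names, not MySQL's API:

#include <cstdio>
#include <vector>

// Bits 0..parts-1: which partitions have had bulk insert started lazily.
// Bit `parts` (the sentinel): whether a bulk insert is active at all.
struct BulkState {
  std::vector<bool> started;  // parts + 1 bits, the last one is the sentinel
  unsigned parts;

  explicit BulkState(unsigned n) : started(n + 1, false), parts(n) {}

  void begin() {              // start_bulk_insert analogue
    started.assign(parts + 1, false);
    started[parts] = true;    // arm the sentinel bit
  }
  // First touch of a partition during an active bulk insert returns true,
  // i.e. exactly when the underlying handler should be put in bulk mode.
  bool mark(unsigned part) {
    if (!started[parts] || started[part])
      return false;
    started[part] = true;
    return true;
  }
  void end() {                // end_bulk_insert analogue
    started.assign(parts + 1, false);
  }
};

int main() {
  BulkState st(4);
  st.begin();
  std::printf("%d\n", (int) st.mark(2)); // 1: first row into partition 2
  std::printf("%d\n", (int) st.mark(2)); // 0: already in bulk mode
  st.end();
  std::printf("%d\n", (int) st.mark(2)); // 0: no bulk insert active
  return 0;
}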
@@ -2553,12 +2561,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     calling open on all individual handlers.
   */
   m_handler_status= handler_opened;
+  if (m_part_info->part_expr)
+    m_part_func_monotonicity_info=
+      m_part_info->part_expr->get_monotonicity_info();
+  else if (m_part_info->list_of_part_fields)
+    m_part_func_monotonicity_info= MONOTONIC_STRICT_INCREASING;
   info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
   DBUG_RETURN(0);

 err_handler:
   while (file-- != m_file)
     (*file)->close();
+  bitmap_free(&m_bulk_insert_started);
   if (!is_clone)
     bitmap_free(&(m_part_info->used_partitions));
@@ -2606,6 +2620,7 @@ int ha_partition::close(void)
   DBUG_ASSERT(table->s == table_share);
   delete_queue(&m_queue);
+  bitmap_free(&m_bulk_insert_started);
   if (!is_clone)
     bitmap_free(&(m_part_info->used_partitions));
   file= m_file;
@@ -3022,6 +3037,8 @@ int ha_partition::write_row(uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
+  start_part_bulk_insert(part_id);
+
   tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
   error= m_file[part_id]->ha_write_row(buf);
   if (have_auto_increment && !table->s->next_number_keypart)
@@ -3084,6 +3101,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   }
   m_last_part= new_part_id;
+  start_part_bulk_insert(new_part_id);
   if (new_part_id == old_part_id)
   {
     DBUG_PRINT("info", ("Update in partition %d", new_part_id));
@@ -3248,22 +3266,65 @@ int ha_partition::delete_all_rows()
   DESCRIPTION
     rows == 0 means we will probably insert many rows
 */
 void ha_partition::start_bulk_insert(ha_rows rows)
 {
-  handler **file;
   DBUG_ENTER("ha_partition::start_bulk_insert");

-  rows= rows ? rows / m_tot_parts + 1 : 0;
-  file= m_file;
-  do
-  {
-    (*file)->ha_start_bulk_insert(rows);
-  } while (*(++file));
+  m_bulk_inserted_rows= 0;
+  bitmap_clear_all(&m_bulk_insert_started);
+  /* use the last bit for marking if bulk_insert_started was called */
+  bitmap_set_bit(&m_bulk_insert_started, m_tot_parts);
   DBUG_VOID_RETURN;
 }


+/*
+  Check if start_bulk_insert has been called for this partition,
+  if not, call it and mark it called
+*/
+void ha_partition::start_part_bulk_insert(uint part_id)
+{
+  if (!bitmap_is_set(&m_bulk_insert_started, part_id) &&
+      bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
+  {
+    m_file[part_id]->ha_start_bulk_insert(guess_bulk_insert_rows());
+    bitmap_set_bit(&m_bulk_insert_started, part_id);
+  }
+  m_bulk_inserted_rows++;
+}
+
+
+/*
+  Try to predict the number of inserts into this partition.
+
+  If less than 10 rows (including 0 which means Unknown)
+    just give that as a guess
+  If monotonic partitioning function was used
+    guess that 50 % of the inserts goes to the first partition
+  For all other cases, guess on equal distribution between the partitions
+*/
+ha_rows ha_partition::guess_bulk_insert_rows()
+{
+  DBUG_ENTER("guess_bulk_insert_rows");
+
+  if (estimation_rows_to_insert < 10)
+    DBUG_RETURN(estimation_rows_to_insert);
+
+  /* If first insert/partition and monotonic partition function, guess 50%. */
+  if (!m_bulk_inserted_rows &&
+      m_part_func_monotonicity_info != NON_MONOTONIC &&
+      m_tot_parts > 1)
+    DBUG_RETURN(estimation_rows_to_insert / 2);
+
+  /* Else guess on equal distribution (+1 is to avoid returning 0/Unknown) */
+  if (m_bulk_inserted_rows < estimation_rows_to_insert)
+    DBUG_RETURN(((estimation_rows_to_insert - m_bulk_inserted_rows)
+                 / m_tot_parts) + 1);
+  /* The estimation was wrong, must say 'Unknown' */
+  DBUG_RETURN(0);
+}
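A standalone model of the guess_bulk_insert_rows() heuristic above, with the handler's member fields passed as plain parameters (guess_rows and its parameter names are illustrative, not the class API). Worked numbers: for 1000 estimated rows over 4 partitions with a monotonic partition function, the first partition touched gets a hint of 500; after 500 rows the next partition gets (1000 - 500) / 4 + 1 = 126; once the estimate is exhausted the hint falls back to 0 (Unknown):

#include <cstdio>

typedef unsigned long long ha_rows_t;  // stand-in for MySQL's ha_rows

ha_rows_t guess_rows(ha_rows_t to_insert,  // estimation_rows_to_insert
                     ha_rows_t inserted,   // m_bulk_inserted_rows
                     unsigned tot_parts,   // m_tot_parts
                     bool monotonic)       // monotonicity != NON_MONOTONIC
{
  if (to_insert < 10)            // tiny batch, or 0 meaning Unknown: pass through
    return to_insert;
  if (!inserted && monotonic && tot_parts > 1)
    return to_insert / 2;        // sequential keys pile into one partition
  if (inserted < to_insert)      // spread the remainder evenly, never return 0
    return (to_insert - inserted) / tot_parts + 1;
  return 0;                      // estimate exhausted: report Unknown
}

int main() {
  std::printf("%llu\n", guess_rows(1000, 0, 4, true));    // 500
  std::printf("%llu\n", guess_rows(1000, 500, 4, true));  // 126
  std::printf("%llu\n", guess_rows(1000, 1200, 4, true)); // 0
  return 0;
}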
 /*
   Finish a large batch of insert rows
@@ -3273,21 +3334,29 @@ void ha_partition::start_bulk_insert(ha_rows rows)
   RETURN VALUE
     >0    Error code
     0     Success
+
+  Note: end_bulk_insert can be called without start_bulk_insert
+        being called, see bug#44108.
 */

 int ha_partition::end_bulk_insert()
 {
   int error= 0;
-  handler **file;
+  uint i;
   DBUG_ENTER("ha_partition::end_bulk_insert");

-  file= m_file;
-  do
+  if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
+    DBUG_RETURN(error);
+
+  for (i= 0; i < m_tot_parts; i++)
   {
     int tmp;
-    if ((tmp= (*file)->ha_end_bulk_insert()))
+    if (bitmap_is_set(&m_bulk_insert_started, i) &&
+        (tmp= m_file[i]->ha_end_bulk_insert()))
       error= tmp;
-  } while (*(++file));
+  }
+  bitmap_clear_all(&m_bulk_insert_started);
   DBUG_RETURN(error);
 }
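A minimal sketch of the two behaviors the rewritten end_bulk_insert guarantees, with the handler call simulated by fake_end_bulk_insert (all names here are illustrative): it returns immediately when start_bulk_insert was never called (the bug#44108 case, detected via the sentinel bit), and it visits every started partition even after one reports an error, keeping the last nonzero error code:

#include <cstdio>
#include <vector>

static int fake_end_bulk_insert(unsigned part) {
  return part == 1 ? 122 : 0;  // pretend partition 1 fails with an HA_ERR-style code
}

static int end_bulk_insert_model(const std::vector<bool>& started,
                                 unsigned tot_parts) {
  int error = 0;
  if (!started[tot_parts])     // sentinel bit clear: start was never called
    return error;
  for (unsigned i = 0; i < tot_parts; i++) {
    int tmp;
    if (started[i] && (tmp = fake_end_bulk_insert(i)))
      error = tmp;             // remember the error but keep finishing partitions
  }
  return error;
}

int main() {
  std::vector<bool> started(4 + 1, false);
  std::printf("%d\n", end_bulk_insert_model(started, 4)); // 0: bug#44108 case
  started[4] = true;               // sentinel: a bulk insert is active
  started[1] = started[3] = true;  // only partitions 1 and 3 received rows
  std::printf("%d\n", end_bulk_insert_model(started, 4)); // 122: error kept
  return 0;
}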
sql/ha_partition.h
@@ -176,6 +176,11 @@ class ha_partition :public handler
     This to ensure it will work with statement based replication.
   */
   bool auto_increment_safe_stmt_log_lock;
+  /** For optimizing ha_start_bulk_insert calls */
+  MY_BITMAP m_bulk_insert_started;
+  ha_rows   m_bulk_inserted_rows;
+  /** used for prediction of start_bulk_insert rows */
+  enum_monotonicity_info m_part_func_monotonicity_info;
 public:
   handler *clone(MEM_ROOT *mem_root);
   virtual void set_part_info(partition_info *part_info)
@@ -353,7 +358,6 @@ class ha_partition :public handler
     Bulk inserts are supported if all underlying handlers support it.
     start_bulk_insert and end_bulk_insert is called before and after a
     number of calls to write_row.
-    Not yet though.
   */
   virtual int write_row(uchar * buf);
   virtual int update_row(const uchar *old_data, uchar *new_data);
@@ -361,6 +365,10 @@ class ha_partition :public handler
   virtual int delete_all_rows(void);
   virtual void start_bulk_insert(ha_rows rows);
   virtual int end_bulk_insert();
+private:
+  ha_rows guess_bulk_insert_rows();
+  void start_part_bulk_insert(uint part_id);
+public:
   virtual bool is_fatal_error(int error, uint flags)
   {