nexedi / MariaDB

Commit c543a359, authored Jul 03, 2014 by John Esmet
FT-302 Add block allocation strategy to the block allocator. Default to
the one and only strategy so far - first fit.
parent a22faee9

Showing 2 changed files with 72 additions and 36 deletions (+72 −36)
ft/serialize/block_allocator.cc  +60 −36
ft/serialize/block_allocator.h   +12 −0
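In outline, the commit gives block_allocator an allocation_strategy enum, a public set_strategy() method, and a private choose_block_to_alloc_after() dispatcher that alloc_block() now calls instead of open-coding first fit. The sketch below is a minimal, self-contained illustration of that enum-plus-switch dispatch shape only; the toy_allocator class and its members are invented for illustration and are not part of the MariaDB/TokuFT sources.

// Illustrative sketch: mirrors the enum + setter + switch dispatch added in this
// commit, but "toy_allocator" and its members are hypothetical, not the real class.
#include <cstdint>
#include <cstdlib>
#include <vector>

class toy_allocator {
public:
    enum allocation_strategy { BA_STRATEGY_FIRST_FIT = 1 };
    struct blockpair { uint64_t offset; uint64_t size; };

    void set_strategy(allocation_strategy strategy) { _strategy = strategy; }

    // Dispatch on the configured strategy, analogous to choose_block_to_alloc_after().
    blockpair *choose_block_to_alloc_after(uint64_t size, uint64_t alignment) {
        switch (_strategy) {
        case BA_STRATEGY_FIRST_FIT:
            return first_fit(size, alignment);
        default:
            abort();   // unknown strategy
        }
    }

private:
    static uint64_t align(uint64_t value, uint64_t a) { return ((value + a - 1) / a) * a; }

    // First fit: return the block after which the new block fits, or nullptr.
    blockpair *first_fit(uint64_t size, uint64_t alignment) {
        for (size_t i = 0; i + 1 < _blocks.size(); i++) {
            uint64_t candidate = align(_blocks[i].offset + _blocks[i].size, alignment);
            if (candidate + size <= _blocks[i + 1].offset) {
                return &_blocks[i];
            }
        }
        return nullptr;
    }

    allocation_strategy _strategy = BA_STRATEGY_FIRST_FIT;
    std::vector<blockpair> _blocks;   // kept sorted by offset, as in the real allocator
};

The real class keeps its blockpair array sorted by offset, which is what makes the "space after block i" scan in first fit valid.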
ft/serialize/block_allocator.cc
...
...
@@ -119,6 +119,7 @@ void block_allocator::create(uint64_t reserve_at_beginning, uint64_t alignment)
     _blocks_array_size = 1;
     XMALLOC_N(_blocks_array_size, _blocks_array);
     _n_bytes_in_use = reserve_at_beginning;
+    _strategy = BA_STRATEGY_FIRST_FIT;
     VALIDATE();
 }
...
...
@@ -127,6 +128,10 @@ void block_allocator::destroy() {
     toku_free(_blocks_array);
 }
 
+void block_allocator::set_strategy(enum allocation_strategy strategy) {
+    _strategy = strategy;
+}
+
 void block_allocator::grow_blocks_array_by(uint64_t n_to_add) {
     if (_n_blocks + n_to_add > _blocks_array_size) {
         uint64_t new_size = _n_blocks + n_to_add;
...
...
@@ -221,6 +226,34 @@ static inline uint64_t align(uint64_t value, uint64_t ba_alignment) {
     return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment;
 }
 
+static struct block_allocator::blockpair *
+choose_block_first_fit_strategy(struct block_allocator::blockpair *blocks_array,
+                                uint64_t n_blocks, uint64_t size, uint64_t alignment) {
+    // Implement first fit.
+    for (uint64_t blocknum = 0; blocknum + 1 < n_blocks; blocknum++) {
+        // Consider the space after blocknum
+        struct block_allocator::blockpair *bp = &blocks_array[blocknum];
+        uint64_t possible_offset = align(bp->offset + bp->size, alignment);
+        if (possible_offset + size <= bp[1].offset) {
+            return bp;
+        }
+    }
+    return nullptr;
+}
+
+// TODO: other strategies
+// TODO: Put strategies in their own file, ft/serialize/block_allocator_strategy.{cc,h}?
+
+struct block_allocator::blockpair *
+block_allocator::choose_block_to_alloc_after(size_t size) {
+    switch (_strategy) {
+    case BA_STRATEGY_FIRST_FIT:
+        return choose_block_first_fit_strategy(_blocks_array, _n_blocks, size, _alignment);
+    default:
+        abort();
+    }
+}
+
 // Effect: Allocate a block. The resulting block must be aligned on the ba->alignment (which to make direct_io happy must be a positive multiple of 512).
 void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
     // Allocator does not support size 0 blocks. See block_allocator_free_block.
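The align() helper above rounds a value up to the next multiple of ba_alignment using integer division. A small stand-alone check of that arithmetic (illustrative only, not part of the diff):

#include <cassert>
#include <cstdint>

// Same rounding-up formula as the align() helper in the hunk above.
static inline uint64_t align(uint64_t value, uint64_t ba_alignment) {
    return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment;
}

int main() {
    assert(align(1000, 512) == 1024);   // (1000 + 511) / 512 = 2, times 512
    assert(align(1024, 512) == 1024);   // already-aligned values are unchanged
    assert(align(1, 512) == 512);       // any nonzero value rounds up to at least one alignment unit
    return 0;
}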
...
...
@@ -228,6 +261,8 @@ void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
     grow_blocks_array();
     _n_bytes_in_use += size;
 
+    // First and only block
     if (_n_blocks == 0) {
         assert(_n_bytes_in_use == _reserve_at_beginning + size); // we know exactly how many are in use
         _blocks_array[0].offset = align(_reserve_at_beginning, _alignment);
...
...
@@ -237,50 +272,39 @@ void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
         return;
     }
 
-    // Implement first fit.
-    {
-        uint64_t end_of_reserve = align(_reserve_at_beginning, _alignment);
-        if (end_of_reserve + size <= _blocks_array[0].offset) {
-            // Check to see if the space immediately after the reserve is big enough to hold the new block.
-            struct blockpair *bp = &_blocks_array[0];
-            memmove(bp + 1, bp, _n_blocks * sizeof(*bp));
-            bp[0].offset = end_of_reserve;
-            bp[0].size = size;
-            _n_blocks++;
-            *offset = end_of_reserve;
-            VALIDATE();
-            return;
-        }
+    // Check to see if the space immediately after the reserve is big enough to hold the new block.
+    uint64_t end_of_reserve = align(_reserve_at_beginning, _alignment);
+    if (end_of_reserve + size <= _blocks_array[0].offset) {
+        struct blockpair *bp = &_blocks_array[0];
+        memmove(bp + 1, bp, _n_blocks * sizeof(*bp));
+        bp[0].offset = end_of_reserve;
+        bp[0].size = size;
+        _n_blocks++;
+        *offset = end_of_reserve;
+        VALIDATE();
+        return;
+    }
 
-        for (uint64_t blocknum = 0; blocknum + 1 < _n_blocks; blocknum++) {
-            // Consider the space after blocknum
-            struct blockpair *bp = &_blocks_array[blocknum];
-            uint64_t this_offset = bp[0].offset;
-            uint64_t this_size = bp[0].size;
-            uint64_t answer_offset = align(this_offset + this_size, _alignment);
-            if (answer_offset + size > bp[1].offset) {
-                continue; // The block we want doesn't fit after this block.
-            }
-            // It fits, so allocate it here.
+    struct blockpair *bp = choose_block_first_fit_strategy(_blocks_array, _n_blocks, size, _alignment);
+    if (bp != nullptr) {
+        // our allocation strategy chose the space after `bp' to fit the new block
+        uint64_t answer_offset = align(bp->offset + bp->size, _alignment);
+        uint64_t blocknum = bp - _blocks_array;
+        assert(&_blocks_array[blocknum] == bp);
         memmove(bp + 2, bp + 1, (_n_blocks - blocknum - 1) * sizeof(*bp));
         bp[1].offset = answer_offset;
         bp[1].size = size;
         _n_blocks++;
         *offset = answer_offset;
         VALIDATE();
         return;
+    } else {
+        // It didn't fit anywhere, so fit it on the end.
+        assert(_n_blocks < _blocks_array_size);
+        bp = &_blocks_array[_n_blocks];
+        uint64_t answer_offset = align(bp[-1].offset + bp[-1].size, _alignment);
+        bp->offset = answer_offset;
+        bp->size = size;
+        *offset = answer_offset;
     }
-
-    // It didn't fit anywhere, so fit it on the end.
-    assert(_n_blocks < _blocks_array_size);
-    struct blockpair *bp = &_blocks_array[_n_blocks];
-    uint64_t answer_offset = align(bp[-1].offset + bp[-1].size, _alignment);
-    bp->offset = answer_offset;
-    bp->size = size;
     _n_blocks++;
     *offset = answer_offset;
     VALIDATE();
 }
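To make the control flow above concrete, consider a hypothetical allocator with alignment 512 and two blocks already placed at (offset 512, size 100) and (offset 1024, size 512). For a request of size 300, first fit examines the gap after block 0: align(512 + 100, 512) = 1024, and 1024 + 300 exceeds the next block's offset of 1024, so that gap is rejected. choose_block_first_fit_strategy() therefore returns nullptr, and the else branch places the new block at the end of the array, at align(1024 + 512, 512) = 1536. These numbers are illustrative only; they are not taken from the commit.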
...
...
ft/serialize/block_allocator.h
...
...
@@ -124,7 +124,12 @@ class block_allocator {
     static const size_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE = BLOCK_ALLOCATOR_HEADER_RESERVE * 2;
 
+    enum allocation_strategy {
+        BA_STRATEGY_FIRST_FIT = 1
+    };
+
     // Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block.
+    //  The default allocation strategy is first fit (BA_STRATEGY_FIRST_FIT)
     //  All blocks be start on a multiple of ALIGNMENT.
     //  Aborts if we run out of memory.
     // Parameters
...
...
@@ -135,6 +140,10 @@ class block_allocator {
     // Effect: Destroy this block allocator
     void destroy();
 
+    // Effect: Set the allocation strategy that the allocator should use
+    // Requires: No other threads are operating on this block allocator
+    void set_strategy(enum allocation_strategy strategy);
+
     // Effect: Allocate a block of the specified size at a particular offset.
     //  Aborts if anything goes wrong.
     //  The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
...
...
@@ -219,6 +228,7 @@ class block_allocator {
     void grow_blocks_array_by(uint64_t n_to_add);
     void grow_blocks_array();
     int64_t find_block(uint64_t offset);
+    struct blockpair *choose_block_to_alloc_after(size_t size);
     static int compare_blockpairs(const void *av, const void *bv);
...
...
@@ -234,4 +244,6 @@ class block_allocator {
     struct blockpair *_blocks_array;
     // Including the reserve_at_beginning
     uint64_t _n_bytes_in_use;
 
+    // The allocation strategy are we using
+    enum allocation_strategy _strategy;
 };
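Putting the header changes together, a caller-side sketch of the new API might look like the following. Only the member signatures visible in this diff are relied on; the stack allocation, the chosen reserve and alignment values, and the surrounding example function are assumptions for illustration, not code from the commit.

#include <cstdint>
#include "ft/serialize/block_allocator.h"

void example_usage() {
    block_allocator ba;
    ba.create(4096 /* reserve_at_beginning */, 512 /* alignment */);

    // First fit is already the default after create(); setting it explicitly
    // here just exercises the new set_strategy() entry point.
    ba.set_strategy(block_allocator::BA_STRATEGY_FIRST_FIT);

    uint64_t offset;
    ba.alloc_block(1000, &offset);   // offset comes back as a multiple of 512 past the reserve

    ba.destroy();
}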