Kirill Smelkov / linux / Commits

Commit 6892188b
authored Apr 28, 2002 by Andrew Morton
committed by Jaroslav Kysela, Apr 28, 2002
[PATCH] Re: [patch] change mempool to not alter managed elements
Here's an array-based implementation.
parent 41f2642b
Showing 3 changed files with 77 additions and 100 deletions (+77 -100):
  include/linux/mempool.h    +6    -9
  lib/radix-tree.c           +1   -11
  mm/mempool.c              +70   -80
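The gist of the change: the old mempool kept its idle elements on a struct list_head threaded through the elements' own memory, so every pooled object had its first bytes overwritten while it sat in the pool. The new implementation keeps a separate pool->elements pointer array and treats it as a LIFO stack, leaving the managed memory untouched. A minimal userspace sketch of that stack discipline (add_element/remove_element mirror the patch; the mini_pool type and the main() harness are invented for illustration):

    #include <assert.h>
    #include <stdlib.h>

    /* Miniature of the patched bookkeeping: free elements are remembered in
     * a separate pointer array, so pooled objects are never written to. */
    struct mini_pool {
            int min_nr;     /* size of the elements array */
            int curr_nr;    /* number of free elements currently stored */
            void **elements;
    };

    static void add_element(struct mini_pool *pool, void *element)
    {
            assert(pool->curr_nr < pool->min_nr);
            pool->elements[pool->curr_nr++] = element;      /* push */
    }

    static void *remove_element(struct mini_pool *pool)
    {
            assert(pool->curr_nr > 0);
            return pool->elements[--pool->curr_nr];         /* pop */
    }

    int main(void)
    {
            struct mini_pool pool = { 4, 0, malloc(4 * sizeof(void *)) };
            char *obj = malloc(64);

            obj[0] = 'x';                   /* element content ...          */
            add_element(&pool, obj);        /* ... survives pooling intact  */
            assert(remove_element(&pool) == obj && obj[0] == 'x');

            free(obj);
            free(pool.elements);
            return 0;
    }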
include/linux/mempool.h

@@ -4,28 +4,25 @@
 #ifndef _LINUX_MEMPOOL_H
 #define _LINUX_MEMPOOL_H
 
-#include <linux/list.h>
 #include <linux/wait.h>
 
-struct mempool_s;
-typedef struct mempool_s mempool_t;
-
 typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
 typedef void (mempool_free_t)(void *element, void *pool_data);
 
-struct mempool_s {
+typedef struct mempool_s {
 	spinlock_t lock;
-	int min_nr, curr_nr;
-	struct list_head elements;
+	int min_nr;		/* nr of elements at *elements */
+	int curr_nr;		/* Current nr of elements at *elements */
+	void **elements;
 
 	void *pool_data;
 	mempool_alloc_t *alloc;
 	mempool_free_t *free;
 	wait_queue_head_t wait;
-};
+} mempool_t;
 extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 				 mempool_free_t *free_fn, void *pool_data);
-extern void mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
 extern void mempool_destroy(mempool_t *pool);
 extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
 extern void mempool_free(void *element, mempool_t *pool);
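For orientation, here is a sketch of how kernel code of this vintage would consume the header above. The cache-backed callbacks and the example() function are invented for illustration; only the mempool_* calls and typedefs come from the patch:

    #include <linux/mempool.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Illustrative callbacks with the mempool_alloc_t / mempool_free_t
     * signatures; pool_data carries the kmem_cache_t backing the elements. */
    static void *cache_alloc(int gfp_mask, void *pool_data)
    {
    	return kmem_cache_alloc((kmem_cache_t *)pool_data, gfp_mask);
    }

    static void cache_free(void *element, void *pool_data)
    {
    	kmem_cache_free((kmem_cache_t *)pool_data, element);
    }

    static int example(kmem_cache_t *cachep)
    {
    	mempool_t *pool;
    	void *obj;

    	/* Reserve eight elements up front. */
    	pool = mempool_create(8, cache_alloc, cache_free, cachep);
    	if (!pool)
    		return -ENOMEM;

    	obj = mempool_alloc(pool, GFP_KERNEL);	/* may dip into the reserve */
    	if (obj)
    		mempool_free(obj, pool);	/* must come back before destroy */

    	mempool_destroy(pool);
    	return 0;
    }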
lib/radix-tree.c

@@ -50,20 +50,10 @@ struct radix_tree_path {
 static kmem_cache_t *radix_tree_node_cachep;
 static mempool_t *radix_tree_node_pool;
 
-/*
- * mempool scribbles on the first eight bytes of the managed
- * memory. Here we implement a temp workaround for that.
- */
-#include <linux/list.h>
 static inline struct radix_tree_node *
 radix_tree_node_alloc(struct radix_tree_root *root)
 {
-	struct radix_tree_node *ret;
-
-	ret = mempool_alloc(radix_tree_node_pool, root->gfp_mask);
-	if (ret)
-		memset(ret, 0, sizeof(struct list_head));
-	return ret;
+	return mempool_alloc(radix_tree_node_pool, root->gfp_mask);
 }
 
 static inline void
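With the bookkeeping moved out of the managed objects, the temporary workaround above loses its reason to exist: the pool no longer scribbles a list_head over the first bytes of each free radix_tree_node, so radix_tree_node_alloc() can simply return what mempool_alloc() hands back, without re-zeroing anything.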
mm/mempool.c

@@ -13,6 +13,28 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 
+static void add_element(mempool_t *pool, void *element)
+{
+	BUG_ON(pool->curr_nr >= pool->min_nr);
+	pool->elements[pool->curr_nr++] = element;
+}
+
+static void *remove_element(mempool_t *pool)
+{
+	BUG_ON(pool->curr_nr <= 0);
+	return pool->elements[--pool->curr_nr];
+}
+
+static void free_pool(mempool_t *pool)
+{
+	while (pool->curr_nr) {
+		void *element = remove_element(pool);
+		pool->free(element, pool->pool_data);
+	}
+	kfree(pool->elements);
+	kfree(pool);
+}
+
 /**
  * mempool_create - create a memory pool
  * @min_nr:    the minimum number of elements guaranteed to be
@@ -25,27 +47,25 @@
  * memory pool. The pool can be used from the mempool_alloc and mempool_free
  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
  * functions might sleep - as long as the mempool_alloc function is not called
- * from IRQ contexts. The element allocated by alloc_fn() must be able to
- * hold a struct list_head. (8 bytes on x86.)
+ * from IRQ contexts.
  */
 mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 				mempool_free_t *free_fn, void *pool_data)
 {
 	mempool_t *pool;
-	int i;
-
-	BUG_ON(!alloc_fn);
-	BUG_ON(!free_fn);
 
 	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return NULL;
 	memset(pool, 0, sizeof(*pool));
+	pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
+	if (!pool->elements) {
+		kfree(pool);
+		return NULL;
+	}
 	spin_lock_init(&pool->lock);
 	pool->min_nr = min_nr;
 	pool->pool_data = pool_data;
-	INIT_LIST_HEAD(&pool->elements);
 	init_waitqueue_head(&pool->wait);
 	pool->alloc = alloc_fn;
 	pool->free = free_fn;

@@ -53,27 +73,15 @@ mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 	/*
 	 * First pre-allocate the guaranteed number of buffers.
 	 */
-	for (i = 0; i < min_nr; i++) {
+	while (pool->curr_nr < pool->min_nr) {
 		void *element;
-		struct list_head *tmp;
 
 		element = pool->alloc(GFP_KERNEL, pool->pool_data);
 		if (unlikely(!element)) {
-			/*
-			 * Not enough memory - free the allocated ones
-			 * and return:
-			 */
-			list_for_each(tmp, &pool->elements) {
-				element = tmp;
-				pool->free(element, pool->pool_data);
-			}
-			kfree(pool);
-
+			free_pool(pool);
 			return NULL;
 		}
-		tmp = element;
-		list_add(tmp, &pool->elements);
-		pool->curr_nr++;
+		add_element(pool, element);
 	}
 	return pool;
 }
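Note the simplified failure path in mempool_create(): if a preallocation fails part-way through, free_pool() hands the already-acquired elements back via pool->free() and releases both the elements array and the pool itself, replacing the old open-coded list walk.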
@@ -94,53 +102,54 @@ mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
  */
-void mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
 {
-	int delta;
 	void *element;
+	void **new_elements;
 	unsigned long flags;
-	struct list_head *tmp;
 
-	if (new_min_nr <= 0)
-		BUG();
+	BUG_ON(new_min_nr <= 0);
 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (new_min_nr < pool->min_nr) {
-		pool->min_nr = new_min_nr;
-		/*
-		 * Free possible excess elements.
-		 */
-		while (pool->curr_nr > pool->min_nr) {
-			tmp = pool->elements.next;
-			if (tmp == &pool->elements)
-				BUG();
-			list_del(tmp);
-			element = tmp;
-			pool->curr_nr--;
+		while (pool->curr_nr > new_min_nr) {
+			element = remove_element(pool);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			pool->free(element, pool->pool_data);
 			spin_lock_irqsave(&pool->lock, flags);
 		}
-		spin_unlock_irqrestore(&pool->lock, flags);
-		return;
+		pool->min_nr = new_min_nr;
+		goto out_unlock;
 	}
-	delta = new_min_nr - pool->min_nr;
-	pool->min_nr = new_min_nr;
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	/*
-	 * We refill the pool up to the new treshold - but we dont
-	 * (cannot) guarantee that the refill succeeds.
-	 */
-	while (delta) {
+	/* Grow the pool */
+	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+	if (!new_elements)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	memcpy(new_elements, pool->elements,
+			pool->curr_nr * sizeof(*new_elements));
+	kfree(pool->elements);
+	pool->elements = new_elements;
+	pool->min_nr = new_min_nr;
+
+	while (pool->curr_nr < pool->min_nr) {
+		spin_unlock_irqrestore(&pool->lock, flags);
 		element = pool->alloc(gfp_mask, pool->pool_data);
 		if (!element)
-			break;
-		mempool_free(element, pool);
-		delta--;
+			goto out;
+		spin_lock_irqsave(&pool->lock, flags);
+		if (pool->curr_nr < pool->min_nr)
+			add_element(pool, element);
+		else
+			kfree(element);		/* Raced */
 	}
+out_unlock:
+	spin_unlock_irqrestore(&pool->lock, flags);
+out:
+	return 0;
 }
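Growing the pool now has a step that can itself fail, namely allocating the larger elements array, so mempool_resize() gains an int return and reports -ENOMEM instead of silently doing a best-effort refill. A hypothetical caller (pool and old_min_nr are invented identifiers) would check it:

    /* Try to double the reserve; tolerate failure and keep the old size. */
    if (mempool_resize(pool, 2 * old_min_nr, GFP_KERNEL) != 0)
    	printk(KERN_WARNING "could not grow mempool, keeping %d elements\n",
    			old_min_nr);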
@@ -149,27 +158,14 @@ void mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
  * mempool_create().
  *
  * this function only sleeps if the free_fn() function sleeps. The caller
- * has to guarantee that no mempool_alloc() nor mempool_free() happens in
- * this pool when calling this function.
+ * has to guarantee that all elements have been returned to the pool (ie:
+ * freed) prior to calling mempool_destroy().
  */
 void mempool_destroy(mempool_t *pool)
 {
-	void *element;
-	struct list_head *head, *tmp;
-
-	if (!pool)
-		return;
-
-	head = &pool->elements;
-	for (tmp = head->next; tmp != head; ) {
-		element = tmp;
-		tmp = tmp->next;
-		pool->free(element, pool->pool_data);
-		pool->curr_nr--;
-	}
-	if (pool->curr_nr)
-		BUG();
-	kfree(pool);
+	if (pool->curr_nr != pool->min_nr)
+		BUG();		/* There were outstanding elements */
+	free_pool(pool);
 }
@@ -187,7 +183,6 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask)
 {
 	void *element;
 	unsigned long flags;
-	struct list_head *tmp;
 	int curr_nr;
 	DECLARE_WAITQUEUE(wait, current);
 	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
@@ -214,10 +209,7 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask)
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		tmp = pool->elements.next;
-		list_del(tmp);
-		element = tmp;
-		pool->curr_nr--;
+		element = remove_element(pool);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		return element;
 	}
@@ -260,8 +252,7 @@ void mempool_free(void *element, mempool_t *pool)
 	if (pool->curr_nr < pool->min_nr) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (pool->curr_nr < pool->min_nr) {
-			list_add(element, &pool->elements);
-			pool->curr_nr++;
+			add_element(pool, element);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			wake_up(&pool->wait);
 			return;
@@ -276,4 +267,3 @@ EXPORT_SYMBOL(mempool_resize);
...
@@ -276,4 +267,3 @@ EXPORT_SYMBOL(mempool_resize);
EXPORT_SYMBOL
(
mempool_destroy
);
EXPORT_SYMBOL
(
mempool_destroy
);
EXPORT_SYMBOL
(
mempool_alloc
);
EXPORT_SYMBOL
(
mempool_alloc
);
EXPORT_SYMBOL
(
mempool_free
);
EXPORT_SYMBOL
(
mempool_free
);