Commit e03ab9d4 authored Jun 17, 2009 by Pekka Enberg
Merge branches 'slab/documentation', 'slab/fixes', 'slob/cleanups' and 'slub/fixes' into for-linus
Parents: 65795efb a234bdc9 67461365 7303f240 95f85989
Showing 5 changed files with 84 additions and 24 deletions (+84, -24):

  include/linux/page-flags.h   +0   -2
  mm/slab.c                    +9   -0
  mm/slob.c                    +3   -3
  mm/slub.c                   +68  -19
  mm/util.c                    +4   -0
include/linux/page-flags.h

@@ -118,7 +118,6 @@ enum pageflags {
 	PG_savepinned = PG_dirty,
 	/* SLOB */
-	PG_slob_page = PG_active,
 	PG_slob_free = PG_private,
 	/* SLUB */

@@ -201,7 +200,6 @@ PAGEFLAG(SavePinned, savepinned);	/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
-__PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
 __PAGEFLAG(SlubFrozen, slub_frozen)
mm/slab.c

@@ -2308,6 +2308,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		/* really off slab. No need for manual alignment */
 		slab_size =
 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+
+#ifdef CONFIG_PAGE_POISONING
+		/* If we're going to use the generic kernel_map_pages()
+		 * poisoning, then it's going to smash the contents of
+		 * the redzone and userword anyhow, so switch them off.
+		 */
+		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
 	}

 	cachep->colour_off = cache_line_size();
mm/slob.c

@@ -133,17 +133,17 @@ static LIST_HEAD(free_slob_large);
  */
 static inline int is_slob_page(struct slob_page *sp)
 {
-	return PageSlobPage((struct page *)sp);
+	return PageSlab((struct page *)sp);
 }

 static inline void set_slob_page(struct slob_page *sp)
 {
-	__SetPageSlobPage((struct page *)sp);
+	__SetPageSlab((struct page *)sp);
 }

 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__ClearPageSlobPage((struct page *)sp);
+	__ClearPageSlab((struct page *)sp);
 }

 static inline struct slob_page *slob_page(const void *addr)
mm/slub.c

@@ -840,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }

+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);

@@ -1058,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+							{ return 0; }
 static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,

@@ -1514,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }

+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
+static noinline void slab_out_of_memory(struct kmem_cache *s,
+						gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free  = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs  = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.

@@ -1595,6 +1661,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		c->page = new;
 		goto load_freelist;
 	}
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))

@@ -3368,20 +3436,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }

 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;

@@ -3392,11 +3446,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }

-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
mm/util.c

@@ -168,6 +168,10 @@ EXPORT_SYMBOL(krealloc);
  *
  * The memory of the object @p points to is zeroed before freed.
  * If @p is %NULL, kzfree() does nothing.
+ *
+ * Note: this function zeroes the whole allocated buffer which can be a good
+ * deal bigger than the requested buffer size passed to kmalloc(). So be
+ * careful when using this function in performance sensitive code.
+ */
 void kzfree(const void *p)
 {
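The new kzfree() note documents that the whole underlying allocation is cleared, which may be larger than the size originally passed to kmalloc() because the allocator rounds sizes up. A minimal, hypothetical caller sketch (the buffer name and sizes below are illustrative and not part of this commit) showing where kzfree() fits:

	/* Hypothetical use: wipe key material when releasing the buffer. */
	u8 *key = kmalloc(32, GFP_KERNEL);	/* allocator may round up, e.g. to 64 bytes */
	if (!key)
		return -ENOMEM;
	get_random_bytes(key, 32);
	/* ... use the key ... */
	kzfree(key);	/* zeroes the entire rounded-up allocation, then frees it */

Because the full allocation is zeroed, the note cautions against calling kzfree() on hot paths where the extra memset cost matters.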