Commit 355d79c8 authored Dec 12, 2009 by Pekka Enberg
Merge branches 'slab/fixes', 'slab/kmemleak', 'slub/perf' and 'slub/stats' into for-linus
parents 053fe57a 8e15b79c ddbf2e83 74e2134f 78eb00cc
Showing 3 changed files with 145 additions and 102 deletions.
Documentation/ABI/testing/sysfs-kernel-slab    +58 −51
mm/slab.c                                      +69 −49
mm/slub.c                                      +18 −2
Documentation/ABI/testing/sysfs-kernel-slab
(This diff is collapsed.)
mm/slab.c
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+	NONE,
+	PARTIAL_AC,
+	PARTIAL_L3,
+	EARLY,
+	FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
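The block added above is moved, not new: a later hunk in this file removes it from its original spot near line 665. The move is needed because the reworked lockdep code just below now reads g_cpucache_up, which was previously defined after it. The move also keeps slab_is_available() next to the state it reports; boot code uses it to decide whether kmalloc() is usable yet. A minimal illustration of that call pattern, with a hypothetical early-boot caller (alloc_bootmem() was the usual pre-slab fallback at the time):

	if (slab_is_available())
		buf = kmalloc(size, GFP_KERNEL);	/* general caches are up */
	else
		buf = alloc_bootmem(size);		/* still too early for slab */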
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-	int q;
 	struct cache_sizes *s = malloc_sizes;
 
-	while (s->cs_size != ULONG_MAX) {
-		for_each_node(q) {
-			struct array_cache **alc;
-			int r;
-			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-			if (!l3 || OFF_SLAB(s->cs_cachep))
-				continue;
-			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-			alc = l3->alien;
-			/*
-			 * FIXME: This check for BAD_ALIEN_MAGIC
-			 * should go away when common slab code is taught to
-			 * work even without alien caches.
-			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-			 * for alloc_alien_cache,
-			 */
-			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-				continue;
-			for_each_node(r) {
-				if (alc[r])
-					lockdep_set_class(&alc[r]->lock,
-					     &on_slab_alc_key);
-			}
-		}
-		s++;
+	if (g_cpucache_up != FULL)
+		return;
+
+	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+		struct array_cache **alc;
+		struct kmem_list3 *l3;
+		int r;
+
+		l3 = s->cs_cachep->nodelists[q];
+		if (!l3 || OFF_SLAB(s->cs_cachep))
+			return;
+		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+		alc = l3->alien;
+		/*
+		 * FIXME: This check for BAD_ALIEN_MAGIC
+		 * should go away when common slab code is taught to
+		 * work even without alien caches.
+		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+		 * for alloc_alien_cache,
+		 */
+		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+			return;
+		for_each_node(r) {
+			if (alc[r])
+				lockdep_set_class(&alc[r]->lock,
+					&on_slab_alc_key);
+		}
 	}
 }
+
+static inline void init_lock_keys(void)
+{
+	int node;
+
+	for_each_node(node)
+		init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	init_node_lock_keys(node);
+
 	return 0;
 bad:
 	cpuup_canceled(cpu);
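This is the other half of the lockdep rework: init_lock_keys() still annotates every node at the end of boot, while cpuup_prepare() now (re)annotates just the node whose CPU came online, and the g_cpucache_up != FULL guard in init_node_lock_keys() makes the boot-time invocations harmless no-ops. A sketch of the two call paths, assuming the surrounding v2.6.32-era code (kmem_cache_init_late() is abridged and not part of this diff):

	/* Boot: after the last general cache is set up. */
	void __init kmem_cache_init_late(void)
	{
		/* ... */
		g_cpucache_up = FULL;
		init_lock_keys();	/* for_each_node(n) init_node_lock_keys(n) */
	}

	/* Hotplug: only the node that just gained a CPU. */
	init_node_lock_keys(cpu_to_node(cpu));	/* as added to cpuup_prepare() above */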
@@ -3103,13 +3117,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
+		/*
+		 * the 'ac' may be updated by cache_alloc_refill(),
+		 * and kmemleak_erase() requires its correct value.
+		 */
+		ac = cpu_cache_get(cachep);
 	}
 	/*
 	 * To avoid a false negative, if an object that is in one of the
 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
 	 * treat the array pointers as a reference to the object.
 	 */
-	kmemleak_erase(&ac->entry[ac->avail]);
+	if (objp)
+		kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
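The re-fetch of ac added above guards a classic stale-pointer hazard: cache_alloc_refill() may replace the per-CPU array_cache, so a pointer taken before the call must not be dereferenced after it. A self-contained userspace analogy of the same pattern (names are illustrative, nothing here is kernel API; error handling elided):

	#include <stdlib.h>

	struct cache { int *entry; int avail; };

	/* May reallocate c->entry, invalidating earlier pointers into it. */
	static void refill(struct cache *c)
	{
		c->entry = realloc(c->entry, 64 * sizeof(int));
		c->avail = 64;
	}

	int main(void)
	{
		struct cache c = { malloc(4 * sizeof(int)), 4 };
		int *entry = c.entry;		/* like ac = cpu_cache_get(cachep) */

		refill(&c);			/* like cache_alloc_refill() */
		entry = c.entry;		/* re-fetch, as the patch does */
		entry[c.avail - 1] = 0;		/* safe only after the re-fetch */
		free(c.entry);
		return 0;
	}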
@@ -3306,7 +3326,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (unlikely(nodeid == -1))
+	if (nodeid == -1)
 		nodeid = numa_node_id();
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
mm/slub.c
@@ -1735,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	}
 	local_irq_restore(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, objsize);
 
 	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
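Both branch-hint tweaks in this merge (the dropped unlikely() around nodeid == -1 in mm/slab.c earlier, and the narrowed one here) change only what the compiler hint covers, not the logic: unlikely(x) expands to __builtin_expect(!!(x), 0), and the new form applies the hint to the __GFP_ZERO test alone rather than to the whole conjunction. A standalone demonstration that the two forms are semantically identical (the __GFP_ZERO value is illustrative, not taken from the kernel headers):

	#include <stdio.h>

	#define unlikely(x)	__builtin_expect(!!(x), 0)
	#define __GFP_ZERO	0x8000u		/* illustrative flag value */

	int main(void)
	{
		unsigned int gfpflags = __GFP_ZERO;
		void *object = &gfpflags;

		/* Old form: the whole conjunction carries the hint. */
		if (unlikely((gfpflags & __GFP_ZERO) && object))
			puts("old form: would memset()");

		/* New form: only the flag test is hinted; '&& object'
		 * is evaluated outside the hint. */
		if (unlikely(gfpflags & __GFP_ZERO) && object)
			puts("new form: would memset()");

		return 0;
	}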
@@ -4371,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 	return len + sprintf(buf + len, "\n");
 }
 
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
 #define STAT_ATTR(si, text) 					\
 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
 {								\
 	return show_stat(s, buf, si);				\
 }								\
-SLAB_ATTR_RO(text);
+static ssize_t text##_store(struct kmem_cache *s,		\
+				const char *buf, size_t length)	\
+{								\
+	if (buf[0] != '0')					\
+		return -EINVAL;					\
+	clear_stat(s, si);					\
+	return length;						\
+}								\
+SLAB_ATTR(text);
 
 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
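The *_store half makes each slub statistic resettable from user space. As a concrete instance, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) now expands to roughly the following (a hand expansion for illustration; show_stat(), clear_stat() and SLAB_ATTR() are the kernel's own definitions above):

	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
	{
		return show_stat(s, buf, ALLOC_FASTPATH);
	}
	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
					const char *buf, size_t length)
	{
		if (buf[0] != '0')	/* only a write beginning with '0' is accepted */
			return -EINVAL;
		clear_stat(s, ALLOC_FASTPATH);	/* zero the counter on every online CPU */
		return length;
	}
	SLAB_ATTR(alloc_fastpath);

In sysfs terms, writing 0 to /sys/kernel/slab/<cache>/alloc_fastpath resets that counter, which is presumably the behavior the +58/−51 churn in the collapsed Documentation/ABI/testing/sysfs-kernel-slab diff documents.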