Commit aceda773
authored Sep 14, 2009 by Pekka Enberg

Merge branches 'slab/cleanups' and 'slab/fixes' into for-linus
Parents: 0cc6d77e 5086c389 8a3d271d
Showing 5 changed files with 85 additions and 25 deletions (+85 -25)
Documentation/vm/slub.txt   +10  -0
include/linux/slob_def.h     +0  -5
include/linux/slub_def.h     +2  -6
mm/slob.c                    +5  -0
mm/slub.c                   +68 -14
Documentation/vm/slub.txt
@@ -41,6 +41,8 @@ Possible debug options are
 P		Poisoning (object and padding)
 U		User tracking (free and alloc)
 T		Trace (please only use on single slabs)
+O		Switch debugging off for caches that would have
+		caused higher minimum slab orders
 -		Switch all debugging off (useful if the kernel is
 		configured with CONFIG_SLUB_DEBUG_ON)
@@ -59,6 +61,14 @@ to the dentry cache with

 	slub_debug=F,dentry

+Debugging options may require the minimum possible slab order to increase as
+a result of storing the metadata (for example, caches with PAGE_SIZE object
+sizes). This has a higher likelihood of resulting in slab allocation errors
+in low memory situations or if there's high fragmentation of memory. To
+switch off debugging for such caches by default, use
+
+	slub_debug=O
+
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
 contents of:
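
To make the "PAGE_SIZE object sizes" example above concrete, here is a back-of-the-envelope userspace sketch (the metadata size is an assumption, not the real SLUB layout): once red zones and tracking data sit next to a 4096-byte object, the per-object footprint no longer fits in an order-0 page, so the cache's minimum slab order rises.

/* Back-of-the-envelope sketch, not kernel code: shows why debug metadata
 * can push a PAGE_SIZE-sized object past one page.  The metadata size is
 * an assumption for illustration, not the real SLUB layout. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* smallest order such that (PAGE_SIZE << order) >= size */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long object = 4096;		/* kmalloc-4096 style object */
	unsigned long metadata = 2 * 16 + 64;	/* assumed red zones + track info */

	printf("without debugging: order %u\n", order_for(object));
	printf("with debugging:    order %u\n", order_for(object + metadata));
	return 0;
}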
include/linux/slob_def.h
@@ -34,9 +34,4 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 	return kmalloc(size, flags);
 }

-static inline void kmem_cache_init_late(void)
-{
-	/* Nothing to do */
-}
-
 #endif /* __LINUX_SLOB_DEF_H */
include/linux/slub_def.h
@@ -153,12 +153,10 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;

-#if KMALLOC_MIN_SIZE <= 64
-	if (size > 64 && size <= 96)
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
 		return 1;
-	if (size > 128 && size <= 192)
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
 		return 2;
-#endif
 	if (size <=          8) return 3;
 	if (size <=         16) return 4;
 	if (size <=         32) return 5;
@@ -304,6 +302,4 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 }
 #endif

-void __init kmem_cache_init_late(void);
-
 #endif /* _LINUX_SLUB_DEF_H */
mm/slob.c
@@ -692,3 +692,8 @@ void __init kmem_cache_init(void)
 {
 	slob_ready = 1;
 }
+
+void __init kmem_cache_init_late(void)
+{
+	/* Nothing to do */
+}
mm/slub.c
@@ -140,6 +140,13 @@
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)

+/*
+ * Debugging flags that require metadata to be stored in the slab. These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
+ */
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+
 /*
  * Set of flags that will prevent slab merging
  */
@@ -325,6 +332,7 @@ static int slub_debug;
 #endif

 static char *slub_debug_slabs;
+static int disable_higher_order_debug;

 /*
  * Object debugging
@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 	print_section("Padding", end - remainder, remainder);

-	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
 }
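
The one-line fix narrows the range that is re-poisoned. A minimal userspace sketch of the idea (buffer size, remainder and poison value are assumptions, not the real slab layout): only the trailing padding belongs to restore_bytes(), whereas [start, end) also covers the live objects.

/* Illustration only, not kernel code: re-poison just the trailing padding
 * of a pretend slab page instead of the whole [start, end) range. */
#include <stdio.h>
#include <string.h>

#define POISON_INUSE 0x5a	/* poison value used for unused padding */

int main(void)
{
	unsigned char slab[64];			/* pretend slab page */
	unsigned char *start = slab;
	unsigned char *end = slab + sizeof(slab);
	size_t remainder = 16;			/* assumed leftover padding */

	memset(start, 0xaa, sizeof(slab));	/* 0xaa stands in for live objects */

	/* the fixed call: touch only the padding, leave the objects alone */
	memset(end - remainder, POISON_INUSE, remainder);

	printf("object byte: 0x%02x, padding byte: 0x%02x\n",
	       start[0], (end - remainder)[0]);
	return 0;
}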
@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
 		 */
 		goto check_slabs;

+	if (tolower(*str) == 'o') {
+		/*
+		 * Avoid enabling debugging on caches if its minimum order
+		 * would increase as a result.
+		 */
+		disable_higher_order_debug = 1;
+		goto out;
+	}
+
 	slub_debug = 0;
 	if (*str == '-')
 		/*
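
Read as a stand-alone sketch (flag values and helper names invented; only the control flow follows the hunk above), the new branch means that booting with slub_debug=O keeps the default debug flags but records that they should later be dropped for caches whose minimum order would grow:

/* Userspace illustration of the new 'O' branch, not the kernel parser. */
#include <ctype.h>
#include <stdio.h>

#define DEBUG_DEFAULT_FLAGS 0xff	/* placeholder for the real flag mask */

static unsigned int slub_debug;		/* debug flags selected at boot */
static int disable_higher_order_debug;	/* set by "slub_debug=O" */

static void setup_slub_debug_sketch(const char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		return;			/* no options: full debugging */
	if (tolower((unsigned char)*str) == 'o') {
		/* keep the defaults, but skip caches whose min order would grow */
		disable_higher_order_debug = 1;
		return;
	}
	/* ... individual flag letters would be parsed here ... */
}

int main(void)
{
	setup_slub_debug_sketch("=O");
	printf("flags=%#x, higher-order debug disabled=%d\n",
	       slub_debug, disable_higher_order_debug);
	return 0;
}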
@@ -1026,8 +1043,8 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
 	 * Enable debugging if selected on the kernel commandline.
 	 */
 	if (slub_debug && (!slub_debug_slabs ||
-		strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
-			flags |= slub_debug;
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+		flags |= slub_debug;

 	return flags;
 }
@@ -1109,8 +1126,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}

-	if (kmemcheck_enabled
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-	{
+	if (kmemcheck_enabled &&
+		!(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);

 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1560,6 +1576,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		"default order: %d, min order: %d\n", s->name, s->objsize,
 		s->size, oo_order(s->oo), oo_order(s->min));

+	if (oo_order(s->min) > get_order(s->objsize))
+		printk(KERN_WARNING " %s debugging increased min order, use "
+		       "slub_debug=O to disable.\n", s->name);
+
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long nr_slabs;
@@ -2001,7 +2021,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects --;
+		min_objects--;
 	}

 	/*
@@ -2400,6 +2420,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * on bootup.
 	 */
 	align = calculate_alignment(flags, align, s->objsize);
+	s->align = align;

 	/*
 	 * SLUB stores one object immediately after another beginning from
@@ -2452,6 +2473,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,

 	if (!calculate_sizes(s, -1))
 		goto error;
+	if (disable_higher_order_debug) {
+		/*
+		 * Disable debugging flags that store metadata if the min slab
+		 * order increased.
+		 */
+		if (get_order(s->size) > get_order(s->objsize)) {
+			s->flags &= ~DEBUG_METADATA_FLAGS;
+			s->offset = 0;
+			if (!calculate_sizes(s, -1))
+				goto error;
+		}
+	}

 	/*
 	 * The larger the object size is, the more pages we want on the partial
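
A compressed userspace sketch of that fallback (sizes and flag values invented, get_order() approximated): when debug metadata has pushed the cache to a higher page order, the metadata-bearing flags are masked out and the layout is recomputed without them.

/* Illustration of the slub_debug=O fallback path, not the kernel code. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define SLAB_RED_ZONE	 0x1	/* invented flag values for the sketch */
#define SLAB_POISON	 0x2
#define SLAB_STORE_USER	 0x4
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/* crude stand-in for get_order(): bytes -> page order, rounded up */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	while ((1UL << (PAGE_SHIFT + order)) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long objsize = 4096;		/* payload the user asked for */
	unsigned long size = 4096 + 96;		/* payload + assumed debug metadata */
	unsigned long flags = DEBUG_METADATA_FLAGS;

	if (get_order_sketch(size) > get_order_sketch(objsize)) {
		flags &= ~DEBUG_METADATA_FLAGS;	/* drop metadata-bearing flags */
		size = objsize;			/* and lay the cache out without them */
	}
	printf("flags=%#lx size=%lu order=%d\n",
	       flags, size, get_order_sketch(size));
	return 0;
}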
@@ -2790,6 +2823,11 @@ static s8 size_index[24] = {
 	2	/* 192 */
 };

+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index;
@@ -2798,7 +2836,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 		if (!size)
 			return ZERO_SIZE_PTR;

-		index = size_index[(size - 1) / 8];
+		index = size_index[size_index_elem(size)];
 	} else
 		index = fls(size - 1);
@@ -3156,10 +3194,12 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;

 	/* Caches that are not of the two-to-the-power-of size */
-	if (KMALLOC_MIN_SIZE <= 64) {
+	if (KMALLOC_MIN_SIZE <= 32) {
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
+	}
+
+	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
@@ -3186,17 +3226,28 @@
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}

-	if (KMALLOC_MIN_SIZE == 128) {
+	if (KMALLOC_MIN_SIZE == 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+	} else if (KMALLOC_MIN_SIZE == 128) {
 		/*
 		 * The 192 byte sized cache is not used if the alignment
 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
 		 * instead.
 		 */
 		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[(i - 1) / 8] = 8;
+			size_index[size_index_elem(i)] = 8;
 	}

 	slab_state = UP;
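
The effect of these fixups is easiest to see on the lookup table itself. Here is a userspace sketch assuming KMALLOC_MIN_SIZE of 64 (indices follow the same convention as above: 1 = 96 bytes, 2 = 192 bytes, n >= 3 = 2^n bytes): entries below the minimum size collapse to KMALLOC_SHIFT_LOW and the unused 96-byte slots are redirected to the 128-byte cache.

/* Stand-alone illustration of the size_index fixups, not kernel code. */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define KMALLOC_MIN_SIZE   64	/* assumed minimum alignment */
#define KMALLOC_SHIFT_LOW   6

/* index of the cache serving each 8-byte size class from 8 to 192 bytes */
static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,	/*   8 .. 64  */
	1, 1, 1, 1, 7, 7, 7, 7,	/*  72 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2	/* 136 .. 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	size_t i;

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= (int)ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}
	if (KMALLOC_MIN_SIZE == 64) {
		/* the 96-byte cache is unused with 64-byte alignment */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	printf("a 24-byte request uses cache index %d\n",
	       size_index[size_index_elem(24)]);
	printf("a 96-byte request uses cache index %d\n",
	       size_index[size_index_elem(96)]);
	return 0;
}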
@@ -4543,8 +4594,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	}

 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
-	if (err)
+	if (err) {
+		kobject_del(&s->kobj);
+		kobject_put(&s->kobj);
 		return err;
+	}
 	kobject_uevent(&s->kobj, KOBJ_ADD);
 	if (!unmergeable) {
 		/* Setup first alias */
@@ -4726,7 +4780,7 @@ static const struct file_operations proc_slabinfo_operations = {
 static int __init slab_proc_init(void)
 {
-	proc_create("slabinfo", S_IWUSR|S_IRUGO, NULL, &proc_slabinfo_operations);
+	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
 	return 0;
 }
 module_init(slab_proc_init);