Commit 023dc704, authored Oct 03, 2012 by Pekka Enberg

Merge branch 'slab/next' into slab/for-linus

Parents: a0d271cb 608da7e3
Showing 8 changed files, with 186 additions and 162 deletions (+186 -162):

    include/linux/slab.h       +4   -2
    include/linux/slab_def.h   +3   -10
    include/linux/slob_def.h   +4   -2
    mm/slab.c                  +40  -55
    mm/slab_common.c           +48  -49
    mm/slob.c                  +27  -6
    mm/slub.c                  +39  -24
    mm/util.c                  +21  -14
include/linux/slab.h

@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+        (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
         __kmalloc_track_caller(size, flags, _RET_IP_)

@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+        (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
         __kmalloc_node_track_caller(size, flags, node, \

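For context: these hunks only widen the preprocessor condition so that the caller-tracking declarations are also visible when SLOB is built with CONFIG_TRACING. kmalloc_track_caller() exists so generic helpers such as krealloc() can attribute their internal allocations to their own caller; _RET_IP_ is the kernel's shorthand for __builtin_return_address(0). Below is a minimal user-space sketch of the same pattern, with invented names (alloc_track_caller, my_strdup), not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the kernel's _RET_IP_: the address the current function
 * will return to, i.e. a location inside our caller. */
#define RET_IP ((unsigned long)__builtin_return_address(0))

/* Back-end allocator that records an explicit call-site address. */
static void *alloc_track_caller(size_t size, unsigned long caller)
{
        printf("alloc %zu bytes on behalf of caller %#lx\n", size, caller);
        return malloc(size);
}

/* A generic helper (think kstrdup/krealloc): when it allocates, it wants the
 * allocation attributed to *its* caller, so it passes RET_IP along. */
static char *my_strdup(const char *s)
{
        char *p = alloc_track_caller(strlen(s) + 1, RET_IP);

        if (p)
                strcpy(p, s);
        return p;
}

int main(void)
{
        char *copy = my_strdup("slab");  /* attributed to main(), not my_strdup() */

        puts(copy);
        free(copy);
        return 0;
}
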
include/linux/slab_def.h

@@ -45,7 +45,6 @@ struct kmem_cache {
         unsigned int colour_off;        /* colour offset */
         struct kmem_cache *slabp_cache;
         unsigned int slab_size;
-        unsigned int dflags;            /* dynamic flags */

         /* constructor func */
         void (*ctor)(void *obj);

@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep,
-                                    gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
 #else
 static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
         return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-        return 0;
-}
 #endif

 static __always_inline void *kmalloc(size_t size, gfp_t flags)

@@ -154,7 +147,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
         cachep = malloc_sizes[i].cs_cachep;

-        ret = kmem_cache_alloc_trace(size, cachep, flags);
+        ret = kmem_cache_alloc_trace(cachep, flags, size);

         return ret;
 }

include/linux/slob_def.h

 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H

+#include <linux/numa.h>
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-        return kmem_cache_alloc_node(cachep, flags, -1);
+        return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
 }

 void *__kmalloc_node(size_t size, gfp_t flags, int node);

@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-        return __kmalloc_node(size, flags, -1);
+        return __kmalloc_node(size, flags, NUMA_NO_NODE);
 }

 static __always_inline void *__kmalloc(size_t size, gfp_t flags)

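These hunks replace the literal -1 with NUMA_NO_NODE, which the newly included <linux/numa.h> defines (in this era of the tree) as:

/* From include/linux/numa.h, unchanged by this commit: */
#define NUMA_NO_NODE    (-1)

so the behaviour is identical; the node argument now spells out "no node preference" by name instead of by magic number.
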
mm/slab.c

This diff is collapsed in the original page and is not reproduced here.

mm/slab_common.c

@@ -23,6 +23,52 @@ enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);

+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
+{
+        struct kmem_cache *s = NULL;
+
+        if (!name || in_interrupt() || size < sizeof(void *) ||
+                size > KMALLOC_MAX_SIZE) {
+                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+                return -EINVAL;
+        }
+
+        list_for_each_entry(s, &slab_caches, list) {
+                char tmp;
+                int res;
+
+                /*
+                 * This happens when the module gets unloaded and doesn't
+                 * destroy its slab cache and no-one else reuses the vmalloc
+                 * area of the module.  Print a warning.
+                 */
+                res = probe_kernel_address(s->name, tmp);
+                if (res) {
+                        pr_err("Slab cache with size %d has lost its name\n",
+                               s->object_size);
+                        continue;
+                }
+
+                if (!strcmp(s->name, name)) {
+                        pr_err("%s (%s): Cache name already exists.\n",
+                               __func__, name);
+                        dump_stack();
+                        s = NULL;
+                        return -EINVAL;
+                }
+        }
+
+        WARN_ON(strchr(name, ' '));        /* It confuses parsers */
+        return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+        return 0;
+}
+#endif
+
 /*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.

@@ -53,60 +99,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 {
         struct kmem_cache *s = NULL;

-#ifdef CONFIG_DEBUG_VM
-        if (!name || in_interrupt() || size < sizeof(void *) ||
-                size > KMALLOC_MAX_SIZE) {
-                printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-                        " failed\n", name);
-                goto out;
-        }
-#endif
-
         get_online_cpus();
         mutex_lock(&slab_mutex);
-
-#ifdef CONFIG_DEBUG_VM
-        list_for_each_entry(s, &slab_caches, list) {
-                char tmp;
-                int res;
-
-                /*
-                 * This happens when the module gets unloaded and doesn't
-                 * destroy its slab cache and no-one else reuses the vmalloc
-                 * area of the module.  Print a warning.
-                 */
-                res = probe_kernel_address(s->name, tmp);
-                if (res) {
-                        printk(KERN_ERR
-                               "Slab cache with size %d has lost its name\n",
-                               s->object_size);
-                        continue;
-                }
-
-                if (!strcmp(s->name, name)) {
-                        printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-                                " already exists.\n",
-                                name);
-                        dump_stack();
-                        s = NULL;
-                        goto oops;
-                }
-        }
-
-        WARN_ON(strchr(name, ' '));        /* It confuses parsers */
-#endif
-
-        s = __kmem_cache_create(name, size, align, flags, ctor);
+        if (kmem_cache_sanity_check(name, size) == 0)
+                s = __kmem_cache_create(name, size, align, flags, ctor);

-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
         mutex_unlock(&slab_mutex);
         put_online_cpus();

-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
         if (!s && (flags & SLAB_PANIC))
                 panic("kmem_cache_create: Failed to create slab '%s'\n", name);

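The net effect of these two hunks is that the CONFIG_DEBUG_VM checks, previously inlined into kmem_cache_create() behind #ifdefs and goto labels, now live in kmem_cache_sanity_check(), with a no-op stub for non-debug builds. A small user-space sketch of that shape (all names invented for illustration):

#include <stdio.h>
#include <string.h>

#ifdef DEBUG
/* Debug build: do real validation in one helper... */
static int name_sanity_check(const char *name)
{
        if (!name || strchr(name, ' ')) {
                fprintf(stderr, "bad cache name\n");
                return -1;
        }
        return 0;
}
#else
/* ...non-debug build: the stub compiles away, and callers need no #ifdefs. */
static inline int name_sanity_check(const char *name) { return 0; }
#endif

static int create_cache(const char *name)
{
        if (name_sanity_check(name) != 0)
                return -1;
        printf("created cache '%s'\n", name);
        return 0;
}

int main(void)
{
        return create_cache("foo_cache");
}

The caller stays free of conditional compilation, which is exactly what removes the oops:/out: label dance from kmem_cache_create() above.
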
mm/slob.c

@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
         void *page;

 #ifdef CONFIG_NUMA
-        if (node != -1)
+        if (node != NUMA_NO_NODE)
                 page = alloc_pages_exact_node(node, gfp, order);
         else
 #endif

@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                  * If there's a node specification, search for a partial
                  * page with a matching node id in the freelist.
                  */
-                if (node != -1 && page_to_nid(sp) != node)
+                if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
                         continue;
 #endif
                 /* Enough room on this page? */

@@ -425,7 +425,8 @@ static void slob_free(void *block, int size)
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */

-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
         unsigned int *m;
         int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                 *m = size;
                 ret = (void *)m + align;

-                trace_kmalloc_node(_RET_IP_, ret,
+                trace_kmalloc_node(caller, ret,
                                    size, size + align, gfp, node);
         } else {
                 unsigned int order = get_order(size);

@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         page->private = size;
                 }

-                trace_kmalloc_node(_RET_IP_, ret,
+                trace_kmalloc_node(caller, ret,
                                    size, PAGE_SIZE << order, gfp, node);
         }

         kmemleak_alloc(ret, size, 1, gfp);
         return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+        return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
+
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+                                  int node, unsigned long caller)
+{
+        return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif

 void kfree(const void *block)
 {
         struct page *sp;

@@ -514,7 +535,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
         struct kmem_cache *c;

         c = slob_alloc(sizeof(struct kmem_cache),
-                GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+                GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);

         if (c) {
                 c->name = name;

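Beyond the -1 to NUMA_NO_NODE conversions, the main change above is that a single __always_inline worker, __do_kmalloc_node(), now takes the call-site address explicitly, and __kmalloc_node() plus the new *_track_caller() entry points become thin wrappers that pass either _RET_IP_ or a caller value forwarded from above. A compilable user-space sketch of that wrapper shape (names invented, not kernel code):

#include <stdio.h>

/* One inlined worker takes the call site explicitly... */
static inline __attribute__((always_inline))
int do_log(const char *msg, unsigned long caller)
{
        return printf("[%#lx] %s\n", caller, msg);
}

/* ...and each thin entry point decides what "caller" means:
 * either its own return address or a value forwarded from above. */
int log_msg(const char *msg)
{
        return do_log(msg, (unsigned long)__builtin_return_address(0));
}

int log_msg_on_behalf(const char *msg, unsigned long caller)
{
        return do_log(msg, caller);
}

int main(void)
{
        log_msg("hello");
        log_msg_on_behalf("hello again", 0x1234);
        return 0;
}
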
mm/slub.c

@@ -568,6 +568,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
         printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
         printk(KERN_ERR "----------------------------------------"
                         "-------------------------------------\n\n");
+
+        add_taint(TAINT_BAD_PAGE);
 }

 static void slab_fix(struct kmem_cache *s, char *fmt, ...)

@@ -1069,13 +1071,13 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
         return 0;
 }

-static noinline int free_debug_processing(struct kmem_cache *s,
-                 struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+        struct kmem_cache *s, struct page *page, void *object,
+        unsigned long addr, unsigned long *flags)
 {
-        unsigned long flags;
-        int rc = 0;
+        struct kmem_cache_node *n = get_node(s, page_to_nid(page));

-        local_irq_save(flags);
+        spin_lock_irqsave(&n->list_lock, *flags);
         slab_lock(page);

         if (!check_slab(s, page))

@@ -1113,15 +1115,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
                 set_track(s, object, TRACK_FREE, addr);
         trace(s, page, object, 0);
         init_object(s, object, SLUB_RED_INACTIVE);
-        rc = 1;
 out:
         slab_unlock(page);
-        local_irq_restore(flags);
-        return rc;
+        /*
+         * Keep node_lock to preserve integrity
+         * until the object is actually freed
+         */
+        return n;

 fail:
+        slab_unlock(page);
+        spin_unlock_irqrestore(&n->list_lock, *flags);
         slab_fix(s, "Object at 0x%p not freed", object);
-        goto out;
+        return NULL;
 }

 static int __init setup_slub_debug(char *str)

@@ -1214,8 +1220,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
 static inline int alloc_debug_processing(struct kmem_cache *s,
         struct page *page, void *object, unsigned long addr) { return 0; }

-static inline int free_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+        struct kmem_cache *s, struct page *page, void *object,
+        unsigned long addr, unsigned long *flags) { return NULL; }

 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                         { return 1; }

@@ -1714,7 +1721,7 @@ static inline void note_cmpxchg_failure(const char *n,
         stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }

-void init_kmem_cache_cpus(struct kmem_cache *s)
+static void init_kmem_cache_cpus(struct kmem_cache *s)
 {
         int cpu;

@@ -1939,7 +1946,7 @@ static void unfreeze_partials(struct kmem_cache *s)
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
         struct page *oldpage;
         int pages;

@@ -1962,6 +1969,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                                 local_irq_save(flags);
                                 unfreeze_partials(s);
                                 local_irq_restore(flags);
+                                oldpage = NULL;
                                 pobjects = 0;
                                 pages = 0;
                                 stat(s, CPU_PARTIAL_DRAIN);

@@ -2310,7 +2318,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s,
                 gfp_t gfpflags, int node, unsigned long addr)
 {
         void **object;

@@ -2380,9 +2388,15 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
         return object;
 }

+static __always_inline void *slab_alloc(struct kmem_cache *s,
+                gfp_t gfpflags, unsigned long addr)
+{
+        return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, _RET_IP_);

         trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);

@@ -2393,7 +2407,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, _RET_IP_);
         trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
         return ret;
 }

@@ -2411,7 +2425,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

         trace_kmem_cache_alloc_node(_RET_IP_, ret,
                                     s->object_size, s->size, gfpflags, node);

@@ -2425,7 +2439,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                     gfp_t gfpflags,
                                     int node, size_t size)
 {
-        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

         trace_kmalloc_node(_RET_IP_, ret,
                            size, s->size, gfpflags, node);

@@ -2457,7 +2471,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         stat(s, FREE_SLOWPATH);

-        if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+        if (kmem_cache_debug(s) &&
+                !(n = free_debug_processing(s, page, x, addr, &flags)))
                 return;

         do {

@@ -3362,7 +3377,7 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;

-        ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
+        ret = slab_alloc(s, flags, _RET_IP_);

         trace_kmalloc(_RET_IP_, ret, size, s->size, flags);

@@ -3405,7 +3420,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;

-        ret = slab_alloc(s, flags, node, _RET_IP_);
+        ret = slab_alloc_node(s, flags, node, _RET_IP_);

         trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);

@@ -3482,7 +3497,7 @@ void kfree(const void *x)
         if (unlikely(!PageSlab(page))) {
                 BUG_ON(!PageCompound(page));
                 kmemleak_free(x);
-                put_page(page);
+                __free_pages(page, compound_order(page));
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);

@@ -4033,7 +4048,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;

-        ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
+        ret = slab_alloc(s, gfpflags, caller);

         /* Honor the call site pointer we received. */
         trace_kmalloc(caller, ret, size, s->size, gfpflags);

@@ -4063,7 +4078,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;

-        ret = slab_alloc(s, gfpflags, node, caller);
+        ret = slab_alloc_node(s, gfpflags, node, caller);

         /* Honor the call site pointer we received. */
         trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);

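A recurring pattern in this file's hunks is the split of slab_alloc() into slab_alloc_node(), which takes an explicit node, and a slab_alloc() convenience wrapper that supplies NUMA_NO_NODE, so the many node-agnostic call sites lose one argument. A user-space sketch of that shape (names and ANY_NODE are invented for illustration):

#include <stdio.h>
#include <stddef.h>

#define ANY_NODE (-1)        /* stand-in for NUMA_NO_NODE */

/* Full-featured path: takes an explicit node. */
static inline void *cache_alloc_node(size_t size, int node)
{
        printf("alloc %zu bytes on node %d\n", size, node);
        return NULL;        /* real allocation elided in this sketch */
}

/* Convenience wrapper supplies the "don't care" node, so the call sites
 * that never pass a node stay short (the shape of the
 * slab_alloc()/slab_alloc_node() split above). */
static inline void *cache_alloc(size_t size)
{
        return cache_alloc_node(size, ANY_NODE);
}

int main(void)
{
        cache_alloc(32);
        cache_alloc_node(32, 0);
        return 0;
}
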
mm/util.c

@@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user);

+static __always_inline void *__do_krealloc(const void *p, size_t new_size,
+                                           gfp_t flags)
+{
+        void *ret;
+        size_t ks = 0;
+
+        if (p)
+                ks = ksize(p);
+
+        if (ks >= new_size)
+                return (void *)p;
+
+        ret = kmalloc_track_caller(new_size, flags);
+        if (ret && p)
+                memcpy(ret, p, ks);
+
+        return ret;
+}
+
 /**
  * __krealloc - like krealloc() but don't free @p.
  * @p: object to reallocate memory for.

@@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user);
  */
 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-        void *ret;
-        size_t ks = 0;
-
         if (unlikely(!new_size))
                 return ZERO_SIZE_PTR;

-        if (p)
-                ks = ksize(p);
-
-        if (ks >= new_size)
-                return (void *)p;
-
-        ret = kmalloc_track_caller(new_size, flags);
-        if (ret && p)
-                memcpy(ret, p, ks);
-
-        return ret;
+        return __do_krealloc(p, new_size, flags);
 }
 EXPORT_SYMBOL(__krealloc);

@@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
                 return ZERO_SIZE_PTR;
         }

-        ret = __krealloc(p, new_size, flags);
+        ret = __do_krealloc(p, new_size, flags);
         if (ret && p != ret)
                 kfree(p);

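These hunks move the grow-or-reuse logic into __do_krealloc(), which both __krealloc() (non-freeing) and krealloc() (freeing) now call. The essential contract is that the worker may hand back the original pointer, and only the freeing variant releases the old buffer, and only when the data actually moved. A small user-space analogue of that contract (names invented; the kernel versions additionally handle ZERO_SIZE_PTR and query the real usable size with ksize()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Shared worker: reuse the buffer if it is already big enough,
 * otherwise allocate a fresh one and copy (old buffer untouched). */
static void *do_grow(void *p, size_t old_size, size_t new_size)
{
        void *ret;

        if (old_size >= new_size)
                return p;
        ret = malloc(new_size);
        if (ret && p)
                memcpy(ret, p, old_size);
        return ret;
}

/* Freeing variant, analogous to krealloc(): releases the old buffer
 * only when the worker actually moved the data. */
static void *grow(void *p, size_t old_size, size_t new_size)
{
        void *ret = do_grow(p, old_size, new_size);

        if (ret && ret != p)
                free(p);
        return ret;
}

int main(void)
{
        char *buf = malloc(8);

        strcpy(buf, "slab");
        buf = grow(buf, 8, 64);        /* may or may not move; old buffer handled */
        puts(buf);
        free(buf);
        return 0;
}
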