Commit c6fa2883 authored May 20, 2003 by Christoph Hellwig
[XFS] simplify memory allocation code big time
SGI Modid: 2.5.x-xfs:slinx:148933a
parent 825e0c16
Showing 4 changed files with 118 additions and 35 deletions
fs/xfs/Makefile             +0   -1
fs/xfs/pagebuf/page_buf.c   +0   -9
fs/xfs/quota/xfs_qm.c       +17  -11
fs/xfs/support/kmem.h       +101 -14
fs/xfs/Makefile

@@ -132,7 +132,6 @@ xfs-y += $(addprefix linux/, \
 # Objects in support/
 xfs-y += $(addprefix support/, \
 	debug.o \
-	kmem.o \
 	ktrace.o \
 	move.o \
 	mrlock.o \
fs/xfs/pagebuf/page_buf.c

@@ -1897,13 +1897,6 @@ pagebuf_readstats(
 }
 #endif	/* CONFIG_PROC_FS */
 
-STATIC void
-pagebuf_shaker(void)
-{
-	pagebuf_daemon_wakeup(1);
-}
-
 /*
  *	Initialization and Termination
  */

@@ -1943,7 +1936,6 @@ pagebuf_init(void)
 #endif
 	pagebuf_daemon_start();
-	kmem_shake_register(pagebuf_shaker);
 	return 0;
 }

@@ -1959,7 +1951,6 @@ pagebuf_terminate(void)
 	pagebuf_daemon_stop();
 	kmem_cache_destroy(pagebuf_cache);
-	kmem_shake_deregister(pagebuf_shaker);
 	unregister_sysctl_table(pagebuf_table_header);
 #ifdef CONFIG_PROC_FS
fs/xfs/quota/xfs_qm.c

@@ -88,7 +88,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
 STATIC int	xfs_qm_quotacheck(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
-STATIC void	xfs_qm_shake(void);
+STATIC int	xfs_qm_shake(int, unsigned int);
 
 #ifdef DEBUG
 extern mutex_t	qcheck_lock;

@@ -112,6 +112,8 @@ extern mutex_t qcheck_lock;
 #define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
 #endif
 
+struct shrinker *xfs_qm_shrinker;
+
 /*
  * Initialize the XQM structure.
  * Note that there is not one quota manager per file system.

@@ -161,7 +163,7 @@ xfs_Gqm_init(void)
 	} else
 		xqm->qm_dqzone = qm_dqzone;
 
-	kmem_shake_register(xfs_qm_shake);
+	xfs_qm_shrinker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
 
 	/*
 	 * The t_dqinfo portion of transactions.

@@ -193,7 +195,8 @@ xfs_qm_destroy(
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
-	kmem_shake_deregister(xfs_qm_shake);
+	remove_shrinker(xfs_qm_shrinker);
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
@@ -2088,7 +2091,7 @@ xfs_qm_shake_freelist(
 			xfs_dqunlock(dqp);
 			xfs_qm_freelist_unlock(xfs_Gqm);
 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return (nreclaimed != howmany);
+				goto out;
 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
 			goto tryagain;
 		}

@@ -2163,7 +2166,7 @@ xfs_qm_shake_freelist(
 		XFS_DQ_HASH_UNLOCK(hash);
 		xfs_qm_freelist_unlock(xfs_Gqm);
 		if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-			return (nreclaimed != howmany);
+			goto out;
 		goto tryagain;
 	}
 	xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");

@@ -2188,7 +2191,8 @@ xfs_qm_shake_freelist(
 		dqp = nextdqp;
 	}
 	xfs_qm_freelist_unlock(xfs_Gqm);
-	return (nreclaimed != howmany);
+out:
+	return nreclaimed;
 }

@@ -2197,13 +2201,15 @@ xfs_qm_shake_freelist(
  * running low.
  */
 /* ARGSUSED */
-STATIC void
-xfs_qm_shake(void)
+STATIC int
+xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
 {
 	int	ndqused, nfree, n;
 
+	if (!(gfp_mask & __GFP_WAIT))
+		return 0;
 	if (! xfs_Gqm)
-		return;
+		return 0;
 
 	nfree = xfs_Gqm->qm_dqfreelist.qh_nelems;	/* free dquots */
 	/* incore dquots in all f/s's */

@@ -2212,12 +2218,12 @@ xfs_qm_shake(void)
 	ASSERT(ndqused >= 0);
 
 	if (nfree <= ndqused && nfree < ndquot)
-		return;
+		return 0;
 
 	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
 	n = nfree - ndqused - ndquot;		/* # over target */
 
-	(void) xfs_qm_shake_freelist(MAX(nfree, n));
+	return xfs_qm_shake_freelist(MAX(nfree, n));
 }
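The xfs_qm.c hunks above replace the XFS-private kmem_shake_register()/kmem_shake_deregister() hooks with the kernel's generic shrinker interface. As a rough, illustrative sketch of that interface as used here (set_shrinker(), remove_shrinker(), DEFAULT_SEEKS and the int (*)(int nr_to_scan, unsigned int gfp_mask) callback shape all appear in the diff itself; the my_cache_* names below are hypothetical and not part of the commit):

#include <linux/mm.h>	/* struct shrinker, set_shrinker(), remove_shrinker() in this era */

/* Illustrative sketch only: in this commit the real callback is xfs_qm_shake(). */
static struct shrinker *my_cache_shrinker;

/* Called by the VM under memory pressure.  A callback that cannot make
 * progress (for example when it is not allowed to sleep) just returns 0,
 * as xfs_qm_shake() does when __GFP_WAIT is not set in gfp_mask. */
static int my_cache_shake(int nr_to_scan, unsigned int gfp_mask)
{
	if (!(gfp_mask & __GFP_WAIT))
		return 0;
	/* ... reclaim up to nr_to_scan cached objects here ... */
	return 0;
}

/* Registration at init time, teardown at exit time, mirroring the
 * xfs_Gqm_init()/xfs_qm_destroy() changes above. */
static int my_cache_init(void)
{
	my_cache_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shake);
	return 0;
}

static void my_cache_exit(void)
{
	remove_shrinker(my_cache_shrinker);
}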
fs/xfs/support/kmem.h

@@ -32,31 +32,118 @@
 #ifndef __XFS_SUPPORT_KMEM_H__
 #define __XFS_SUPPORT_KMEM_H__
 
 #include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 /*
- * memory management routines
+ * Cutoff point to use vmalloc instead of kmalloc.
  */
+#define MAX_SLAB_SIZE	0x10000
+
+/*
+ * XFS uses slightly different names for these due to the
+ * IRIX heritage.
+ */
+#define kmem_zone	kmem_cache_s
+#define kmem_zone_t	kmem_cache_t
+
 #define KM_SLEEP	0x0001
 #define KM_NOSLEEP	0x0002
 #define KM_NOFS		0x0004
 
-#define kmem_zone	kmem_cache_s
-#define kmem_zone_t	kmem_cache_t
-
-extern kmem_zone_t	*kmem_zone_init(int, char *);
-extern void		*kmem_zone_zalloc(kmem_zone_t *, int);
-extern void		*kmem_zone_alloc(kmem_zone_t *, int);
-extern void		kmem_zone_free(kmem_zone_t *, void *);
+/*
+ * XXX get rid of the unconditional __GFP_NOFAIL by adding
+ * a KM_FAIL flag and using it where we're allowed to fail.
+ */
+static __inline unsigned int flag_convert(int flags)
+{
+#if DEBUG
+	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS))) {
+		printk(KERN_WARNING
+		    "XFS: memory allocation with wrong flags (%x)\n", flags);
+		BUG();
+	}
+#endif
+
+	if (flags & KM_NOSLEEP)
+		return GFP_ATOMIC;
+	/* If we're in a transaction, FS activity is not ok */
+	else if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+		return GFP_NOFS | __GFP_NOFAIL;
+	return GFP_KERNEL | __GFP_NOFAIL;
+}
+
+static __inline void *kmem_alloc(size_t size, int flags)
+{
+	if (unlikely(MAX_SLAB_SIZE < size))
+		/* Avoid doing filesystem sensitive stuff to get this */
+		return __vmalloc(size, flag_convert(flags), PAGE_KERNEL);
+	return kmalloc(size, flag_convert(flags));
+}
+
+static __inline void *kmem_zalloc(size_t size, int flags)
+{
+	void *ptr = kmem_alloc(size, flags);
+	if (likely(ptr != NULL))
+		memset(ptr, 0, size);
+	return ptr;
+}
+
+static __inline void kmem_free(void *ptr, size_t size)
+{
+	if (unlikely((unsigned long)ptr < VMALLOC_START ||
+		     (unsigned long)ptr >= VMALLOC_END))
+		kfree(ptr);
+	else
+		vfree(ptr);
+}
+
+static __inline void *kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
+				   int flags)
+{
+	void *new = kmem_alloc(newsize, flags);
+
+	if (likely(ptr != NULL)) {
+		if (likely(new != NULL))
+			memcpy(new, ptr, min(oldsize, newsize));
+		kmem_free(ptr, oldsize);
+	}
+
+	return new;
+}
+
+static __inline kmem_zone_t *kmem_zone_init(int size, char *zone_name)
+{
+	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+}
 
-extern void		*kmem_alloc(size_t, int);
-extern void		*kmem_realloc(void *, size_t, size_t, int);
-extern void		*kmem_zalloc(size_t, int);
-extern void		kmem_free(void *, size_t);
+static __inline void *kmem_zone_alloc(kmem_zone_t *zone, int flags)
+{
+	return kmem_cache_alloc(zone, flag_convert(flags));
+}
 
-typedef void		(*kmem_shake_func_t)(void);
+static __inline void *kmem_zone_zalloc(kmem_zone_t *zone, int flags)
+{
+	void *ptr = kmem_zone_alloc(zone, flags);
+	if (likely(ptr != NULL))
+		memset(ptr, 0, kmem_cache_size(zone));
+	return ptr;
+}
 
-extern void		kmem_shake_register(kmem_shake_func_t);
-extern void		kmem_shake_deregister(kmem_shake_func_t);
+static __inline void kmem_zone_free(kmem_zone_t *zone, void *ptr)
+{
+	kmem_cache_free(zone, ptr);
+}
 
 #endif	/* __XFS_SUPPORT_KMEM_H__ */
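With the wrappers now fully visible in the header, their behaviour can be read straight off the diff: flag_convert() maps KM_NOSLEEP to GFP_ATOMIC, KM_NOFS (or a PF_FSTRANS caller) to GFP_NOFS|__GFP_NOFAIL and everything else to GFP_KERNEL|__GFP_NOFAIL; kmem_alloc() switches from kmalloc() to __vmalloc() above MAX_SLAB_SIZE; kmem_free() picks kfree() or vfree() by address range. A hypothetical caller, purely for illustration (the function name and size below are not part of the commit):

#include "kmem.h"	/* the header shown above */

/* Illustrative only: not part of this commit. */
static void example_alloc(void)
{
	size_t	size = 128 * 1024;	/* larger than MAX_SLAB_SIZE (0x10000),
					 * so kmem_alloc() goes through __vmalloc() */
	void	*buf;

	buf = kmem_zalloc(size, KM_SLEEP);	/* zeroed allocation, may sleep */
	if (buf != NULL) {
		/* ... use buf ... */
		kmem_free(buf, size);	/* vfree() here; kfree() for sub-cutoff sizes */
	}
}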