Commit c6fa2883 authored by Christoph Hellwig

[XFS] simplify memory allocation code big time

SGI Modid: 2.5.x-xfs:slinx:148933a
parent 825e0c16
...@@ -132,7 +132,6 @@ xfs-y += $(addprefix linux/, \ ...@@ -132,7 +132,6 @@ xfs-y += $(addprefix linux/, \
# Objects in support/ # Objects in support/
xfs-y += $(addprefix support/, \ xfs-y += $(addprefix support/, \
debug.o \ debug.o \
kmem.o \
ktrace.o \ ktrace.o \
move.o \ move.o \
mrlock.o \ mrlock.o \
......
...@@ -1897,13 +1897,6 @@ pagebuf_readstats( ...@@ -1897,13 +1897,6 @@ pagebuf_readstats(
} }
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
STATIC void
pagebuf_shaker(void)
{
pagebuf_daemon_wakeup(1);
}
/* /*
* Initialization and Termination * Initialization and Termination
*/ */
...@@ -1943,7 +1936,6 @@ pagebuf_init(void) ...@@ -1943,7 +1936,6 @@ pagebuf_init(void)
#endif #endif
pagebuf_daemon_start(); pagebuf_daemon_start();
kmem_shake_register(pagebuf_shaker);
return 0; return 0;
} }
...@@ -1959,7 +1951,6 @@ pagebuf_terminate(void) ...@@ -1959,7 +1951,6 @@ pagebuf_terminate(void)
pagebuf_daemon_stop(); pagebuf_daemon_stop();
kmem_cache_destroy(pagebuf_cache); kmem_cache_destroy(pagebuf_cache);
kmem_shake_deregister(pagebuf_shaker);
unregister_sysctl_table(pagebuf_table_header); unregister_sysctl_table(pagebuf_table_header);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
......
...@@ -88,7 +88,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); ...@@ -88,7 +88,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
STATIC int xfs_qm_quotacheck(xfs_mount_t *); STATIC int xfs_qm_quotacheck(xfs_mount_t *);
STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC void xfs_qm_shake(void); STATIC int xfs_qm_shake(int, unsigned int);
#ifdef DEBUG #ifdef DEBUG
extern mutex_t qcheck_lock; extern mutex_t qcheck_lock;
...@@ -112,6 +112,8 @@ extern mutex_t qcheck_lock; ...@@ -112,6 +112,8 @@ extern mutex_t qcheck_lock;
#define XQM_LIST_PRINT(l, NXT, title) do { } while (0) #define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
#endif #endif
struct shrinker *xfs_qm_shrinker;
/* /*
* Initialize the XQM structure. * Initialize the XQM structure.
* Note that there is not one quota manager per file system. * Note that there is not one quota manager per file system.
...@@ -161,7 +163,7 @@ xfs_Gqm_init(void) ...@@ -161,7 +163,7 @@ xfs_Gqm_init(void)
} else } else
xqm->qm_dqzone = qm_dqzone; xqm->qm_dqzone = qm_dqzone;
kmem_shake_register(xfs_qm_shake); xfs_qm_shrinker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
/* /*
* The t_dqinfo portion of transactions. * The t_dqinfo portion of transactions.
...@@ -193,7 +195,8 @@ xfs_qm_destroy( ...@@ -193,7 +195,8 @@ xfs_qm_destroy(
ASSERT(xqm != NULL); ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0); ASSERT(xqm->qm_nrefs == 0);
kmem_shake_deregister(xfs_qm_shake);
remove_shrinker(xfs_qm_shrinker);
hsize = xqm->qm_dqhashmask + 1; hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) { for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
...@@ -2088,7 +2091,7 @@ xfs_qm_shake_freelist( ...@@ -2088,7 +2091,7 @@ xfs_qm_shake_freelist(
xfs_dqunlock(dqp); xfs_dqunlock(dqp);
xfs_qm_freelist_unlock(xfs_Gqm); xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return (nreclaimed != howmany); goto out;
XQM_STATS_INC(xqmstats.xs_qm_dqwants); XQM_STATS_INC(xqmstats.xs_qm_dqwants);
goto tryagain; goto tryagain;
} }
...@@ -2163,7 +2166,7 @@ xfs_qm_shake_freelist( ...@@ -2163,7 +2166,7 @@ xfs_qm_shake_freelist(
XFS_DQ_HASH_UNLOCK(hash); XFS_DQ_HASH_UNLOCK(hash);
xfs_qm_freelist_unlock(xfs_Gqm); xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return (nreclaimed != howmany); goto out;
goto tryagain; goto tryagain;
} }
xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
...@@ -2188,7 +2191,8 @@ xfs_qm_shake_freelist( ...@@ -2188,7 +2191,8 @@ xfs_qm_shake_freelist(
dqp = nextdqp; dqp = nextdqp;
} }
xfs_qm_freelist_unlock(xfs_Gqm); xfs_qm_freelist_unlock(xfs_Gqm);
return (nreclaimed != howmany); out:
return nreclaimed;
} }
...@@ -2197,13 +2201,15 @@ xfs_qm_shake_freelist( ...@@ -2197,13 +2201,15 @@ xfs_qm_shake_freelist(
* running low. * running low.
*/ */
/* ARGSUSED */ /* ARGSUSED */
STATIC void STATIC int
xfs_qm_shake(void) xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
{ {
int ndqused, nfree, n; int ndqused, nfree, n;
if (!(gfp_mask & __GFP_WAIT))
return 0;
if (!xfs_Gqm) if (!xfs_Gqm)
return; return 0;
nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */ nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
/* incore dquots in all f/s's */ /* incore dquots in all f/s's */
...@@ -2212,12 +2218,12 @@ xfs_qm_shake(void) ...@@ -2212,12 +2218,12 @@ xfs_qm_shake(void)
ASSERT(ndqused >= 0); ASSERT(ndqused >= 0);
if (nfree <= ndqused && nfree < ndquot) if (nfree <= ndqused && nfree < ndquot)
return; return 0;
ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */
n = nfree - ndqused - ndquot; /* # over target */ n = nfree - ndqused - ndquot; /* # over target */
(void) xfs_qm_shake_freelist(MAX(nfree, n)); return xfs_qm_shake_freelist(MAX(nfree, n));
} }
......
...@@ -32,31 +32,118 @@ ...@@ -32,31 +32,118 @@
#ifndef __XFS_SUPPORT_KMEM_H__ #ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__ #define __XFS_SUPPORT_KMEM_H__
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/vmalloc.h>
/* /*
* memory management routines * Cutoff point to use vmalloc instead of kmalloc.
*/ */
#define MAX_SLAB_SIZE 0x10000
/*
* XFS uses slightly different names for these due to the
* IRIX heritage.
*/
#define kmem_zone kmem_cache_s
#define kmem_zone_t kmem_cache_t
#define KM_SLEEP 0x0001 #define KM_SLEEP 0x0001
#define KM_NOSLEEP 0x0002 #define KM_NOSLEEP 0x0002
#define KM_NOFS 0x0004 #define KM_NOFS 0x0004
#define kmem_zone kmem_cache_s
#define kmem_zone_t kmem_cache_t
extern kmem_zone_t *kmem_zone_init(int, char *); /*
extern void *kmem_zone_zalloc(kmem_zone_t *, int); * XXX get rid of the unconditional __GFP_NOFAIL by adding
extern void *kmem_zone_alloc(kmem_zone_t *, int); * a KM_FAIL flag and using it where we're allowed to fail.
extern void kmem_zone_free(kmem_zone_t *, void *); */
/*
 * Translate XFS KM_* allocation flags into a Linux gfp mask.
 *
 * KM_NOSLEEP maps to GFP_ATOMIC.  KM_NOFS -- or running inside a
 * transaction, indicated by PF_FSTRANS on the current task -- maps to
 * GFP_NOFS so the allocator cannot recurse into the filesystem.
 * Everything else gets GFP_KERNEL.  Sleeping allocations are also
 * marked __GFP_NOFAIL (see the XXX note above about adding KM_FAIL).
 */
static __inline unsigned int
flag_convert(int flags)
{
#if DEBUG
	/* Catch callers passing flags outside the supported KM_* set. */
	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS))) {
		printk(KERN_WARNING
		    "XFS: memory allocation with wrong flags (%x)\n", flags);
		BUG();
	}
#endif
	if (flags & KM_NOSLEEP)
		return GFP_ATOMIC;
	/* If we're in a transaction, FS activity is not ok */
	else if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
		return GFP_NOFS | __GFP_NOFAIL;
	return GFP_KERNEL | __GFP_NOFAIL;
}
/*
 * Allocate @size bytes honouring the KM_* @flags.  Requests larger
 * than MAX_SLAB_SIZE are routed through vmalloc, since kmalloc cannot
 * satisfy them.
 */
static __inline void *
kmem_alloc(size_t size, int flags)
{
	unsigned int gfp = flag_convert(flags);

	if (likely(size <= MAX_SLAB_SIZE))
		return kmalloc(size, gfp);
	/* Avoid doing filesystem sensitive stuff to get this */
	return __vmalloc(size, gfp, PAGE_KERNEL);
}
/*
 * Allocate @size bytes as kmem_alloc() does, then zero the buffer.
 * Returns NULL when the underlying allocation fails.
 */
static __inline void *
kmem_zalloc(size_t size, int flags)
{
	void *buf;

	buf = kmem_alloc(size, flags);
	if (buf)
		memset(buf, 0, size);
	return buf;
}
/*
 * Free memory obtained from kmem_alloc()/kmem_zalloc().  Whether the
 * buffer came from kmalloc or vmalloc is recovered from its address:
 * only vmalloc returns pointers inside [VMALLOC_START, VMALLOC_END).
 * @size is unused here but kept for the IRIX-compatible interface.
 */
static __inline void
kmem_free(void *ptr, size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	if (addr >= VMALLOC_START && addr < VMALLOC_END)
		vfree(ptr);
	else
		kfree(ptr);
}
/*
 * Resize @ptr from @oldsize to @newsize, copying min(oldsize, newsize)
 * bytes into the new buffer.  A NULL @ptr behaves like kmem_alloc().
 *
 * NOTE(review): unlike realloc(3), the old buffer is freed even when
 * the new allocation fails, so callers must not touch @ptr afterwards
 * -- confirm existing callers expect this before changing it.
 */
static __inline void *
kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
{
	void *new = kmem_alloc(newsize, flags);

	if (likely(ptr != NULL)) {
		if (likely(new != NULL))
			memcpy(new, ptr, min(oldsize, newsize));
		/* freed regardless of whether the new allocation worked */
		kmem_free(ptr, oldsize);
	}
	return new;
}
/*
 * Create a slab cache ("zone" in IRIX parlance) for objects of @size
 * bytes, identified by @zone_name in /proc/slabinfo.
 */
static __inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	/* default alignment and offset, no constructor or destructor */
	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
}
/*
 * Allocate one object from @zone, converting the XFS KM_* @flags into
 * a gfp mask via flag_convert().
 *
 * (Reconstructed: the scraped diff fused the removed extern
 * declarations into these lines; this is the new-side definition.)
 */
static __inline void *
kmem_zone_alloc(kmem_zone_t *zone, int flags)
{
	return kmem_cache_alloc(zone, flag_convert(flags));
}
/*
 * Allocate one object from @zone and zero it.  The object size comes
 * from the cache itself via kmem_cache_size().
 *
 * (Reconstructed: the scraped diff fused the removed
 * kmem_shake_func_t typedef into the first line.)
 */
static __inline void *
kmem_zone_zalloc(kmem_zone_t *zone, int flags)
{
	void *ptr = kmem_zone_alloc(zone, flags);

	if (likely(ptr != NULL))
		memset(ptr, 0, kmem_cache_size(zone));
	return ptr;
}
/*
 * Return @ptr to the zone it was allocated from.
 *
 * (Reconstructed: the scraped diff fused the removed
 * kmem_shake_register/deregister externs into the first lines.)
 */
static __inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}
#endif /* __XFS_SUPPORT_KMEM_H__ */ #endif /* __XFS_SUPPORT_KMEM_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment