Commit 89594c74 authored by Linus Torvalds

Merge tag 'fscache-next-20210829' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull fscache updates from David Howells:
 "Preparatory work for the fscache rewrite that's being worked on and
  fix some bugs. These include:

   - Always select netfs stats when enabling fscache stats since they're
     displayed through the same procfile.

   - Add a cookie debug ID that can be used in tracepoints instead of a
     pointer and cache it in the netfs_cache_resources struct rather
     than in the netfs_read_request struct to make it more available.

   - Use file_inode() in cachefiles rather than dereferencing
     file->f_inode directly (see the first sketch after this message).

   - Provide a procfile to display fscache cookies.

   - Remove the fscache and cachefiles histogram procfiles.

   - Remove the fscache object list procfile.

   - Avoid using %p in fscache and cachefiles as the value is hashed and
     not comparable to the register dump in an oops trace (see the
     second sketch after this message).

   - Fix the cookie hash function to actually achieve useful dispersion.

   - Fix fscache_cookie_put() so that it doesn't dereference the cookie
     pointer in the tracepoint after the refcount has been decremented
     (we're only allowed to do that if we decremented it to zero; see
     the third sketch after this message).

   - Use refcount_t rather than atomic_t for the fscache_cookie
     refcount"

* tag 'fscache-next-20210829' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  fscache: Use refcount_t for the cookie refcount instead of atomic_t
  fscache: Fix fscache_cookie_put() to not deref after dec
  fscache: Fix cookie key hashing
  cachefiles: Change %p in format strings to something else
  fscache: Change %p in format strings to something else
  fscache: Remove the object list procfile
  fscache, cachefiles: Remove the histogram stuff
  fscache: Procfile to display cookies
  fscache: Add a cookie debug ID and use that in traces
  cachefiles: Use file_inode() rather than accessing ->f_inode
  netfs: Move cookie debug ID to struct netfs_cache_resources
  fscache: Select netfs stats if fscache stats are enabled
parents 75ae663d 20ec197b
@@ -19,22 +19,3 @@ config CACHEFILES_DEBUG
caching on files module. If this is set, the debugging output may be
enabled by setting bits in /sys/modules/cachefiles/parameter/debug or
by including a debugging specifier in /etc/cachefilesd.conf.
config CACHEFILES_HISTOGRAM
bool "Gather latency information on CacheFiles"
depends on CACHEFILES && PROC_FS
help
This option causes latency information to be gathered on CacheFiles
operation and exported through file:
/proc/fs/cachefiles/histogram
The generation of this histogram adds a certain amount of overhead to
execution as there are a number of points at which data is gathered,
and on a multi-CPU system these may be on cachelines that keep
bouncing between CPUs. On the other hand, the histogram may be
useful for debugging purposes. Saying 'N' here is recommended.
See Documentation/filesystems/caching/cachefiles.rst for more
information.
@@ -15,6 +15,4 @@ cachefiles-y := \
security.o \
xattr.o
cachefiles-$(CONFIG_CACHEFILES_HISTOGRAM) += proc.o
obj-$(CONFIG_CACHEFILES) := cachefiles.o
@@ -108,8 +108,6 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
atomic_set(&fsdef->usage, 1);
fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
_debug("- fsdef %p", fsdef);
/* look up the directory at the root of the cache */
ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
if (ret < 0)
...
@@ -33,7 +33,7 @@ static struct fscache_object *cachefiles_alloc_object(
cache = container_of(_cache, struct cachefiles_cache, cache);
_enter("{%s},%p,", cache->cache.identifier, cookie);
_enter("{%s},%x,", cache->cache.identifier, cookie->debug_id);
lookup_data = kmalloc(sizeof(*lookup_data), cachefiles_gfp);
if (!lookup_data)
@@ -96,7 +96,7 @@ static struct fscache_object *cachefiles_alloc_object(
lookup_data->key = key;
object->lookup_data = lookup_data;
_leave(" = %p [%p]", &object->fscache, lookup_data);
_leave(" = %x [%p]", object->fscache.debug_id, lookup_data);
return &object->fscache;
nomem_key:
@@ -379,7 +379,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
const struct cred *saved_cred;
int ret;
_enter("%p", _cache);
_enter("%s", _cache->tag->name);
cache = container_of(_cache, struct cachefiles_cache, cache);
...
@@ -180,31 +180,6 @@ extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
struct dentry *dir, char *filename);
/*
* proc.c
*/
#ifdef CONFIG_CACHEFILES_HISTOGRAM
extern atomic_t cachefiles_lookup_histogram[HZ];
extern atomic_t cachefiles_mkdir_histogram[HZ];
extern atomic_t cachefiles_create_histogram[HZ];
extern int __init cachefiles_proc_init(void);
extern void cachefiles_proc_cleanup(void);
static inline
void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
{
unsigned long jif = jiffies - start_jif;
if (jif >= HZ)
jif = HZ - 1;
atomic_inc(&histogram[jif]);
}
#else
#define cachefiles_proc_init() (0)
#define cachefiles_proc_cleanup() do {} while (0)
#define cachefiles_hist(hist, start_jif) do {} while (0)
#endif
/*
* rdwr.c
*/
...
@@ -70,7 +70,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file->f_inode));
i_size_read(file_inode(file)));
/* If the caller asked us to seek for data before doing the read, then
* we should do that now. If we find a gap, we fill it with zeros.
@@ -194,7 +194,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file->f_inode));
i_size_read(file_inode(file)));
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki)
@@ -410,7 +410,7 @@ int cachefiles_begin_read_operation(struct netfs_read_request *rreq,
rreq->cache_resources.cache_priv = op;
rreq->cache_resources.cache_priv2 = file;
rreq->cache_resources.ops = &cachefiles_netfs_cache_ops;
rreq->cookie_debug_id = object->fscache.debug_id;
rreq->cache_resources.debug_id = object->fscache.debug_id;
_leave("");
return 0;
...
@@ -150,6 +150,6 @@ char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
key[len++] = 0;
key[len] = 0;
_leave(" = %p %d", key, len);
_leave(" = %s %d", key, len);
return key;
}
@@ -69,15 +69,9 @@ static int __init cachefiles_init(void)
goto error_object_jar;
}
ret = cachefiles_proc_init();
if (ret < 0)
goto error_proc;
pr_info("Loaded\n");
return 0;
error_proc:
kmem_cache_destroy(cachefiles_object_jar);
error_object_jar:
misc_deregister(&cachefiles_dev);
error_dev:
@@ -94,7 +88,6 @@ static void __exit cachefiles_exit(void)
{
pr_info("Unloading\n");
cachefiles_proc_cleanup();
kmem_cache_destroy(cachefiles_object_jar);
misc_deregister(&cachefiles_dev);
}
...
@@ -39,18 +39,18 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
pr_err("%sops=%u inp=%u exc=%u\n",
prefix, object->fscache.n_ops, object->fscache.n_in_progress,
object->fscache.n_exclusive);
pr_err("%sparent=%p\n",
prefix, object->fscache.parent);
pr_err("%sparent=%x\n",
prefix, object->fscache.parent ? object->fscache.parent->debug_id : 0);
spin_lock(&object->fscache.lock);
cookie = object->fscache.cookie;
if (cookie) {
pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
pr_err("%scookie=%x [pr=%x nd=%p fl=%lx]\n",
prefix,
object->fscache.cookie,
object->fscache.cookie->parent,
object->fscache.cookie->netfs_data,
object->fscache.cookie->flags);
cookie->debug_id,
cookie->parent ? cookie->parent->debug_id : 0,
cookie->netfs_data,
cookie->flags);
pr_err("%skey=[%u] '", prefix, cookie->key_len);
k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
cookie->inline_key : cookie->key;
@@ -110,7 +110,7 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
/* found the dentry for */
found_dentry:
kdebug("preemptive burial: OBJ%x [%s] %p",
kdebug("preemptive burial: OBJ%x [%s] %pd",
object->fscache.debug_id,
object->fscache.state->name,
dentry);
@@ -140,7 +140,7 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
struct rb_node **_p, *_parent = NULL;
struct dentry *dentry;
_enter(",%p", object);
_enter(",%x", object->fscache.debug_id);
try_again:
write_lock(&cache->active_lock);
@@ -298,8 +298,6 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
_enter(",'%pd','%pd'", dir, rep);
_debug("remove %p from %p", rep, dir);
/* non-directories can just be unlinked */
if (!d_is_dir(rep)) {
_debug("unlink stale object");
@@ -446,7 +444,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
struct dentry *dir;
int ret;
_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
_enter(",OBJ%x{%pd}", object->fscache.debug_id, object->dentry);
ASSERT(object->dentry);
ASSERT(d_backing_inode(object->dentry));
@@ -496,11 +494,10 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
struct dentry *dir, *next = NULL;
struct inode *inode;
struct path path;
unsigned long start;
const char *name;
int ret, nlen;
_enter("OBJ%x{%p},OBJ%x,%s,",
_enter("OBJ%x{%pd},OBJ%x,%s,",
parent->fscache.debug_id, parent->dentry,
object->fscache.debug_id, key);
@@ -535,9 +532,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(next)) {
trace_cachefiles_lookup(object, next, NULL);
goto lookup_error;
@@ -545,7 +540,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
inode = d_backing_inode(next);
trace_cachefiles_lookup(object, next, inode);
_debug("next -> %p %s", next, inode ? "positive" : "negative");
_debug("next -> %pd %s", next, inode ? "positive" : "negative");
if (!key)
object->new = !inode;
@@ -568,9 +563,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
ret = security_path_mkdir(&path, next, 0);
if (ret < 0)
goto create_error;
start = jiffies;
ret = vfs_mkdir(&init_user_ns, d_inode(dir), next, 0);
cachefiles_hist(cachefiles_mkdir_histogram, start);
if (!key)
trace_cachefiles_mkdir(object, next, ret);
if (ret < 0)
@@ -583,8 +576,8 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
}
ASSERT(d_backing_inode(next));
_debug("mkdir -> %p{%p{ino=%lu}}",
next, d_backing_inode(next), d_backing_inode(next)->i_ino);
_debug("mkdir -> %pd{ino=%lu}",
next, d_backing_inode(next)->i_ino);
} else if (!d_can_lookup(next)) {
pr_err("inode %lu is not a directory\n",
@@ -604,18 +597,16 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
ret = security_path_mknod(&path, next, S_IFREG, 0);
if (ret < 0)
goto create_error;
start = jiffies;
ret = vfs_create(&init_user_ns, d_inode(dir), next,
S_IFREG, true);
cachefiles_hist(cachefiles_create_histogram, start);
trace_cachefiles_create(object, next, ret);
if (ret < 0)
goto create_error;
ASSERT(d_backing_inode(next));
_debug("create -> %p{%p{ino=%lu}}",
next, d_backing_inode(next), d_backing_inode(next)->i_ino);
_debug("create -> %pd{ino=%lu}",
next, d_backing_inode(next)->i_ino);
} else if (!d_can_lookup(next) &&
!d_is_reg(next)
@@ -765,7 +756,6 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
const char *dirname)
{
struct dentry *subdir;
unsigned long start;
struct path path;
int ret;
@@ -775,16 +765,14 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
inode_lock(d_inode(dir));
retry:
start = jiffies;
subdir = lookup_one_len(dirname, dir, strlen(dirname));
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(subdir)) {
if (PTR_ERR(subdir) == -ENOMEM)
goto nomem_d_alloc;
goto lookup_error;
}
_debug("subdir -> %p %s",
_debug("subdir -> %pd %s",
subdir, d_backing_inode(subdir) ? "positive" : "negative");
/* we need to create the subdir if it doesn't exist yet */
@@ -810,10 +798,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
}
ASSERT(d_backing_inode(subdir));
_debug("mkdir -> %p{%p{ino=%lu}}",
subdir,
d_backing_inode(subdir),
d_backing_inode(subdir)->i_ino);
_debug("mkdir -> %pd{ino=%lu}",
subdir, d_backing_inode(subdir)->i_ino);
}
inode_unlock(d_inode(dir));
@@ -876,7 +862,6 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
struct cachefiles_object *object;
struct rb_node *_n;
struct dentry *victim;
unsigned long start;
int ret;
//_enter(",%pd/,%s",
@@ -885,13 +870,11 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
/* look up the victim */
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
start = jiffies;
victim = lookup_one_len(filename, dir, strlen(filename));
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(victim))
goto lookup_error;
//_debug("victim -> %p %s",
//_debug("victim -> %pd %s",
// victim, d_backing_inode(victim) ? "positive" : "negative");
/* if the object is no longer there then we probably retired the object
@@ -922,7 +905,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
read_unlock(&cache->active_lock);
//_leave(" = %p", victim);
//_leave(" = %pd", victim);
return victim;
object_in_use:
@@ -968,7 +951,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
if (IS_ERR(victim))
return PTR_ERR(victim);
_debug("victim -> %p %s",
_debug("victim -> %pd %s",
victim, d_backing_inode(victim) ? "positive" : "negative");
/* okay... the victim is not being used so we can cull it
...
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles statistics
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t cachefiles_lookup_histogram[HZ];
atomic_t cachefiles_mkdir_histogram[HZ];
atomic_t cachefiles_create_histogram[HZ];
/*
* display the latency histogram
*/
static int cachefiles_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned x, y, z, t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS LOOKUPS MKDIRS CREATES\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= =========\n");
return 0;
default:
index = (unsigned long) v - 3;
x = atomic_read(&cachefiles_lookup_histogram[index]);
y = atomic_read(&cachefiles_mkdir_histogram[index]);
z = atomic_read(&cachefiles_create_histogram[index]);
if (x == 0 && y == 0 && z == 0)
return 0;
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u\n", index, t, x, y, z);
return 0;
}
}
/*
* set up the iterator to start reading from the first line
*/
static void *cachefiles_histogram_start(struct seq_file *m, loff_t *_pos)
{
if ((unsigned long long)*_pos >= HZ + 2)
return NULL;
if (*_pos == 0)
*_pos = 1;
return (void *)(unsigned long) *_pos;
}
/*
* move to the next line
*/
static void *cachefiles_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (unsigned long long)*pos > HZ + 2 ?
NULL : (void *)(unsigned long) *pos;
}
/*
* clean up after reading
*/
static void cachefiles_histogram_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations cachefiles_histogram_ops = {
.start = cachefiles_histogram_start,
.stop = cachefiles_histogram_stop,
.next = cachefiles_histogram_next,
.show = cachefiles_histogram_show,
};
/*
* initialise the /proc/fs/cachefiles/ directory
*/
int __init cachefiles_proc_init(void)
{
_enter("");
if (!proc_mkdir("fs/cachefiles", NULL))
goto error_dir;
if (!proc_create_seq("fs/cachefiles/histogram", S_IFREG | 0444, NULL,
&cachefiles_histogram_ops))
goto error_histogram;
_leave(" = 0");
return 0;
error_histogram:
remove_proc_entry("fs/cachefiles", NULL);
error_dir:
_leave(" = -ENOMEM");
return -ENOMEM;
}
/*
* clean up the /proc/fs/cachefiles/ directory
*/
void cachefiles_proc_cleanup(void)
{
remove_proc_entry("fs/cachefiles/histogram", NULL);
remove_proc_entry("fs/cachefiles", NULL);
}
@@ -36,7 +36,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
else
snprintf(type, 3, "%02x", object->fscache.cookie->def->type);
_enter("%p{%s}", object, type);
_enter("%x{%s}", object->fscache.debug_id, type);
/* attempt to install a type label directly */
ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache, type,
@@ -134,7 +134,7 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
if (!dentry)
return -ESTALE;
_enter("%p,#%d", object, auxdata->len);
_enter("%x,#%d", object->fscache.debug_id, auxdata->len);
/* attempt to install the cache metadata directly */
_debug("SET #%u", auxdata->len);
...
@@ -14,6 +14,7 @@ config FSCACHE
config FSCACHE_STATS
bool "Gather statistical information on local caching"
depends on FSCACHE && PROC_FS
select NETFS_STATS
help
This option causes statistical information to be gathered on local
caching and exported through file:
@@ -28,23 +29,6 @@ config FSCACHE_STATS
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_HISTOGRAM
bool "Gather latency information on local caching"
depends on FSCACHE && PROC_FS
help
This option causes latency information to be gathered on local
caching and exported through file:
/proc/fs/fscache/histogram
The generation of this histogram adds a certain amount of overhead to
execution as there are a number of points at which data is gathered,
and on a multi-CPU system these may be on cachelines that keep
bouncing between CPUs. On the other hand, the histogram may be
useful for debugging purposes. Saying 'N' here is recommended.
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_DEBUG
bool "Debug FS-Cache"
depends on FSCACHE
@@ -54,10 +38,3 @@ config FSCACHE_DEBUG
enabled by setting bits in /sys/modules/fscache/parameter/debug.
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_OBJECT_LIST
bool "Maintain global object list for debugging purposes"
depends on FSCACHE && PROC_FS
help
Maintain a global list of active fscache objects that can be
retrieved through /proc/fs/fscache/objects for debugging purposes
@@ -16,7 +16,5 @@ fscache-y := \
fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
obj-$(CONFIG_FSCACHE) := fscache.o
@@ -116,7 +116,7 @@ struct fscache_cache *fscache_select_cache_for_object(
cache = NULL;
spin_unlock(&cookie->lock);
_leave(" = %p [parent]", cache);
_leave(" = %s [parent]", cache ? cache->tag->name : "NULL");
return cache;
}
@@ -152,14 +152,14 @@ struct fscache_cache *fscache_select_cache_for_object(
if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
return NULL;
_leave(" = %p [specific]", tag->cache);
_leave(" = %s [specific]", tag->name);
return tag->cache;
no_preference:
/* netfs has no preference - just select first cache */
cache = list_entry(fscache_cache_list.next,
struct fscache_cache, link);
_leave(" = %p [first]", cache);
_leave(" = %s [first]", cache->tag->name);
return cache;
}
@@ -261,7 +261,6 @@ int fscache_add_cache(struct fscache_cache *cache,
spin_lock(&cache->object_list_lock);
list_add_tail(&ifsdef->cache_link, &cache->object_list);
spin_unlock(&cache->object_list_lock);
fscache_objlist_add(ifsdef);
/* add the cache's netfs definition index object to the top level index
* cookie as a known backing object */
@@ -270,7 +269,7 @@ int fscache_add_cache(struct fscache_cache *cache,
hlist_add_head(&ifsdef->cookie_link,
&fscache_fsdef_index.backing_objects);
atomic_inc(&fscache_fsdef_index.usage);
refcount_inc(&fscache_fsdef_index.ref);
/* done */
spin_unlock(&fscache_fsdef_index.lock);
@@ -335,7 +334,7 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache,
struct fscache_object, cache_link);
list_move_tail(&object->cache_link, dying_objects);
_debug("withdraw %p", object->cookie);
_debug("withdraw %x", object->cookie->debug_id);
/* This must be done under object_list_lock to prevent
* a race with fscache_drop_object().
...
This diff is collapsed.
@@ -45,7 +45,8 @@ static struct fscache_cookie_def fscache_fsdef_index_def = {
};
struct fscache_cookie fscache_fsdef_index = {
.usage = ATOMIC_INIT(1),
.debug_id = 1,
.ref = REFCOUNT_INIT(1),
.n_active = ATOMIC_INIT(1),
.lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
.backing_objects = HLIST_HEAD_INIT,
...
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache latency histogram
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define FSCACHE_DEBUG_LEVEL THREAD
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t fscache_obj_instantiate_histogram[HZ];
atomic_t fscache_objs_histogram[HZ];
atomic_t fscache_ops_histogram[HZ];
atomic_t fscache_retrieval_delay_histogram[HZ];
atomic_t fscache_retrieval_histogram[HZ];
/*
* display the time-taken histogram
*/
static int fscache_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned n[5], t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS OBJ INST OP RUNS OBJ RUNS RETRV DLY RETRIEVLS\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= ========= ========= =========\n");
return 0;
default:
index = (unsigned long) v - 3;
n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]);
n[1] = atomic_read(&fscache_ops_histogram[index]);
n[2] = atomic_read(&fscache_objs_histogram[index]);
n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]);
n[4] = atomic_read(&fscache_retrieval_histogram[index]);
if (!(n[0] | n[1] | n[2] | n[3] | n[4]))
return 0;
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u %9u %9u\n",
index, t, n[0], n[1], n[2], n[3], n[4]);
return 0;
}
}
/*
* set up the iterator to start reading from the first line
*/
static void *fscache_histogram_start(struct seq_file *m, loff_t *_pos)
{
if ((unsigned long long)*_pos >= HZ + 2)
return NULL;
if (*_pos == 0)
*_pos = 1;
return (void *)(unsigned long) *_pos;
}
/*
* move to the next line
*/
static void *fscache_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (unsigned long long)*pos > HZ + 2 ?
NULL : (void *)(unsigned long) *pos;
}
/*
* clean up after reading
*/
static void fscache_histogram_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations fscache_histogram_ops = {
.start = fscache_histogram_start,
.stop = fscache_histogram_stop,
.next = fscache_histogram_next,
.show = fscache_histogram_show,
};
@@ -45,6 +45,7 @@ extern struct fscache_cache *fscache_select_cache_for_object(
* cookie.c
*/
extern struct kmem_cache *fscache_cookie_jar;
extern const struct seq_operations fscache_cookies_seq_ops;
extern void fscache_free_cookie(struct fscache_cookie *);
extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
@@ -53,39 +54,24 @@ extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
const void *, size_t,
void *, loff_t);
extern struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *);
extern struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *,
enum fscache_cookie_trace);
extern void fscache_cookie_put(struct fscache_cookie *,
enum fscache_cookie_trace);
static inline void fscache_cookie_see(struct fscache_cookie *cookie,
enum fscache_cookie_trace where)
{
trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
where);
}
/*
* fsdef.c
*/
extern struct fscache_cookie fscache_fsdef_index;
extern struct fscache_cookie_def fscache_fsdef_netfs_def;
/*
* histogram.c
*/
#ifdef CONFIG_FSCACHE_HISTOGRAM
extern atomic_t fscache_obj_instantiate_histogram[HZ];
extern atomic_t fscache_objs_histogram[HZ];
extern atomic_t fscache_ops_histogram[HZ];
extern atomic_t fscache_retrieval_delay_histogram[HZ];
extern atomic_t fscache_retrieval_histogram[HZ];
static inline void fscache_hist(atomic_t histogram[], unsigned long start_jif)
{
unsigned long jif = jiffies - start_jif;
if (jif >= HZ)
jif = HZ - 1;
atomic_inc(&histogram[jif]);
}
extern const struct seq_operations fscache_histogram_ops;
#else
#define fscache_hist(hist, start_jif) do {} while (0)
#endif
/*
* main.c
*/
@@ -97,6 +83,8 @@ extern struct workqueue_struct *fscache_object_wq;
extern struct workqueue_struct *fscache_op_wq;
DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
static inline bool fscache_object_congested(void)
{
return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
@@ -107,19 +95,6 @@ static inline bool fscache_object_congested(void)
*/
extern void fscache_enqueue_object(struct fscache_object *);
/*
* object-list.c
*/
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern const struct proc_ops fscache_objlist_proc_ops;
extern void fscache_objlist_add(struct fscache_object *);
extern void fscache_objlist_remove(struct fscache_object *);
#else
#define fscache_objlist_add(object) do {} while(0)
#define fscache_objlist_remove(object) do {} while(0)
#endif
/*
* operation.c
*/
@@ -320,14 +295,6 @@ static inline void fscache_raise_event(struct fscache_object *object,
fscache_enqueue_object(object);
}
static inline void fscache_cookie_get(struct fscache_cookie *cookie,
enum fscache_cookie_trace where)
{
int usage = atomic_inc_return(&cookie->usage);
trace_fscache_cookie(cookie, where, usage);
}
/*
* get an extra reference to a netfs retrieval context
*/
...
@@ -93,6 +93,45 @@ static struct ctl_table fscache_sysctls_root[] = {
};
#endif
/*
* Mixing scores (in bits) for (7,20):
* Input delta: 1-bit 2-bit
* 1 round: 330.3 9201.6
* 2 rounds: 1246.4 25475.4
* 3 rounds: 1907.1 31295.1
* 4 rounds: 2042.3 31718.6
* Perfect: 2048 31744
* (32*64) (32*31/2 * 64)
*/
#define HASH_MIX(x, y, a) \
( x ^= (a), \
y ^= x, x = rol32(x, 7),\
x += y, y = rol32(y,20),\
y *= 9 )
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
/* Use arch-optimized multiply if one exists */
return __hash_32(y ^ __hash_32(x));
}
/*
* Generate a hash. This is derived from full_name_hash(), but we want to be
* sure it is arch independent and that it doesn't change as bits of the
* computed hash value might appear on disk. The caller also guarantees that
* the hashed data will be a series of aligned 32-bit words.
*/
unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
{
unsigned int a, x = 0, y = salt;
for (; n; n--) {
a = *data++;
HASH_MIX(x, y, a);
}
return fold_hash(x, y);
}
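/* Illustrative usage, not part of the patch: the caller guarantees a
 * buffer of aligned 32-bit words, so hashing a two-word key would be:
 *
 *	unsigned int key[] = { 0x12345678, 0x9abcdef0 };
 *	unsigned int hash = fscache_hash(0, key, ARRAY_SIZE(key));
 */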
/*
* initialise the fs caching module
*/
...
@@ -37,7 +37,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
if (!cookie)
goto already_registered;
if (cookie != candidate) {
trace_fscache_cookie(candidate, fscache_cookie_discard, 1);
trace_fscache_cookie(candidate->debug_id, 1, fscache_cookie_discard);
fscache_free_cookie(candidate);
}
...
This diff is collapsed.
@@ -277,13 +277,10 @@ static void fscache_object_work_func(struct work_struct *work)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
unsigned long start;
_enter("{OBJ%x}", object->debug_id);
start = jiffies;
fscache_object_sm_dispatcher(object);
fscache_hist(fscache_objs_histogram, start);
fscache_put_object(object, fscache_obj_put_work);
}
@@ -436,7 +433,6 @@ static const struct fscache_state *fscache_parent_ready(struct fscache_object *o
spin_lock(&parent->lock);
parent->n_ops++;
parent->n_obj_ops++;
object->lookup_jif = jiffies;
spin_unlock(&parent->lock);
_leave("");
@@ -522,7 +518,6 @@ void fscache_object_lookup_negative(struct fscache_object *object)
set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
_debug("wake up lookup %p", &cookie->flags);
clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
}
@@ -596,7 +591,6 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
object->cache->ops->lookup_complete(object);
fscache_stat_d(&fscache_n_cop_lookup_complete);
fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
fscache_stat(&fscache_n_object_avail);
_leave("");
@@ -799,8 +793,6 @@ static void fscache_put_object(struct fscache_object *object,
*/
void fscache_object_destroy(struct fscache_object *object)
{
fscache_objlist_remove(object);
/* We can get rid of the cookie now */
fscache_cookie_put(object->cookie, fscache_cookie_put_object);
object->cookie = NULL;
...
@@ -616,7 +616,6 @@ void fscache_op_work_func(struct work_struct *work)
{
struct fscache_operation *op =
container_of(work, struct fscache_operation, work);
unsigned long start;
_enter("{OBJ%x OP%x,%d}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
@@ -624,9 +623,7 @@ void fscache_op_work_func(struct work_struct *work)
trace_fscache_op(op->object->cookie, op, fscache_op_work);
ASSERT(op->processor != NULL);
start = jiffies;
op->processor(op);
fscache_hist(fscache_ops_histogram, start);
fscache_put_operation(op);
_leave("");
...
@@ -289,7 +289,6 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
atomic_read(&op->n_pages), ==, 0);
fscache_hist(fscache_retrieval_histogram, op->start_time);
if (op->context)
fscache_put_context(op->cookie, op->context);
@@ -324,7 +323,6 @@ struct fscache_retrieval *fscache_alloc_retrieval(
op->mapping = mapping;
op->end_io_func = end_io_func;
op->context = context;
op->start_time = jiffies;
INIT_LIST_HEAD(&op->to_do);
/* Pin the netfs read context in case we need to do the actual netfs
@@ -340,8 +338,6 @@
*/
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
unsigned long jif;
_enter("");
if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
@@ -351,7 +347,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
fscache_stat(&fscache_n_retrievals_wait);
jif = jiffies;
if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
TASK_INTERRUPTIBLE) != 0) {
fscache_stat(&fscache_n_retrievals_intr);
@@ -362,7 +357,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
smp_rmb();
fscache_hist(fscache_retrieval_delay_histogram, jif);
_leave(" = 0 [dly]");
return 0;
}
...
@@ -21,18 +21,16 @@ int __init fscache_proc_init(void)
if (!proc_mkdir("fs/fscache", NULL))
goto error_dir;
if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
&fscache_cookies_seq_ops))
goto error_cookies;
#ifdef CONFIG_FSCACHE_STATS
if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
fscache_stats_show))
goto error_stats;
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
if (!proc_create_seq("fs/fscache/histogram", S_IFREG | 0444, NULL,
&fscache_histogram_ops))
goto error_histogram;
#endif
#ifdef CONFIG_FSCACHE_OBJECT_LIST
if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
&fscache_objlist_proc_ops))
@@ -45,14 +43,12 @@ int __init fscache_proc_init(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
error_objects:
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
error_histogram:
#endif
#ifdef CONFIG_FSCACHE_STATS
remove_proc_entry("fs/fscache/stats", NULL);
error_stats:
#endif
remove_proc_entry("fs/fscache/cookies", NULL);
error_cookies:
remove_proc_entry("fs/fscache", NULL);
error_dir:
_leave(" = -ENOMEM");
@@ -67,11 +63,9 @@ void fscache_proc_cleanup(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
remove_proc_entry("fs/fscache/objects", NULL);
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
#endif
#ifdef CONFIG_FSCACHE_STATS
remove_proc_entry("fs/fscache/stats", NULL);
#endif
remove_proc_entry("fs/fscache/cookies", NULL);
remove_proc_entry("fs/fscache", NULL);
}
@@ -147,7 +147,6 @@ struct fscache_retrieval {
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
atomic_t n_pages; /* number of pages to be retrieved */
};
@@ -385,9 +384,6 @@ struct fscache_object {
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};
...
@@ -123,15 +123,17 @@ struct fscache_netfs {
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
refcount_t ref; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
atomic_t n_active; /* number of active users of netfs ptrs */
unsigned int debug_id;
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
struct hlist_bl_node hash_link; /* Link in hash table */
struct list_head proc_link; /* Link in proc list */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
...
@@ -102,6 +102,7 @@ struct netfs_cache_resources {
const struct netfs_cache_ops *ops;
void *cache_priv;
void *cache_priv2;
unsigned int debug_id; /* Cookie debug ID */
};
/*
......@@ -137,7 +138,6 @@ struct netfs_read_request {
struct list_head subrequests; /* Requests to fetch I/O from disk or net */
void *netfs_priv; /* Private data for the netfs */
unsigned int debug_id;
unsigned int cookie_debug_id;
atomic_t nr_rd_ops; /* Number of read ops in progress */
atomic_t nr_wr_ops; /* Number of write ops in progress */
size_t submitted; /* Amount submitted for I/O so far */
...
@@ -78,20 +78,20 @@ TRACE_EVENT(cachefiles_ref,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(struct fscache_cookie *, cookie )
__field(unsigned int, obj )
__field(unsigned int, cookie )
__field(enum cachefiles_obj_ref_trace, why )
__field(int, usage )
),
TP_fast_assign(
__entry->obj = obj;
__entry->cookie = cookie;
__entry->obj = obj->fscache.debug_id;
__entry->cookie = cookie->debug_id;
__entry->usage = usage;
__entry->why = why;
),
TP_printk("c=%p o=%p u=%d %s",
TP_printk("c=%08x o=%08x u=%d %s",
__entry->cookie, __entry->obj, __entry->usage,
__print_symbolic(__entry->why, cachefiles_obj_ref_traces))
);
@@ -104,18 +104,18 @@ TRACE_EVENT(cachefiles_lookup,
TP_ARGS(obj, de, inode),
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(struct inode *, inode )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->inode = inode;
),
TP_printk("o=%p d=%p i=%p",
TP_printk("o=%08x d=%p i=%p",
__entry->obj, __entry->de, __entry->inode)
);
@@ -126,18 +126,18 @@ TRACE_EVENT(cachefiles_mkdir,
TP_ARGS(obj, de, ret),
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(int, ret )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->ret = ret;
),
TP_printk("o=%p d=%p r=%u",
TP_printk("o=%08x d=%p r=%u",
__entry->obj, __entry->de, __entry->ret)
);
@@ -148,18 +148,18 @@ TRACE_EVENT(cachefiles_create,
TP_ARGS(obj, de, ret),
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(int, ret )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->ret = ret;
),
TP_printk("o=%p d=%p r=%u",
TP_printk("o=%08x d=%p r=%u",
__entry->obj, __entry->de, __entry->ret)
);
@@ -172,18 +172,18 @@ TRACE_EVENT(cachefiles_unlink,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->why = why;
),
TP_printk("o=%p d=%p w=%s",
TP_printk("o=%08x d=%p w=%s",
__entry->obj, __entry->de,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
@@ -198,20 +198,20 @@ TRACE_EVENT(cachefiles_rename,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(struct dentry *, to )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->to = to;
__entry->why = why;
),
TP_printk("o=%p d=%p t=%p w=%s",
TP_printk("o=%08x d=%p t=%p w=%s",
__entry->obj, __entry->de, __entry->to,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
@@ -224,16 +224,16 @@ TRACE_EVENT(cachefiles_mark_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
),
TP_printk("o=%p d=%p",
TP_printk("o=%08x d=%p",
__entry->obj, __entry->de)
);
@@ -246,22 +246,22 @@ TRACE_EVENT(cachefiles_wait_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(unsigned int, xobj )
__field(struct dentry *, de )
__field(struct cachefiles_object *, xobj )
__field(u16, flags )
__field(u16, fsc_flags )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->xobj = xobj;
__entry->xobj = xobj->fscache.debug_id;
__entry->flags = xobj->flags;
__entry->fsc_flags = xobj->fscache.flags;
),
TP_printk("o=%p d=%p wo=%p wf=%x wff=%x",
TP_printk("o=%08x d=%p wo=%08x wf=%x wff=%x",
__entry->obj, __entry->de, __entry->xobj,
__entry->flags, __entry->fsc_flags)
);
@@ -275,18 +275,18 @@ TRACE_EVENT(cachefiles_mark_inactive,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(struct inode *, inode )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->inode = inode;
),
TP_printk("o=%p d=%p i=%p",
TP_printk("o=%08x d=%p i=%p",
__entry->obj, __entry->de, __entry->inode)
);
@@ -299,18 +299,18 @@ TRACE_EVENT(cachefiles_mark_buried,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->why = why;
),
TP_printk("o=%p d=%p w=%s",
TP_printk("o=%08x d=%p w=%s",
__entry->obj, __entry->de,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
...
This diff is collapsed.
@@ -139,7 +139,7 @@ TRACE_EVENT(netfs_read,
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->cookie = rreq->cookie_debug_id;
__entry->cookie = rreq->cache_resources.debug_id;
__entry->start = start;
__entry->len = len;
__entry->what = what;
...