Commit 89594c74 authored by Linus Torvalds

Merge tag 'fscache-next-20210829' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Merge tag 'fscache-next-20210829' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull fscache updates from David Howells:
 "Preparatory work for the fscache rewrite that's being worked on and
  fix some bugs. These include:

   - Always select netfs stats when enabling fscache stats since they're
     displayed through the same procfile.

   - Add a cookie debug ID that can be used in tracepoints instead of a
     pointer and cache it in the netfs_cache_resources struct rather
     than in the netfs_read_request struct to make it more available.

   - Use file_inode() in cachefiles rather than dereferencing
     file->f_inode directly.

   - Provide a procfile to display fscache cookies.

   - Remove the fscache and cachefiles histogram procfiles.

   - Remove the fscache object list procfile.

   - Avoid using %p in fscache and cachefiles as the value is hashed and
     not comparable to the register dump in an oops trace.

   - Fix the cookie hash function to actually achieve useful dispersion.

   - Fix fscache_cookie_put() so that it doesn't dereference the cookie
     pointer in the tracepoint after the refcount has been decremented
     (we're only allowed to do that if we decremented it to zero).

   - Use refcount_t rather than atomic_t for the fscache_cookie
     refcount"

* tag 'fscache-next-20210829' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  fscache: Use refcount_t for the cookie refcount instead of atomic_t
  fscache: Fix fscache_cookie_put() to not deref after dec
  fscache: Fix cookie key hashing
  cachefiles: Change %p in format strings to something else
  fscache: Change %p in format strings to something else
  fscache: Remove the object list procfile
  fscache, cachefiles: Remove the histogram stuff
  fscache: Procfile to display cookies
  fscache: Add a cookie debug ID and use that in traces
  cachefiles: Use file_inode() rather than accessing ->f_inode
  netfs: Move cookie debug ID to struct netfs_cache_resources
  fscache: Select netfs stats if fscache stats are enabled
parents 75ae663d 20ec197b
......@@ -19,22 +19,3 @@ config CACHEFILES_DEBUG
caching on files module. If this is set, the debugging output may be
enabled by setting bits in /sys/modules/cachefiles/parameter/debug or
by including a debugging specifier in /etc/cachefilesd.conf.
config CACHEFILES_HISTOGRAM
bool "Gather latency information on CacheFiles"
depends on CACHEFILES && PROC_FS
help
This option causes latency information to be gathered on CacheFiles
operation and exported through file:
/proc/fs/cachefiles/histogram
The generation of this histogram adds a certain amount of overhead to
execution as there are a number of points at which data is gathered,
and on a multi-CPU system these may be on cachelines that keep
bouncing between CPUs. On the other hand, the histogram may be
useful for debugging purposes. Saying 'N' here is recommended.
See Documentation/filesystems/caching/cachefiles.rst for more
information.
......@@ -15,6 +15,4 @@ cachefiles-y := \
security.o \
xattr.o
cachefiles-$(CONFIG_CACHEFILES_HISTOGRAM) += proc.o
obj-$(CONFIG_CACHEFILES) := cachefiles.o
......@@ -108,8 +108,6 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
atomic_set(&fsdef->usage, 1);
fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
_debug("- fsdef %p", fsdef);
/* look up the directory at the root of the cache */
ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
if (ret < 0)
......
......@@ -33,7 +33,7 @@ static struct fscache_object *cachefiles_alloc_object(
cache = container_of(_cache, struct cachefiles_cache, cache);
_enter("{%s},%p,", cache->cache.identifier, cookie);
_enter("{%s},%x,", cache->cache.identifier, cookie->debug_id);
lookup_data = kmalloc(sizeof(*lookup_data), cachefiles_gfp);
if (!lookup_data)
......@@ -96,7 +96,7 @@ static struct fscache_object *cachefiles_alloc_object(
lookup_data->key = key;
object->lookup_data = lookup_data;
_leave(" = %p [%p]", &object->fscache, lookup_data);
_leave(" = %x [%p]", object->fscache.debug_id, lookup_data);
return &object->fscache;
nomem_key:
......@@ -379,7 +379,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
const struct cred *saved_cred;
int ret;
_enter("%p", _cache);
_enter("%s", _cache->tag->name);
cache = container_of(_cache, struct cachefiles_cache, cache);
......
......@@ -180,31 +180,6 @@ extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
struct dentry *dir, char *filename);
/*
* proc.c
*/
#ifdef CONFIG_CACHEFILES_HISTOGRAM
extern atomic_t cachefiles_lookup_histogram[HZ];
extern atomic_t cachefiles_mkdir_histogram[HZ];
extern atomic_t cachefiles_create_histogram[HZ];
extern int __init cachefiles_proc_init(void);
extern void cachefiles_proc_cleanup(void);
static inline
void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
{
unsigned long jif = jiffies - start_jif;
if (jif >= HZ)
jif = HZ - 1;
atomic_inc(&histogram[jif]);
}
#else
#define cachefiles_proc_init() (0)
#define cachefiles_proc_cleanup() do {} while (0)
#define cachefiles_hist(hist, start_jif) do {} while (0)
#endif
/*
* rdwr.c
*/
......
......@@ -70,7 +70,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file->f_inode));
i_size_read(file_inode(file)));
/* If the caller asked us to seek for data before doing the read, then
* we should do that now. If we find a gap, we fill it with zeros.
......@@ -194,7 +194,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file->f_inode));
i_size_read(file_inode(file)));
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki)
......@@ -410,7 +410,7 @@ int cachefiles_begin_read_operation(struct netfs_read_request *rreq,
rreq->cache_resources.cache_priv = op;
rreq->cache_resources.cache_priv2 = file;
rreq->cache_resources.ops = &cachefiles_netfs_cache_ops;
rreq->cookie_debug_id = object->fscache.debug_id;
rreq->cache_resources.debug_id = object->fscache.debug_id;
_leave("");
return 0;
......
......@@ -150,6 +150,6 @@ char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
key[len++] = 0;
key[len] = 0;
_leave(" = %p %d", key, len);
_leave(" = %s %d", key, len);
return key;
}
......@@ -69,15 +69,9 @@ static int __init cachefiles_init(void)
goto error_object_jar;
}
ret = cachefiles_proc_init();
if (ret < 0)
goto error_proc;
pr_info("Loaded\n");
return 0;
error_proc:
kmem_cache_destroy(cachefiles_object_jar);
error_object_jar:
misc_deregister(&cachefiles_dev);
error_dev:
......@@ -94,7 +88,6 @@ static void __exit cachefiles_exit(void)
{
pr_info("Unloading\n");
cachefiles_proc_cleanup();
kmem_cache_destroy(cachefiles_object_jar);
misc_deregister(&cachefiles_dev);
}
......
......@@ -39,18 +39,18 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
pr_err("%sops=%u inp=%u exc=%u\n",
prefix, object->fscache.n_ops, object->fscache.n_in_progress,
object->fscache.n_exclusive);
pr_err("%sparent=%p\n",
prefix, object->fscache.parent);
pr_err("%sparent=%x\n",
prefix, object->fscache.parent ? object->fscache.parent->debug_id : 0);
spin_lock(&object->fscache.lock);
cookie = object->fscache.cookie;
if (cookie) {
pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
pr_err("%scookie=%x [pr=%x nd=%p fl=%lx]\n",
prefix,
object->fscache.cookie,
object->fscache.cookie->parent,
object->fscache.cookie->netfs_data,
object->fscache.cookie->flags);
cookie->debug_id,
cookie->parent ? cookie->parent->debug_id : 0,
cookie->netfs_data,
cookie->flags);
pr_err("%skey=[%u] '", prefix, cookie->key_len);
k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
cookie->inline_key : cookie->key;
......@@ -110,7 +110,7 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
/* found the dentry for */
found_dentry:
kdebug("preemptive burial: OBJ%x [%s] %p",
kdebug("preemptive burial: OBJ%x [%s] %pd",
object->fscache.debug_id,
object->fscache.state->name,
dentry);
......@@ -140,7 +140,7 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
struct rb_node **_p, *_parent = NULL;
struct dentry *dentry;
_enter(",%p", object);
_enter(",%x", object->fscache.debug_id);
try_again:
write_lock(&cache->active_lock);
......@@ -298,8 +298,6 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
_enter(",'%pd','%pd'", dir, rep);
_debug("remove %p from %p", rep, dir);
/* non-directories can just be unlinked */
if (!d_is_dir(rep)) {
_debug("unlink stale object");
......@@ -446,7 +444,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
struct dentry *dir;
int ret;
_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
_enter(",OBJ%x{%pd}", object->fscache.debug_id, object->dentry);
ASSERT(object->dentry);
ASSERT(d_backing_inode(object->dentry));
......@@ -496,11 +494,10 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
struct dentry *dir, *next = NULL;
struct inode *inode;
struct path path;
unsigned long start;
const char *name;
int ret, nlen;
_enter("OBJ%x{%p},OBJ%x,%s,",
_enter("OBJ%x{%pd},OBJ%x,%s,",
parent->fscache.debug_id, parent->dentry,
object->fscache.debug_id, key);
......@@ -535,9 +532,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(next)) {
trace_cachefiles_lookup(object, next, NULL);
goto lookup_error;
......@@ -545,7 +540,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
inode = d_backing_inode(next);
trace_cachefiles_lookup(object, next, inode);
_debug("next -> %p %s", next, inode ? "positive" : "negative");
_debug("next -> %pd %s", next, inode ? "positive" : "negative");
if (!key)
object->new = !inode;
......@@ -568,9 +563,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
ret = security_path_mkdir(&path, next, 0);
if (ret < 0)
goto create_error;
start = jiffies;
ret = vfs_mkdir(&init_user_ns, d_inode(dir), next, 0);
cachefiles_hist(cachefiles_mkdir_histogram, start);
if (!key)
trace_cachefiles_mkdir(object, next, ret);
if (ret < 0)
......@@ -583,8 +576,8 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
}
ASSERT(d_backing_inode(next));
_debug("mkdir -> %p{%p{ino=%lu}}",
next, d_backing_inode(next), d_backing_inode(next)->i_ino);
_debug("mkdir -> %pd{ino=%lu}",
next, d_backing_inode(next)->i_ino);
} else if (!d_can_lookup(next)) {
pr_err("inode %lu is not a directory\n",
......@@ -604,18 +597,16 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
ret = security_path_mknod(&path, next, S_IFREG, 0);
if (ret < 0)
goto create_error;
start = jiffies;
ret = vfs_create(&init_user_ns, d_inode(dir), next,
S_IFREG, true);
cachefiles_hist(cachefiles_create_histogram, start);
trace_cachefiles_create(object, next, ret);
if (ret < 0)
goto create_error;
ASSERT(d_backing_inode(next));
_debug("create -> %p{%p{ino=%lu}}",
next, d_backing_inode(next), d_backing_inode(next)->i_ino);
_debug("create -> %pd{ino=%lu}",
next, d_backing_inode(next)->i_ino);
} else if (!d_can_lookup(next) &&
!d_is_reg(next)
......@@ -765,7 +756,6 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
const char *dirname)
{
struct dentry *subdir;
unsigned long start;
struct path path;
int ret;
......@@ -775,16 +765,14 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
inode_lock(d_inode(dir));
retry:
start = jiffies;
subdir = lookup_one_len(dirname, dir, strlen(dirname));
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(subdir)) {
if (PTR_ERR(subdir) == -ENOMEM)
goto nomem_d_alloc;
goto lookup_error;
}
_debug("subdir -> %p %s",
_debug("subdir -> %pd %s",
subdir, d_backing_inode(subdir) ? "positive" : "negative");
/* we need to create the subdir if it doesn't exist yet */
......@@ -810,10 +798,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
}
ASSERT(d_backing_inode(subdir));
_debug("mkdir -> %p{%p{ino=%lu}}",
subdir,
d_backing_inode(subdir),
d_backing_inode(subdir)->i_ino);
_debug("mkdir -> %pd{ino=%lu}",
subdir, d_backing_inode(subdir)->i_ino);
}
inode_unlock(d_inode(dir));
......@@ -876,7 +862,6 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
struct cachefiles_object *object;
struct rb_node *_n;
struct dentry *victim;
unsigned long start;
int ret;
//_enter(",%pd/,%s",
......@@ -885,13 +870,11 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
/* look up the victim */
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
start = jiffies;
victim = lookup_one_len(filename, dir, strlen(filename));
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(victim))
goto lookup_error;
//_debug("victim -> %p %s",
//_debug("victim -> %pd %s",
// victim, d_backing_inode(victim) ? "positive" : "negative");
/* if the object is no longer there then we probably retired the object
......@@ -922,7 +905,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
read_unlock(&cache->active_lock);
//_leave(" = %p", victim);
//_leave(" = %pd", victim);
return victim;
object_in_use:
......@@ -968,7 +951,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
if (IS_ERR(victim))
return PTR_ERR(victim);
_debug("victim -> %p %s",
_debug("victim -> %pd %s",
victim, d_backing_inode(victim) ? "positive" : "negative");
/* okay... the victim is not being used so we can cull it
......
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles statistics
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t cachefiles_lookup_histogram[HZ];
atomic_t cachefiles_mkdir_histogram[HZ];
atomic_t cachefiles_create_histogram[HZ];
/*
 * display the latency histogram
 *
 * seq_file ->show() callback.  The iterator value v is a 1-based line
 * number encoded as a pointer (see cachefiles_histogram_start), not a
 * real address: lines 1 and 2 are the column headers, and line N >= 3
 * displays jiffy bucket N - 3 of the lookup/mkdir/create histograms.
 */
static int cachefiles_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned x, y, z, t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS LOOKUPS MKDIRS CREATES\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= =========\n");
return 0;
default:
/* Histogram buckets start at line 3; skip all-zero rows. */
index = (unsigned long) v - 3;
x = atomic_read(&cachefiles_lookup_histogram[index]);
y = atomic_read(&cachefiles_mkdir_histogram[index]);
z = atomic_read(&cachefiles_create_histogram[index]);
if (x == 0 && y == 0 && z == 0)
return 0;
/* Convert the jiffy index to milliseconds for the SECS column. */
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u\n", index, t, x, y, z);
return 0;
}
}
/*
 * set up the iterator to start reading from the first line
 *
 * seq_file ->start() callback.  Positions are 1-based line numbers
 * returned encoded as pointers; the sequence covers two header lines
 * plus HZ buckets, so positions at or beyond HZ + 2 end the walk.
 */
static void *cachefiles_histogram_start(struct seq_file *m, loff_t *_pos)
{
if ((unsigned long long)*_pos >= HZ + 2)
return NULL;
if (*_pos == 0)
*_pos = 1;
return (void *)(unsigned long) *_pos;
}
/*
 * move to the next line
 *
 * seq_file ->next() callback: advance the encoded line number, ending
 * the sequence once the two headers plus HZ buckets have been shown.
 */
static void *cachefiles_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (unsigned long long)*pos > HZ + 2 ?
NULL : (void *)(unsigned long) *pos;
}
/*
 * clean up after reading
 *
 * seq_file ->stop() callback; nothing to release since ->start() takes
 * no locks.
 */
static void cachefiles_histogram_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/fs/cachefiles/histogram. */
static const struct seq_operations cachefiles_histogram_ops = {
.start = cachefiles_histogram_start,
.stop = cachefiles_histogram_stop,
.next = cachefiles_histogram_next,
.show = cachefiles_histogram_show,
};
/*
 * initialise the /proc/fs/cachefiles/ directory
 *
 * Create the directory and the histogram seq_file within it.  Returns
 * 0 on success or -ENOMEM if either proc entry could not be created;
 * on failure any partially-created entry is removed again.
 */
int __init cachefiles_proc_init(void)
{
_enter("");
if (!proc_mkdir("fs/cachefiles", NULL))
goto error_dir;
if (!proc_create_seq("fs/cachefiles/histogram", S_IFREG | 0444, NULL,
&cachefiles_histogram_ops))
goto error_histogram;
_leave(" = 0");
return 0;
error_histogram:
/* Undo the directory creation if the file could not be added. */
remove_proc_entry("fs/cachefiles", NULL);
error_dir:
_leave(" = -ENOMEM");
return -ENOMEM;
}
/*
 * clean up the /proc/fs/cachefiles/ directory
 *
 * Remove the histogram file first, then its parent directory.
 */
void cachefiles_proc_cleanup(void)
{
remove_proc_entry("fs/cachefiles/histogram", NULL);
remove_proc_entry("fs/cachefiles", NULL);
}
......@@ -36,7 +36,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
else
snprintf(type, 3, "%02x", object->fscache.cookie->def->type);
_enter("%p{%s}", object, type);
_enter("%x{%s}", object->fscache.debug_id, type);
/* attempt to install a type label directly */
ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache, type,
......@@ -134,7 +134,7 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
if (!dentry)
return -ESTALE;
_enter("%p,#%d", object, auxdata->len);
_enter("%x,#%d", object->fscache.debug_id, auxdata->len);
/* attempt to install the cache metadata directly */
_debug("SET #%u", auxdata->len);
......
......@@ -14,6 +14,7 @@ config FSCACHE
config FSCACHE_STATS
bool "Gather statistical information on local caching"
depends on FSCACHE && PROC_FS
select NETFS_STATS
help
This option causes statistical information to be gathered on local
caching and exported through file:
......@@ -28,23 +29,6 @@ config FSCACHE_STATS
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_HISTOGRAM
bool "Gather latency information on local caching"
depends on FSCACHE && PROC_FS
help
This option causes latency information to be gathered on local
caching and exported through file:
/proc/fs/fscache/histogram
The generation of this histogram adds a certain amount of overhead to
execution as there are a number of points at which data is gathered,
and on a multi-CPU system these may be on cachelines that keep
bouncing between CPUs. On the other hand, the histogram may be
useful for debugging purposes. Saying 'N' here is recommended.
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_DEBUG
bool "Debug FS-Cache"
depends on FSCACHE
......@@ -54,10 +38,3 @@ config FSCACHE_DEBUG
enabled by setting bits in /sys/modules/fscache/parameter/debug.
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_OBJECT_LIST
bool "Maintain global object list for debugging purposes"
depends on FSCACHE && PROC_FS
help
Maintain a global list of active fscache objects that can be
retrieved through /proc/fs/fscache/objects for debugging purposes
......@@ -16,7 +16,5 @@ fscache-y := \
fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
obj-$(CONFIG_FSCACHE) := fscache.o
......@@ -116,7 +116,7 @@ struct fscache_cache *fscache_select_cache_for_object(
cache = NULL;
spin_unlock(&cookie->lock);
_leave(" = %p [parent]", cache);
_leave(" = %s [parent]", cache ? cache->tag->name : "NULL");
return cache;
}
......@@ -152,14 +152,14 @@ struct fscache_cache *fscache_select_cache_for_object(
if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
return NULL;
_leave(" = %p [specific]", tag->cache);
_leave(" = %s [specific]", tag->name);
return tag->cache;
no_preference:
/* netfs has no preference - just select first cache */
cache = list_entry(fscache_cache_list.next,
struct fscache_cache, link);
_leave(" = %p [first]", cache);
_leave(" = %s [first]", cache->tag->name);
return cache;
}
......@@ -261,7 +261,6 @@ int fscache_add_cache(struct fscache_cache *cache,
spin_lock(&cache->object_list_lock);
list_add_tail(&ifsdef->cache_link, &cache->object_list);
spin_unlock(&cache->object_list_lock);
fscache_objlist_add(ifsdef);
/* add the cache's netfs definition index object to the top level index
* cookie as a known backing object */
......@@ -270,7 +269,7 @@ int fscache_add_cache(struct fscache_cache *cache,
hlist_add_head(&ifsdef->cookie_link,
&fscache_fsdef_index.backing_objects);
atomic_inc(&fscache_fsdef_index.usage);
refcount_inc(&fscache_fsdef_index.ref);
/* done */
spin_unlock(&fscache_fsdef_index.lock);
......@@ -335,7 +334,7 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache,
struct fscache_object, cache_link);
list_move_tail(&object->cache_link, dying_objects);
_debug("withdraw %p", object->cookie);
_debug("withdraw %x", object->cookie->debug_id);
/* This must be done under object_list_lock to prevent
* a race with fscache_drop_object().
......
......@@ -19,6 +19,8 @@ static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
#define fscache_cookie_hash_shift 15
static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
static LIST_HEAD(fscache_cookies);
static DEFINE_RWLOCK(fscache_cookies_lock);
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
loff_t object_size);
......@@ -29,21 +31,29 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
static void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
struct hlist_node *object;
struct fscache_object *object;
struct hlist_node *o;
const u8 *k;
unsigned loop;
pr_err("%c-cookie c=%p [p=%p fl=%lx nc=%u na=%u]\n",
prefix, cookie, cookie->parent, cookie->flags,
pr_err("%c-cookie c=%08x [p=%08x fl=%lx nc=%u na=%u]\n",
prefix,
cookie->debug_id,
cookie->parent ? cookie->parent->debug_id : 0,
cookie->flags,
atomic_read(&cookie->n_children),
atomic_read(&cookie->n_active));
pr_err("%c-cookie d=%p n=%p\n",
prefix, cookie->def, cookie->netfs_data);
object = READ_ONCE(cookie->backing_objects.first);
if (object)
pr_err("%c-cookie o=%p\n",
prefix, hlist_entry(object, struct fscache_object, cookie_link));
pr_err("%c-cookie d=%p{%s} n=%p\n",
prefix,
cookie->def,
cookie->def ? cookie->def->name : "?",
cookie->netfs_data);
o = READ_ONCE(cookie->backing_objects.first);
if (o) {
object = hlist_entry(o, struct fscache_object, cookie_link);
pr_err("%c-cookie o=%u\n", prefix, object->debug_id);
}
pr_err("%c-key=[%u] '", prefix, cookie->key_len);
k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
......@@ -57,6 +67,9 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
{
if (cookie) {
BUG_ON(!hlist_empty(&cookie->backing_objects));
write_lock(&fscache_cookies_lock);
list_del(&cookie->proc_link);
write_unlock(&fscache_cookies_lock);
if (cookie->aux_len > sizeof(cookie->inline_aux))
kfree(cookie->aux);
if (cookie->key_len > sizeof(cookie->inline_key))
......@@ -74,10 +87,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
static int fscache_set_key(struct fscache_cookie *cookie,
const void *index_key, size_t index_key_len)
{
unsigned long long h;
u32 *buf;
int bufs;
int i;
bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
......@@ -91,17 +102,7 @@ static int fscache_set_key(struct fscache_cookie *cookie,
}
memcpy(buf, index_key, index_key_len);
/* Calculate a hash and combine this with the length in the first word
* or first half word
*/
h = (unsigned long)cookie->parent;
h += index_key_len + cookie->type;
for (i = 0; i < bufs; i++)
h += buf[i];
cookie->key_hash = h ^ (h >> 32);
cookie->key_hash = fscache_hash(0, buf, bufs);
return 0;
}
......@@ -129,6 +130,8 @@ static long fscache_compare_cookie(const struct fscache_cookie *a,
return memcmp(ka, kb, a->key_len);
}
static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
/*
* Allocate a cookie.
*/
......@@ -161,8 +164,9 @@ struct fscache_cookie *fscache_alloc_cookie(
goto nomem;
}
atomic_set(&cookie->usage, 1);
refcount_set(&cookie->ref, 1);
atomic_set(&cookie->n_children, 0);
cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
/* We keep the active count elevated until relinquishment to prevent an
* attempt to wake up every time the object operations queue quiesces.
......@@ -181,6 +185,10 @@ struct fscache_cookie *fscache_alloc_cookie(
/* radix tree insertion won't use the preallocation pool unless it's
* told it may not wait */
INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
write_lock(&fscache_cookies_lock);
list_add_tail(&cookie->proc_link, &fscache_cookies);
write_unlock(&fscache_cookies_lock);
return cookie;
nomem:
......@@ -217,8 +225,8 @@ struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate)
collision:
if (test_and_set_bit(FSCACHE_COOKIE_ACQUIRED, &cursor->flags)) {
trace_fscache_cookie(cursor, fscache_cookie_collision,
atomic_read(&cursor->usage));
trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
fscache_cookie_collision);
pr_err("Duplicate cookie detected\n");
fscache_print_cookie(cursor, 'O');
fscache_print_cookie(candidate, 'N');
......@@ -297,7 +305,8 @@ struct fscache_cookie *__fscache_acquire_cookie(
cookie = fscache_hash_cookie(candidate);
if (!cookie) {
trace_fscache_cookie(candidate, fscache_cookie_discard, 1);
trace_fscache_cookie(candidate->debug_id, 1,
fscache_cookie_discard);
goto out;
}
......@@ -355,7 +364,7 @@ void __fscache_enable_cookie(struct fscache_cookie *cookie,
bool (*can_enable)(void *data),
void *data)
{
_enter("%p", cookie);
_enter("%x", cookie->debug_id);
trace_fscache_enable(cookie);
......@@ -452,10 +461,8 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
/* we may be required to wait for lookup to complete at this point */
if (!fscache_defer_lookup) {
_debug("non-deferred lookup %p", &cookie->flags);
wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
TASK_UNINTERRUPTIBLE);
_debug("complete");
if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
goto unavailable;
}
......@@ -480,7 +487,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
struct fscache_object *object;
int ret;
_enter("%p,%p{%s}", cache, cookie, cookie->def->name);
_enter("%s,%x{%s}", cache->tag->name, cookie->debug_id, cookie->def->name);
spin_lock(&cookie->lock);
hlist_for_each_entry(object, &cookie->backing_objects,
......@@ -600,8 +607,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
/* Attach to the cookie. The object already has a ref on it. */
hlist_add_head(&object->cookie_link, &cookie->backing_objects);
fscache_objlist_add(object);
ret = 0;
cant_attach_object:
......@@ -658,7 +663,7 @@ EXPORT_SYMBOL(__fscache_invalidate);
*/
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
_enter("%p", cookie);
_enter("%x", cookie->debug_id);
wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
TASK_UNINTERRUPTIBLE);
......@@ -713,7 +718,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie,
struct fscache_object *object;
bool awaken = false;
_enter("%p,%u", cookie, invalidate);
_enter("%x,%u", cookie->debug_id, invalidate);
trace_fscache_disable(cookie);
......@@ -803,8 +808,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie,
return;
}
_enter("%p{%s,%p,%d},%d",
cookie, cookie->def->name, cookie->netfs_data,
_enter("%x{%s,%d},%d",
cookie->debug_id, cookie->def->name,
atomic_read(&cookie->n_active), retire);
trace_fscache_relinquish(cookie, retire);
......@@ -821,13 +826,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie,
BUG_ON(!radix_tree_empty(&cookie->stores));
if (cookie->parent) {
ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
ASSERTCMP(refcount_read(&cookie->parent->ref), >, 0);
ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
atomic_dec(&cookie->parent->n_children);
}
/* Dispose of the netfs's link to the cookie */
ASSERTCMP(atomic_read(&cookie->usage), >, 0);
fscache_cookie_put(cookie, fscache_cookie_put_relinquish);
_leave("");
......@@ -857,17 +861,17 @@ void fscache_cookie_put(struct fscache_cookie *cookie,
enum fscache_cookie_trace where)
{
struct fscache_cookie *parent;
int usage;
int ref;
_enter("%p", cookie);
_enter("%x", cookie->debug_id);
do {
usage = atomic_dec_return(&cookie->usage);
trace_fscache_cookie(cookie, where, usage);
unsigned int cookie_debug_id = cookie->debug_id;
bool zero = __refcount_dec_and_test(&cookie->ref, &ref);
if (usage > 0)
trace_fscache_cookie(cookie_debug_id, ref - 1, where);
if (!zero)
return;
BUG_ON(usage < 0);
parent = cookie->parent;
fscache_unhash_cookie(cookie);
......@@ -880,6 +884,19 @@ void fscache_cookie_put(struct fscache_cookie *cookie,
_leave("");
}
/*
 * Get a reference to a cookie.
 *
 * Take an extra reference on @cookie and emit a tracepoint recording
 * the new count (__refcount_inc reports the pre-increment value in
 * ref, hence ref + 1).  Returns @cookie to allow call-chaining.
 */
struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *cookie,
enum fscache_cookie_trace where)
{
int ref;
__refcount_inc(&cookie->ref, &ref);
trace_fscache_cookie(cookie->debug_id, ref + 1, where);
return cookie;
}
/*
* check the consistency between the netfs inode and the backing cache
*
......@@ -958,3 +975,97 @@ int __fscache_check_consistency(struct fscache_cookie *cookie,
return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
/*
 * Generate a list of extant cookies in /proc/fs/fscache/cookies
 *
 * seq_file ->show() callback.  The list head itself is used as the
 * sentinel for the header lines; every other iterator value is a
 * cookie threaded on its proc_link member.
 */
static int fscache_cookies_seq_show(struct seq_file *m, void *v)
{
struct fscache_cookie *cookie;
unsigned int keylen = 0, auxlen = 0;
char _type[3], *type;
u8 *p;
/* The list head marks the header line. */
if (v == &fscache_cookies) {
seq_puts(m,
"COOKIE PARENT USAGE CHILD ACT TY FL DEF NETFS_DATA\n"
"======== ======== ===== ===== === == === ================ ==========\n"
);
return 0;
}
cookie = list_entry(v, struct fscache_cookie, proc_link);
/* Render the cookie type as a two-letter tag or two-digit number. */
switch (cookie->type) {
case 0:
type = "IX";
break;
case 1:
type = "DT";
break;
default:
snprintf(_type, sizeof(_type), "%02u",
cookie->type);
type = _type;
break;
}
seq_printf(m,
"%08x %08x %5u %5u %3u %s %03lx %-16s %px",
cookie->debug_id,
cookie->parent ? cookie->parent->debug_id : 0,
refcount_read(&cookie->ref),
atomic_read(&cookie->n_children),
atomic_read(&cookie->n_active),
type,
cookie->flags,
cookie->def->name,
cookie->netfs_data);
/* Dump the index key and any auxiliary data as hex bytes. */
keylen = cookie->key_len;
auxlen = cookie->aux_len;
if (keylen > 0 || auxlen > 0) {
seq_puts(m, " ");
/* Short keys/aux are stored inline in the cookie itself. */
p = keylen <= sizeof(cookie->inline_key) ?
cookie->inline_key : cookie->key;
for (; keylen > 0; keylen--)
seq_printf(m, "%02x", *p++);
if (auxlen > 0) {
seq_puts(m, ", ");
p = auxlen <= sizeof(cookie->inline_aux) ?
cookie->inline_aux : cookie->aux;
for (; auxlen > 0; auxlen--)
seq_printf(m, "%02x", *p++);
}
}
seq_puts(m, "\n");
return 0;
}
/* seq_file ->start(): take the list lock and position the iterator. */
static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
__acquires(fscache_cookies_lock)
{
read_lock(&fscache_cookies_lock);
return seq_list_start_head(&fscache_cookies, *_pos);
}
/* seq_file ->next(): step to the next cookie on the global list. */
static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
return seq_list_next(v, &fscache_cookies, _pos);
}
/* seq_file ->stop(): drop the lock taken by ->start(). */
static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
__releases(fscache_cookies_lock)
{
read_unlock(&fscache_cookies_lock);
}
/* seq_file operations backing /proc/fs/fscache/cookies. */
const struct seq_operations fscache_cookies_seq_ops = {
.start = fscache_cookies_seq_start,
.next = fscache_cookies_seq_next,
.stop = fscache_cookies_seq_stop,
.show = fscache_cookies_seq_show,
};
......@@ -45,7 +45,8 @@ static struct fscache_cookie_def fscache_fsdef_index_def = {
};
struct fscache_cookie fscache_fsdef_index = {
.usage = ATOMIC_INIT(1),
.debug_id = 1,
.ref = REFCOUNT_INIT(1),
.n_active = ATOMIC_INIT(1),
.lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
.backing_objects = HLIST_HEAD_INIT,
......
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache latency histogram
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define FSCACHE_DEBUG_LEVEL THREAD
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t fscache_obj_instantiate_histogram[HZ];
atomic_t fscache_objs_histogram[HZ];
atomic_t fscache_ops_histogram[HZ];
atomic_t fscache_retrieval_delay_histogram[HZ];
atomic_t fscache_retrieval_histogram[HZ];
/*
 * display the time-taken histogram
 *
 * seq_file ->show() callback.  The iterator value v is a 1-based line
 * number encoded as a pointer, not a real address: lines 1 and 2 are
 * the column headers, and line N >= 3 displays jiffy bucket N - 3 of
 * the five fscache latency histograms.
 */
static int fscache_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned n[5], t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS OBJ INST OP RUNS OBJ RUNS RETRV DLY RETRIEVLS\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= ========= ========= =========\n");
return 0;
default:
/* Histogram buckets start at line 3; suppress all-zero rows. */
index = (unsigned long) v - 3;
n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]);
n[1] = atomic_read(&fscache_ops_histogram[index]);
n[2] = atomic_read(&fscache_objs_histogram[index]);
n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]);
n[4] = atomic_read(&fscache_retrieval_histogram[index]);
if (!(n[0] | n[1] | n[2] | n[3] | n[4]))
return 0;
/* Convert the jiffy index to milliseconds for the SECS column. */
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u %9u %9u\n",
index, t, n[0], n[1], n[2], n[3], n[4]);
return 0;
}
}
/*
* set up the iterator to start reading from the first line
*/
static void *fscache_histogram_start(struct seq_file *m, loff_t *_pos)
{
	unsigned long long pos = *_pos;

	/* Valid lines are 1..HZ+1: two banners then the bucket rows */
	if (pos >= HZ + 2)
		return NULL;
	if (pos == 0)
		*_pos = pos = 1;	/* line 0 can't be pos 0: that's NULL */
	return (void *)(unsigned long) pos;
}
/*
* move to the next line
*/
static void *fscache_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long long next = ++*pos;

	/* Stop once we're past the banners plus the HZ bucket rows */
	if (next > HZ + 2)
		return NULL;
	return (void *)(unsigned long) next;
}
/*
* clean up after reading
*/
static void fscache_histogram_stop(struct seq_file *m, void *v)
{
	/* Nothing to release: fscache_histogram_start() takes no locks */
}
/* seq_file iterator for /proc/fs/fscache/histogram */
const struct seq_operations fscache_histogram_ops = {
	.start		= fscache_histogram_start,
	.stop		= fscache_histogram_stop,
	.next		= fscache_histogram_next,
	.show		= fscache_histogram_show,
};
......@@ -45,6 +45,7 @@ extern struct fscache_cache *fscache_select_cache_for_object(
* cookie.c
*/
extern struct kmem_cache *fscache_cookie_jar;
extern const struct seq_operations fscache_cookies_seq_ops;
extern void fscache_free_cookie(struct fscache_cookie *);
extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
......@@ -53,39 +54,24 @@ extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
const void *, size_t,
void *, loff_t);
extern struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *);
extern struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *,
enum fscache_cookie_trace);
extern void fscache_cookie_put(struct fscache_cookie *,
enum fscache_cookie_trace);
/*
 * Log a refcount tracepoint event against a cookie without changing the
 * refcount - the current value is merely sampled for the trace.
 */
static inline void fscache_cookie_see(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}
/*
* fsdef.c
*/
extern struct fscache_cookie fscache_fsdef_index;
extern struct fscache_cookie_def fscache_fsdef_netfs_def;
/*
* histogram.c
*/
#ifdef CONFIG_FSCACHE_HISTOGRAM
extern atomic_t fscache_obj_instantiate_histogram[HZ];
extern atomic_t fscache_objs_histogram[HZ];
extern atomic_t fscache_ops_histogram[HZ];
extern atomic_t fscache_retrieval_delay_histogram[HZ];
extern atomic_t fscache_retrieval_histogram[HZ];
/* Bump the histogram bucket covering the time elapsed since @start_jif. */
static inline void fscache_hist(atomic_t histogram[], unsigned long start_jif)
{
	unsigned long elapsed = jiffies - start_jif;

	/* Anything of a second or more lands in the final bucket */
	if (elapsed >= HZ)
		elapsed = HZ - 1;
	atomic_inc(&histogram[elapsed]);
}
extern const struct seq_operations fscache_histogram_ops;
#else
#define fscache_hist(hist, start_jif) do {} while (0)
#endif
/*
* main.c
*/
......@@ -97,6 +83,8 @@ extern struct workqueue_struct *fscache_object_wq;
extern struct workqueue_struct *fscache_op_wq;
DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
static inline bool fscache_object_congested(void)
{
return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
......@@ -107,19 +95,6 @@ static inline bool fscache_object_congested(void)
*/
extern void fscache_enqueue_object(struct fscache_object *);
/*
* object-list.c
*/
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern const struct proc_ops fscache_objlist_proc_ops;
extern void fscache_objlist_add(struct fscache_object *);
extern void fscache_objlist_remove(struct fscache_object *);
#else
#define fscache_objlist_add(object) do {} while(0)
#define fscache_objlist_remove(object) do {} while(0)
#endif
/*
* operation.c
*/
......@@ -320,14 +295,6 @@ static inline void fscache_raise_event(struct fscache_object *object,
fscache_enqueue_object(object);
}
/*
 * Take a reference on a cookie; atomic_inc_return() is used so the
 * tracepoint can report the post-increment usage count.
 */
static inline void fscache_cookie_get(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	int usage = atomic_inc_return(&cookie->usage);
	trace_fscache_cookie(cookie, where, usage);
}
/*
* get an extra reference to a netfs retrieval context
*/
......
......@@ -93,6 +93,45 @@ static struct ctl_table fscache_sysctls_root[] = {
};
#endif
/*
* Mixing scores (in bits) for (7,20):
* Input delta: 1-bit 2-bit
* 1 round: 330.3 9201.6
* 2 rounds: 1246.4 25475.4
* 3 rounds: 1907.1 31295.1
* 4 rounds: 2042.3 31718.6
* Perfect: 2048 31744
* (32*64) (32*31/2 * 64)
*/
/* NOTE: 'x' and 'y' are expanded several times - pass plain lvalues only;
 * 'a' is evaluated exactly once. */
#define HASH_MIX(x, y, a)	\
	(	x ^= (a),	\
		y ^= x,	x = rol32(x, 7),\
		x += y,	y = rol32(y,20),\
		y *= 9			)
/* Fold the two-word mixing state down to a single 32-bit hash value. */
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
	/* __hash_32() supplies an arch-optimized multiply where one exists */
	unsigned int folded = __hash_32(x);

	return __hash_32(y ^ folded);
}
/*
* Generate a hash. This is derived from full_name_hash(), but we want to be
* sure it is arch independent and that it doesn't change as bits of the
* computed hash value might appear on disk. The caller also guarantees that
* the hashed data will be a series of aligned 32-bit words.
*/
unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
{
	unsigned int x = 0, y = salt;

	/* Fold each aligned 32-bit word into the (x, y) mixing state */
	while (n--) {
		unsigned int w = *data++;

		HASH_MIX(x, y, w);
	}
	return fold_hash(x, y);
}
/*
* initialise the fs caching module
*/
......
......@@ -37,7 +37,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
if (!cookie)
goto already_registered;
if (cookie != candidate) {
trace_fscache_cookie(candidate, fscache_cookie_discard, 1);
trace_fscache_cookie(candidate->debug_id, 1, fscache_cookie_discard);
fscache_free_cookie(candidate);
}
......
// SPDX-License-Identifier: GPL-2.0-or-later
/* Global fscache object list maintainer and viewer
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <keys/user-type.h>
#include "internal.h"
static struct rb_root fscache_object_list;
static DEFINE_RWLOCK(fscache_object_list_lock);
/*
 * Per-open state for /proc/fs/fscache/objects, hung off the seq_file's
 * ->private pointer; holds the display-filter bits built by
 * fscache_objlist_config().
 */
struct fscache_objlist_data {
	unsigned long config;	/* display configuration */
#define FSCACHE_OBJLIST_CONFIG_KEY	0x00000001	/* show object keys */
#define FSCACHE_OBJLIST_CONFIG_AUX	0x00000002	/* show object auxdata */
#define FSCACHE_OBJLIST_CONFIG_COOKIE	0x00000004	/* show objects with cookies */
#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE	0x00000008	/* show objects without cookies */
#define FSCACHE_OBJLIST_CONFIG_BUSY	0x00000010	/* show busy objects */
#define FSCACHE_OBJLIST_CONFIG_IDLE	0x00000020	/* show idle objects */
#define FSCACHE_OBJLIST_CONFIG_PENDWR	0x00000040	/* show objects with pending writes */
#define FSCACHE_OBJLIST_CONFIG_NOPENDWR	0x00000080	/* show objects without pending writes */
#define FSCACHE_OBJLIST_CONFIG_READS	0x00000100	/* show objects with active reads */
#define FSCACHE_OBJLIST_CONFIG_NOREADS	0x00000200	/* show objects without active reads */
#define FSCACHE_OBJLIST_CONFIG_EVENTS	0x00000400	/* show objects with events */
#define FSCACHE_OBJLIST_CONFIG_NOEVENTS	0x00000800	/* show objects without events */
#define FSCACHE_OBJLIST_CONFIG_WORK	0x00001000	/* show objects with work */
#define FSCACHE_OBJLIST_CONFIG_NOWORK	0x00002000	/* show objects without work */
};
/*
* Add an object to the object list
* - we use the address of the fscache_object structure as the key into the
* tree
*/
void fscache_objlist_add(struct fscache_object *obj)
{
	struct fscache_object *cursor;
	struct rb_node **link = &fscache_object_list.rb_node;
	struct rb_node *parent = NULL;

	ASSERT(RB_EMPTY_NODE(&obj->objlist_link));

	write_lock(&fscache_object_list_lock);

	/* Descend to the leaf where this object belongs; the object's own
	 * address is the sort key, so a duplicate insertion is a bug. */
	while (*link) {
		parent = *link;
		cursor = rb_entry(parent, struct fscache_object, objlist_link);
		if (obj < cursor)
			link = &parent->rb_left;
		else if (obj > cursor)
			link = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&obj->objlist_link, parent, link);
	rb_insert_color(&obj->objlist_link, &fscache_object_list);

	write_unlock(&fscache_object_list_lock);
}
/*
* Remove an object from the object list.
*/
void fscache_objlist_remove(struct fscache_object *object)
{
	/* Objects that never made it into the tree need no unlinking */
	if (RB_EMPTY_NODE(&object->objlist_link))
		return;

	write_lock(&fscache_object_list_lock);

	BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
	rb_erase(&object->objlist_link, &fscache_object_list);

	write_unlock(&fscache_object_list_lock);
}
/*
* find the object in the tree on or after the specified index
*/
static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
{
	struct fscache_object *pobj, *obj = NULL, *minobj = NULL;
	struct rb_node *p;
	unsigned long pos;

	/* ERR_PTR(-ENOENT) stored in *_pos is the end-of-scan sentinel */
	if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
		return NULL;
	pos = *_pos;
	/* banners (can't represent line 0 by pos 0 as that would involve
	 * returning a NULL pointer) */
	if (pos == 0)
		return (struct fscache_object *)(long)++(*_pos);
	if (pos < 3)
		return (struct fscache_object *)pos;
	/* Object addresses double as seek positions: walk the tree for the
	 * lowest-addressed object at or after pos. */
	pobj = (struct fscache_object *)pos;
	p = fscache_object_list.rb_node;
	while (p) {
		obj = rb_entry(p, struct fscache_object, objlist_link);
		if (pobj < obj) {
			/* obj is a candidate; look left for a smaller one */
			if (!minobj || minobj > obj)
				minobj = obj;
			p = p->rb_left;
		} else if (pobj > obj) {
			p = p->rb_right;
		} else {
			minobj = obj;	/* exact hit */
			break;
		}
		obj = NULL;
	}
	/* On an exact hit leave *_pos alone; otherwise record where we
	 * actually landed, or mark the scan finished. */
	if (!minobj)
		*_pos = (unsigned long) ERR_PTR(-ENOENT);
	else if (minobj != obj)
		*_pos = (unsigned long) minobj;
	return minobj;
}
/*
* set up the iterator to start reading from the first line
*/
static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
	__acquires(&fscache_object_list_lock)
{
	/* Pin the object tree for this read pass; dropped in ->stop */
	read_lock(&fscache_object_list_lock);
	return fscache_objlist_lookup(_pos);
}
/*
* move to the next line
*/
static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
{
	/* Step past the current position and find the following object */
	(*_pos)++;
	return fscache_objlist_lookup(_pos);
}
/*
* clean up after reading
*/
static void fscache_objlist_stop(struct seq_file *m, void *v)
	__releases(&fscache_object_list_lock)
{
	/* Drop the lock taken in fscache_objlist_start() */
	read_unlock(&fscache_object_list_lock);
}
/*
* display an object
*/
static int fscache_objlist_show(struct seq_file *m, void *v)
{
	struct fscache_objlist_data *data = m->private;
	struct fscache_object *obj = v;
	struct fscache_cookie *cookie;
	unsigned long config = data->config;
	char _type[3], *type;
	u8 *p;
	/* Positions 1 and 2 are the two column-banner lines; the optional
	 * key/aux columns only appear if the config selects them. */
	if ((unsigned long) v == 1) {
		seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
			 " EM EV FL S"
			 " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
		if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
			      FSCACHE_OBJLIST_CONFIG_AUX))
			seq_puts(m, " ");
		if (config & FSCACHE_OBJLIST_CONFIG_KEY)
			seq_puts(m, "OBJECT_KEY");
		if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
			       FSCACHE_OBJLIST_CONFIG_AUX)) ==
		    (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
			seq_puts(m, ", ");
		if (config & FSCACHE_OBJLIST_CONFIG_AUX)
			seq_puts(m, "AUX_DATA");
		seq_puts(m, "\n");
		return 0;
	}
	if ((unsigned long) v == 2) {
		seq_puts(m, "======== ======== ==== ===== === === === == ====="
			 " == == == ="
			 " | ================ == == ================");
		if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
			      FSCACHE_OBJLIST_CONFIG_AUX))
			seq_puts(m, " ================");
		seq_puts(m, "\n");
		return 0;
	}
	/* filter out any unwanted objects: each FILTER() checks one
	 * show/hide config pair and returns early if neither side wants
	 * this object displayed */
#define FILTER(criterion, _yes, _no)					\
	do {								\
		unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes;	\
		unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no;	\
		if (criterion) {					\
			if (!(config & yes))				\
				return 0;				\
		} else {						\
			if (!(config & no))				\
				return 0;				\
		}							\
	} while(0)
	cookie = obj->cookie;
	/* config == ULONG_MAX (no key/CONFIG_KEYS) means "show everything",
	 * so skip filtering entirely in that case */
	if (~config) {
		FILTER(cookie->def,
		       COOKIE, NOCOOKIE);
		FILTER(fscache_object_is_active(obj) ||
		       obj->n_ops != 0 ||
		       obj->n_obj_ops != 0 ||
		       obj->flags ||
		       !list_empty(&obj->dependents),
		       BUSY, IDLE);
		FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
		       PENDWR, NOPENDWR);
		FILTER(atomic_read(&obj->n_reads),
		       READS, NOREADS);
		FILTER(obj->events & obj->event_mask,
		       EVENTS, NOEVENTS);
		FILTER(work_busy(&obj->work), WORK, NOWORK);
	}
	/* Fixed-width object columns matching the banner above */
	seq_printf(m,
		   "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %2lx %1x | ",
		   obj->debug_id,
		   obj->parent ? obj->parent->debug_id : -1,
		   obj->state->short_name,
		   obj->n_children,
		   obj->n_ops,
		   obj->n_obj_ops,
		   obj->n_in_progress,
		   obj->n_exclusive,
		   atomic_read(&obj->n_reads),
		   obj->event_mask,
		   obj->events,
		   obj->flags,
		   work_busy(&obj->work));
	/* The cookie columns are only valid while we pin the cookie's netfs
	 * data with fscache_use_cookie() */
	if (fscache_use_cookie(obj)) {
		uint16_t keylen = 0, auxlen = 0;
		switch (cookie->type) {
		case 0:
			type = "IX";
			break;
		case 1:
			type = "DT";
			break;
		default:
			snprintf(_type, sizeof(_type), "%02u",
				 cookie->type);
			type = _type;
			break;
		}
		seq_printf(m, "%-16s %s %2lx %16p",
			   cookie->def->name,
			   type,
			   cookie->flags,
			   cookie->netfs_data);
		if (config & FSCACHE_OBJLIST_CONFIG_KEY)
			keylen = cookie->key_len;
		if (config & FSCACHE_OBJLIST_CONFIG_AUX)
			auxlen = cookie->aux_len;
		/* Hex-dump the key and/or aux data; short blobs live inline
		 * in the cookie, longer ones are in separate buffers */
		if (keylen > 0 || auxlen > 0) {
			seq_puts(m, " ");
			p = keylen <= sizeof(cookie->inline_key) ?
				cookie->inline_key : cookie->key;
			for (; keylen > 0; keylen--)
				seq_printf(m, "%02x", *p++);
			if (auxlen > 0) {
				if (config & FSCACHE_OBJLIST_CONFIG_KEY)
					seq_puts(m, ", ");
				p = auxlen <= sizeof(cookie->inline_aux) ?
					cookie->inline_aux : cookie->aux;
				for (; auxlen > 0; auxlen--)
					seq_printf(m, "%02x", *p++);
			}
		}
		seq_puts(m, "\n");
		fscache_unuse_cookie(obj);
	} else {
		seq_puts(m, "<no_netfs>\n");
	}
	return 0;
}
/* seq_file iterator for /proc/fs/fscache/objects */
static const struct seq_operations fscache_objlist_ops = {
	.start		= fscache_objlist_start,
	.stop		= fscache_objlist_stop,
	.next		= fscache_objlist_next,
	.show		= fscache_objlist_show,
};
/*
* get the configuration for filtering the list
*/
static void fscache_objlist_config(struct fscache_objlist_data *data)
{
#ifdef CONFIG_KEYS
	const struct user_key_payload *confkey;
	unsigned long config;
	struct key *key;
	const char *buf;
	int len;
	/* The filter is configured by a user-type key named
	 * "fscache:objlist" in the caller's keyrings */
	key = request_key(&key_type_user, "fscache:objlist", NULL);
	if (IS_ERR(key))
		goto no_config;
	config = 0;
	rcu_read_lock();
	confkey = user_key_payload_rcu(key);
	if (!confkey) {
		/* key was revoked */
		rcu_read_unlock();
		key_put(key);
		goto no_config;
	}
	buf = confkey->data;
	/* Each letter in the payload turns on one filter bit */
	for (len = confkey->datalen - 1; len >= 0; len--) {
		switch (buf[len]) {
		case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break;
		case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break;
		case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break;
		case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break;
		case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break;
		case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break;
		case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break;
		case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break;
		case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break;
		case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break;
		case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break;
		case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break;
		}
	}
	rcu_read_unlock();
	key_put(key);
	/* Any show/hide pair with neither side selected defaults to showing
	 * both sides */
	if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
	    config   |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
	    config   |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
	    config   |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
	    config   |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
	    config   |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
	    config   |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
	data->config = config;
	return;
no_config:
#endif
	/* No key (or no CONFIG_KEYS): show everything, unfiltered */
	data->config = ULONG_MAX;
}
/*
* open "/proc/fs/fscache/objects" to provide a list of active objects
* - can be configured by a user-defined key added to the caller's keyrings
*/
static int fscache_objlist_open(struct inode *inode, struct file *file)
{
	struct fscache_objlist_data *data;
	/* Per-open private data, freed in fscache_objlist_release() */
	data = __seq_open_private(file, &fscache_objlist_ops, sizeof(*data));
	if (!data)
		return -ENOMEM;
	/* get the configuration key */
	fscache_objlist_config(data);
	return 0;
}
/*
* clean up on close
*/
static int fscache_objlist_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	/* Free the filter config allocated by __seq_open_private() */
	kfree(seq->private);
	seq->private = NULL;

	return seq_release(inode, file);
}
/* /proc/fs/fscache/objects file operations */
const struct proc_ops fscache_objlist_proc_ops = {
	.proc_open	= fscache_objlist_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= fscache_objlist_release,
};
......@@ -277,13 +277,10 @@ static void fscache_object_work_func(struct work_struct *work)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
unsigned long start;
_enter("{OBJ%x}", object->debug_id);
start = jiffies;
fscache_object_sm_dispatcher(object);
fscache_hist(fscache_objs_histogram, start);
fscache_put_object(object, fscache_obj_put_work);
}
......@@ -436,7 +433,6 @@ static const struct fscache_state *fscache_parent_ready(struct fscache_object *o
spin_lock(&parent->lock);
parent->n_ops++;
parent->n_obj_ops++;
object->lookup_jif = jiffies;
spin_unlock(&parent->lock);
_leave("");
......@@ -522,7 +518,6 @@ void fscache_object_lookup_negative(struct fscache_object *object)
set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
_debug("wake up lookup %p", &cookie->flags);
clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
}
......@@ -596,7 +591,6 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
object->cache->ops->lookup_complete(object);
fscache_stat_d(&fscache_n_cop_lookup_complete);
fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
fscache_stat(&fscache_n_object_avail);
_leave("");
......@@ -799,8 +793,6 @@ static void fscache_put_object(struct fscache_object *object,
*/
void fscache_object_destroy(struct fscache_object *object)
{
fscache_objlist_remove(object);
/* We can get rid of the cookie now */
fscache_cookie_put(object->cookie, fscache_cookie_put_object);
object->cookie = NULL;
......
......@@ -616,7 +616,6 @@ void fscache_op_work_func(struct work_struct *work)
{
struct fscache_operation *op =
container_of(work, struct fscache_operation, work);
unsigned long start;
_enter("{OBJ%x OP%x,%d}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
......@@ -624,9 +623,7 @@ void fscache_op_work_func(struct work_struct *work)
trace_fscache_op(op->object->cookie, op, fscache_op_work);
ASSERT(op->processor != NULL);
start = jiffies;
op->processor(op);
fscache_hist(fscache_ops_histogram, start);
fscache_put_operation(op);
_leave("");
......
......@@ -289,7 +289,6 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
atomic_read(&op->n_pages), ==, 0);
fscache_hist(fscache_retrieval_histogram, op->start_time);
if (op->context)
fscache_put_context(op->cookie, op->context);
......@@ -324,7 +323,6 @@ struct fscache_retrieval *fscache_alloc_retrieval(
op->mapping = mapping;
op->end_io_func = end_io_func;
op->context = context;
op->start_time = jiffies;
INIT_LIST_HEAD(&op->to_do);
/* Pin the netfs read context in case we need to do the actual netfs
......@@ -340,8 +338,6 @@ struct fscache_retrieval *fscache_alloc_retrieval(
*/
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
unsigned long jif;
_enter("");
if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
......@@ -351,7 +347,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
fscache_stat(&fscache_n_retrievals_wait);
jif = jiffies;
if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
TASK_INTERRUPTIBLE) != 0) {
fscache_stat(&fscache_n_retrievals_intr);
......@@ -362,7 +357,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
smp_rmb();
fscache_hist(fscache_retrieval_delay_histogram, jif);
_leave(" = 0 [dly]");
return 0;
}
......
......@@ -21,18 +21,16 @@ int __init fscache_proc_init(void)
if (!proc_mkdir("fs/fscache", NULL))
goto error_dir;
if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
&fscache_cookies_seq_ops))
goto error_cookies;
#ifdef CONFIG_FSCACHE_STATS
if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
fscache_stats_show))
goto error_stats;
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
if (!proc_create_seq("fs/fscache/histogram", S_IFREG | 0444, NULL,
&fscache_histogram_ops))
goto error_histogram;
#endif
#ifdef CONFIG_FSCACHE_OBJECT_LIST
if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
&fscache_objlist_proc_ops))
......@@ -45,14 +43,12 @@ int __init fscache_proc_init(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
error_objects:
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
error_histogram:
#endif
#ifdef CONFIG_FSCACHE_STATS
remove_proc_entry("fs/fscache/stats", NULL);
error_stats:
#endif
remove_proc_entry("fs/fscache/cookies", NULL);
error_cookies:
remove_proc_entry("fs/fscache", NULL);
error_dir:
_leave(" = -ENOMEM");
......@@ -67,11 +63,9 @@ void fscache_proc_cleanup(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
remove_proc_entry("fs/fscache/objects", NULL);
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
#endif
#ifdef CONFIG_FSCACHE_STATS
remove_proc_entry("fs/fscache/stats", NULL);
#endif
remove_proc_entry("fs/fscache/cookies", NULL);
remove_proc_entry("fs/fscache", NULL);
}
......@@ -147,7 +147,6 @@ struct fscache_retrieval {
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
atomic_t n_pages; /* number of pages to be retrieved */
};
......@@ -385,9 +384,6 @@ struct fscache_object {
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};
......
......@@ -123,15 +123,17 @@ struct fscache_netfs {
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
refcount_t ref; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
atomic_t n_active; /* number of active users of netfs ptrs */
unsigned int debug_id;
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
struct hlist_bl_node hash_link; /* Link in hash table */
struct list_head proc_link; /* Link in proc list */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
......
......@@ -102,6 +102,7 @@ struct netfs_cache_resources {
const struct netfs_cache_ops *ops;
void *cache_priv;
void *cache_priv2;
unsigned int debug_id; /* Cookie debug ID */
};
/*
......@@ -137,7 +138,6 @@ struct netfs_read_request {
struct list_head subrequests; /* Requests to fetch I/O from disk or net */
void *netfs_priv; /* Private data for the netfs */
unsigned int debug_id;
unsigned int cookie_debug_id;
atomic_t nr_rd_ops; /* Number of read ops in progress */
atomic_t nr_wr_ops; /* Number of write ops in progress */
size_t submitted; /* Amount submitted for I/O so far */
......
......@@ -78,20 +78,20 @@ TRACE_EVENT(cachefiles_ref,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(struct fscache_cookie *, cookie )
__field(unsigned int, obj )
__field(unsigned int, cookie )
__field(enum cachefiles_obj_ref_trace, why )
__field(int, usage )
),
TP_fast_assign(
__entry->obj = obj;
__entry->cookie = cookie;
__entry->obj = obj->fscache.debug_id;
__entry->cookie = cookie->debug_id;
__entry->usage = usage;
__entry->why = why;
),
TP_printk("c=%p o=%p u=%d %s",
TP_printk("c=%08x o=%08x u=%d %s",
__entry->cookie, __entry->obj, __entry->usage,
__print_symbolic(__entry->why, cachefiles_obj_ref_traces))
);
......@@ -104,18 +104,18 @@ TRACE_EVENT(cachefiles_lookup,
TP_ARGS(obj, de, inode),
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(struct inode *, inode )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->inode = inode;
),
TP_printk("o=%p d=%p i=%p",
TP_printk("o=%08x d=%p i=%p",
__entry->obj, __entry->de, __entry->inode)
);
......@@ -126,18 +126,18 @@ TRACE_EVENT(cachefiles_mkdir,
TP_ARGS(obj, de, ret),
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(int, ret )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->ret = ret;
),
TP_printk("o=%p d=%p r=%u",
TP_printk("o=%08x d=%p r=%u",
__entry->obj, __entry->de, __entry->ret)
);
......@@ -148,18 +148,18 @@ TRACE_EVENT(cachefiles_create,
TP_ARGS(obj, de, ret),
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(int, ret )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->ret = ret;
),
TP_printk("o=%p d=%p r=%u",
TP_printk("o=%08x d=%p r=%u",
__entry->obj, __entry->de, __entry->ret)
);
......@@ -172,18 +172,18 @@ TRACE_EVENT(cachefiles_unlink,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->why = why;
),
TP_printk("o=%p d=%p w=%s",
TP_printk("o=%08x d=%p w=%s",
__entry->obj, __entry->de,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
......@@ -198,20 +198,20 @@ TRACE_EVENT(cachefiles_rename,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(struct dentry *, to )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->to = to;
__entry->why = why;
),
TP_printk("o=%p d=%p t=%p w=%s",
TP_printk("o=%08x d=%p t=%p w=%s",
__entry->obj, __entry->de, __entry->to,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
......@@ -224,16 +224,16 @@ TRACE_EVENT(cachefiles_mark_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
),
TP_printk("o=%p d=%p",
TP_printk("o=%08x d=%p",
__entry->obj, __entry->de)
);
......@@ -246,22 +246,22 @@ TRACE_EVENT(cachefiles_wait_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(unsigned int, xobj )
__field(struct dentry *, de )
__field(struct cachefiles_object *, xobj )
__field(u16, flags )
__field(u16, fsc_flags )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->xobj = xobj;
__entry->xobj = xobj->fscache.debug_id;
__entry->flags = xobj->flags;
__entry->fsc_flags = xobj->fscache.flags;
),
TP_printk("o=%p d=%p wo=%p wf=%x wff=%x",
TP_printk("o=%08x d=%p wo=%08x wf=%x wff=%x",
__entry->obj, __entry->de, __entry->xobj,
__entry->flags, __entry->fsc_flags)
);
......@@ -275,18 +275,18 @@ TRACE_EVENT(cachefiles_mark_inactive,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(struct inode *, inode )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->inode = inode;
),
TP_printk("o=%p d=%p i=%p",
TP_printk("o=%08x d=%p i=%p",
__entry->obj, __entry->de, __entry->inode)
);
......@@ -299,18 +299,18 @@ TRACE_EVENT(cachefiles_mark_buried,
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(struct cachefiles_object *, obj )
__field(unsigned int, obj )
__field(struct dentry *, de )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
__entry->obj = obj;
__entry->obj = obj->fscache.debug_id;
__entry->de = de;
__entry->why = why;
),
TP_printk("o=%p d=%p w=%s",
TP_printk("o=%08x d=%p w=%s",
__entry->obj, __entry->de,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
......
......@@ -160,37 +160,27 @@ fscache_cookie_traces;
TRACE_EVENT(fscache_cookie,
TP_PROTO(struct fscache_cookie *cookie,
enum fscache_cookie_trace where,
int usage),
TP_PROTO(unsigned int cookie_debug_id,
int ref,
enum fscache_cookie_trace where),
TP_ARGS(cookie, where, usage),
TP_ARGS(cookie_debug_id, ref, where),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(struct fscache_cookie *, parent )
__field(unsigned int, cookie )
__field(enum fscache_cookie_trace, where )
__field(int, usage )
__field(int, n_children )
__field(int, n_active )
__field(u8, flags )
__field(int, ref )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->parent = cookie->parent;
__entry->cookie = cookie_debug_id;
__entry->where = where;
__entry->usage = usage;
__entry->n_children = atomic_read(&cookie->n_children);
__entry->n_active = atomic_read(&cookie->n_active);
__entry->flags = cookie->flags;
__entry->ref = ref;
),
TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x",
TP_printk("%s c=%08x r=%d",
__print_symbolic(__entry->where, fscache_cookie_traces),
__entry->cookie, __entry->usage,
__entry->parent, __entry->n_children, __entry->n_active,
__entry->flags)
__entry->cookie, __entry->ref)
);
TRACE_EVENT(fscache_netfs,
......@@ -199,17 +189,17 @@ TRACE_EVENT(fscache_netfs,
TP_ARGS(netfs),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(unsigned int, cookie )
__array(char, name, 8 )
),
TP_fast_assign(
__entry->cookie = netfs->primary_index;
__entry->cookie = netfs->primary_index->debug_id;
strncpy(__entry->name, netfs->name, 8);
__entry->name[7] = 0;
),
TP_printk("c=%p n=%s",
TP_printk("c=%08x n=%s",
__entry->cookie, __entry->name)
);
......@@ -219,26 +209,26 @@ TRACE_EVENT(fscache_acquire,
TP_ARGS(cookie),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(struct fscache_cookie *, parent )
__field(unsigned int, cookie )
__field(unsigned int, parent )
__array(char, name, 8 )
__field(int, p_usage )
__field(int, p_ref )
__field(int, p_n_children )
__field(u8, p_flags )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->parent = cookie->parent;
__entry->p_usage = atomic_read(&cookie->parent->usage);
__entry->cookie = cookie->debug_id;
__entry->parent = cookie->parent->debug_id;
__entry->p_ref = refcount_read(&cookie->parent->ref);
__entry->p_n_children = atomic_read(&cookie->parent->n_children);
__entry->p_flags = cookie->parent->flags;
memcpy(__entry->name, cookie->def->name, 8);
__entry->name[7] = 0;
),
TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s",
__entry->cookie, __entry->parent, __entry->p_usage,
TP_printk("c=%08x p=%08x pr=%d pc=%d pf=%02x n=%s",
__entry->cookie, __entry->parent, __entry->p_ref,
__entry->p_n_children, __entry->p_flags, __entry->name)
);
......@@ -248,9 +238,9 @@ TRACE_EVENT(fscache_relinquish,
TP_ARGS(cookie, retire),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(struct fscache_cookie *, parent )
__field(int, usage )
__field(unsigned int, cookie )
__field(unsigned int, parent )
__field(int, ref )
__field(int, n_children )
__field(int, n_active )
__field(u8, flags )
......@@ -258,17 +248,17 @@ TRACE_EVENT(fscache_relinquish,
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->parent = cookie->parent;
__entry->usage = atomic_read(&cookie->usage);
__entry->cookie = cookie->debug_id;
__entry->parent = cookie->parent->debug_id;
__entry->ref = refcount_read(&cookie->ref);
__entry->n_children = atomic_read(&cookie->n_children);
__entry->n_active = atomic_read(&cookie->n_active);
__entry->flags = cookie->flags;
__entry->retire = retire;
),
TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u",
__entry->cookie, __entry->usage,
TP_printk("c=%08x r=%d p=%08x Nc=%d Na=%d f=%02x r=%u",
__entry->cookie, __entry->ref,
__entry->parent, __entry->n_children, __entry->n_active,
__entry->flags, __entry->retire)
);
......@@ -279,23 +269,23 @@ TRACE_EVENT(fscache_enable,
TP_ARGS(cookie),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(int, usage )
__field(unsigned int, cookie )
__field(int, ref )
__field(int, n_children )
__field(int, n_active )
__field(u8, flags )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->usage = atomic_read(&cookie->usage);
__entry->cookie = cookie->debug_id;
__entry->ref = refcount_read(&cookie->ref);
__entry->n_children = atomic_read(&cookie->n_children);
__entry->n_active = atomic_read(&cookie->n_active);
__entry->flags = cookie->flags;
),
TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
__entry->cookie, __entry->usage,
TP_printk("c=%08x r=%d Nc=%d Na=%d f=%02x",
__entry->cookie, __entry->ref,
__entry->n_children, __entry->n_active, __entry->flags)
);
......@@ -305,23 +295,23 @@ TRACE_EVENT(fscache_disable,
TP_ARGS(cookie),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(int, usage )
__field(unsigned int, cookie )
__field(int, ref )
__field(int, n_children )
__field(int, n_active )
__field(u8, flags )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->usage = atomic_read(&cookie->usage);
__entry->cookie = cookie->debug_id;
__entry->ref = refcount_read(&cookie->ref);
__entry->n_children = atomic_read(&cookie->n_children);
__entry->n_active = atomic_read(&cookie->n_active);
__entry->flags = cookie->flags;
),
TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
__entry->cookie, __entry->usage,
TP_printk("c=%08x r=%d Nc=%d Na=%d f=%02x",
__entry->cookie, __entry->ref,
__entry->n_children, __entry->n_active, __entry->flags)
);
......@@ -333,8 +323,8 @@ TRACE_EVENT(fscache_osm,
TP_ARGS(object, state, wait, oob, event_num),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(struct fscache_object *, object )
__field(unsigned int, cookie )
__field(unsigned int, object )
__array(char, state, 8 )
__field(bool, wait )
__field(bool, oob )
......@@ -342,15 +332,15 @@ TRACE_EVENT(fscache_osm,
),
TP_fast_assign(
__entry->cookie = object->cookie;
__entry->object = object;
__entry->cookie = object->cookie->debug_id;
__entry->object = object->debug_id;
__entry->wait = wait;
__entry->oob = oob;
__entry->event_num = event_num;
memcpy(__entry->state, state->short_name, 8);
),
TP_printk("c=%p o=%p %s %s%sev=%d",
TP_printk("c=%08x o=%08d %s %s%sev=%d",
__entry->cookie,
__entry->object,
__entry->state,
......@@ -370,18 +360,18 @@ TRACE_EVENT(fscache_page,
TP_ARGS(cookie, page, why),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(unsigned int, cookie )
__field(pgoff_t, page )
__field(enum fscache_page_trace, why )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->cookie = cookie->debug_id;
__entry->page = page->index;
__entry->why = why;
),
TP_printk("c=%p %s pg=%lx",
TP_printk("c=%08x %s pg=%lx",
__entry->cookie,
__print_symbolic(__entry->why, fscache_page_traces),
__entry->page)
......@@ -394,20 +384,20 @@ TRACE_EVENT(fscache_check_page,
TP_ARGS(cookie, page, val, n),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(unsigned int, cookie )
__field(void *, page )
__field(void *, val )
__field(int, n )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->cookie = cookie->debug_id;
__entry->page = page;
__entry->val = val;
__entry->n = n;
),
TP_printk("c=%p pg=%p val=%p n=%d",
TP_printk("c=%08x pg=%p val=%p n=%d",
__entry->cookie, __entry->page, __entry->val, __entry->n)
);
......@@ -417,14 +407,14 @@ TRACE_EVENT(fscache_wake_cookie,
TP_ARGS(cookie),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(unsigned int, cookie )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->cookie = cookie->debug_id;
),
TP_printk("c=%p", __entry->cookie)
TP_printk("c=%08x", __entry->cookie)
);
TRACE_EVENT(fscache_op,
......@@ -434,18 +424,18 @@ TRACE_EVENT(fscache_op,
TP_ARGS(cookie, op, why),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(struct fscache_operation *, op )
__field(unsigned int, cookie )
__field(unsigned int, op )
__field(enum fscache_op_trace, why )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->op = op;
__entry->cookie = cookie ? cookie->debug_id : 0;
__entry->op = op->debug_id;
__entry->why = why;
),
TP_printk("c=%p op=%p %s",
TP_printk("c=%08x op=%08x %s",
__entry->cookie, __entry->op,
__print_symbolic(__entry->why, fscache_op_traces))
);
......@@ -457,20 +447,20 @@ TRACE_EVENT(fscache_page_op,
TP_ARGS(cookie, page, op, what),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(unsigned int, cookie )
__field(unsigned int, op )
__field(pgoff_t, page )
__field(struct fscache_operation *, op )
__field(enum fscache_page_op_trace, what )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->cookie = cookie->debug_id;
__entry->page = page ? page->index : 0;
__entry->op = op;
__entry->op = op->debug_id;
__entry->what = what;
),
TP_printk("c=%p %s pg=%lx op=%p",
TP_printk("c=%08x %s pg=%lx op=%08x",
__entry->cookie,
__print_symbolic(__entry->what, fscache_page_op_traces),
__entry->page, __entry->op)
......@@ -483,20 +473,20 @@ TRACE_EVENT(fscache_wrote_page,
TP_ARGS(cookie, page, op, ret),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(unsigned int, cookie )
__field(unsigned int, op )
__field(pgoff_t, page )
__field(struct fscache_operation *, op )
__field(int, ret )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->cookie = cookie->debug_id;
__entry->page = page->index;
__entry->op = op;
__entry->op = op->debug_id;
__entry->ret = ret;
),
TP_printk("c=%p pg=%lx op=%p ret=%d",
TP_printk("c=%08x pg=%lx op=%08x ret=%d",
__entry->cookie, __entry->page, __entry->op, __entry->ret)
);
......@@ -507,22 +497,22 @@ TRACE_EVENT(fscache_gang_lookup,
TP_ARGS(cookie, op, results, n, store_limit),
TP_STRUCT__entry(
__field(struct fscache_cookie *, cookie )
__field(struct fscache_operation *, op )
__field(unsigned int, cookie )
__field(unsigned int, op )
__field(pgoff_t, results0 )
__field(int, n )
__field(pgoff_t, store_limit )
),
TP_fast_assign(
__entry->cookie = cookie;
__entry->op = op;
__entry->cookie = cookie->debug_id;
__entry->op = op->debug_id;
__entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
__entry->n = n;
__entry->store_limit = store_limit;
),
TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx",
TP_printk("c=%08x op=%08x r0=%lx n=%d sl=%lx",
__entry->cookie, __entry->op, __entry->results0, __entry->n,
__entry->store_limit)
);
......
......@@ -139,7 +139,7 @@ TRACE_EVENT(netfs_read,
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->cookie = rreq->cookie_debug_id;
__entry->cookie = rreq->cache_resources.debug_id;
__entry->start = start;
__entry->len = len;
__entry->what = what;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment