Commit 1362729b authored by David Howells

FS-Cache: Simplify cookie retention for fscache_objects, fixing oops

Simplify the way fscache cache objects retain their cookie.  The way I
implemented the cookie storage handling made synchronisation a pain (ie. the
object state machine can't rely on the cookie actually still being there).

Instead of the object being detached from the cookie and the cookie being
freed in __fscache_relinquish_cookie(), we defer both operations:

 (*) The detachment of the object from the list in the cookie now takes place
     in fscache_drop_object() and is thus governed by the object state machine
     (fscache_detach_from_cookie() has been removed).

 (*) The release of the cookie is now in fscache_object_destroy() - which is
     called by the cache backend just before it frees the object.

This means that the fscache_cookie struct is now available to the cache all the
way through from ->alloc_object() to ->drop_object() and ->put_object() -
meaning that it's no longer necessary to take object->lock to guarantee access.

However, __fscache_relinquish_cookie() doesn't wait for the object to go all
the way through to destruction before letting the netfs proceed.  That would
massively slow down the netfs.  Since __fscache_relinquish_cookie() leaves the
cookie around, it must therefore break all attachments to the netfs - which
includes ->def, ->netfs_data and any outstanding page reads/writes.

To handle this, struct fscache_cookie now has an n_active counter:

 (1) This starts off initialised to 1.

 (2) Any time the cache needs to get at the netfs data, it calls
     fscache_use_cookie() to increment it - provided the count is not already
     zero.  If it is zero, then access is not permitted.

 (3) When the cache has finished with the data, it calls fscache_unuse_cookie()
     to decrement it.  This does a wake-up on it if it reaches 0.

 (4) __fscache_relinquish_cookie() decrements n_active and then waits for it to
     reach 0.  The initialisation to 1 in step (1) ensures that we only get
     wake ups when we're trying to get rid of the cookie.

This leaves __fscache_relinquish_cookie() a lot simpler.
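
As a rough illustration of the counting scheme above, the following is a
minimal, self-contained userspace C model of the n_active lifecycle.  It is
not the kernel code: C11 atomics and a pthread condition variable stand in
for atomic_inc_not_zero()/atomic_dec_and_test() and the
wait_on_atomic_t()/wake_up_atomic_t() pair, and every name below
(cookie_model, cookie_model_use(), etc.) is made up for the example.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct cookie_model {
	atomic_int n_active;		/* starts at 1, per step (1) */
	pthread_mutex_t lock;
	pthread_cond_t drained;
	void *netfs_data;		/* stands in for ->def / ->netfs_data */
};

/* Step (2): pin the netfs data, but only if relinquishment hasn't begun. */
static bool cookie_model_use(struct cookie_model *c)
{
	int v = atomic_load(&c->n_active);

	while (v != 0)			/* like atomic_inc_not_zero() */
		if (atomic_compare_exchange_weak(&c->n_active, &v, v + 1))
			return true;
	return false;			/* count already 0: access not permitted */
}

/* Step (3): drop the pin; wake the relinquisher if we were the last user. */
static void cookie_model_unuse(struct cookie_model *c)
{
	if (atomic_fetch_sub(&c->n_active, 1) == 1) {
		pthread_mutex_lock(&c->lock);
		pthread_cond_broadcast(&c->drained);
		pthread_mutex_unlock(&c->lock);
	}
}

/* Step (4): drop the initial reference and wait for the count to drain;
 * only then is it safe to break the attachments to the netfs. */
static void cookie_model_relinquish(struct cookie_model *c)
{
	if (atomic_fetch_sub(&c->n_active, 1) != 1) {
		pthread_mutex_lock(&c->lock);
		while (atomic_load(&c->n_active) != 0)
			pthread_cond_wait(&c->drained, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}
	c->netfs_data = NULL;	/* no cache-side user can reach it any more */
}

Because the count starts at 1, cache-side users dropping their pins can never
take it to zero on their own; only the relinquisher, which removes that
initial reference, ever sees the count reach 0 and gets woken.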


***
This fixes a problem in the current code whereby if fscache_invalidate() is
followed sufficiently quickly by fscache_relinquish_cookie() then it is
possible for __fscache_relinquish_cookie() to have detached the cookie from the
object and cleared the pointer before a thread is dispatched to process the
invalidation state in the object state machine.

Since the pending write clearance was deferred to the invalidation state to
make it asynchronous, we need either to wait in relinquishment for the
invalidation state to clear the stores tree, or to handle the clearance in
relinquishment itself.

Further, if the relinquishment code does clear the tree, then the invalidation
state needs to make the clearance contingent on still having the cookie to hand
(since that's where the tree is rooted) and we have to prevent the cookie from
disappearing for the duration.

This can lead to an oops like the following:

BUG: unable to handle kernel NULL pointer dereference at 000000000000000c
...
RIP: 0010:[<ffffffff8151023e>] _spin_lock+0xe/0x30
...
CR2: 000000000000000c ...
...
Process kslowd002 (...)
....
Call Trace:
 [<ffffffffa01c3278>] fscache_invalidate_writes+0x38/0xd0 [fscache]
 [<ffffffff810096f0>] ? __switch_to+0xd0/0x320
 [<ffffffff8105e759>] ? find_busiest_queue+0x69/0x150
 [<ffffffff8110ddd4>] ? slow_work_enqueue+0x104/0x180
 [<ffffffffa01c1303>] fscache_object_slow_work_execute+0x5e3/0x9d0 [fscache]
 [<ffffffff81096b67>] ? bit_waitqueue+0x17/0xd0
 [<ffffffff8110e233>] slow_work_execute+0x233/0x310
 [<ffffffff8110e515>] slow_work_thread+0x205/0x360
 [<ffffffff81096ca0>] ? autoremove_wake_function+0x0/0x40
 [<ffffffff8110e310>] ? slow_work_thread+0x0/0x360
 [<ffffffff81096936>] kthread+0x96/0xa0
 [<ffffffff8100c0ca>] child_rip+0xa/0x20
 [<ffffffff810968a0>] ? kthread+0x0/0xa0
 [<ffffffff8100c0c0>] ? child_rip+0x0/0x20

The parameter to fscache_invalidate_writes() was object->cookie which is NULL.
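
With the new scheme the state machine can avoid that NULL dereference by
pinning the netfs attachment before touching anything reached through the
cookie.  A hedged, kernel-style sketch of the shape of such a guard follows;
it is illustrative only, it does not reproduce the actual state-machine
changes in this patch, and example_clear_pending_writes() is a hypothetical
helper:

/* Illustrative only: example_clear_pending_writes() is made up; the real
 * invalidation handling is in the fscache state machine. */
static void example_invalidation_step(struct fscache_object *object)
{
	if (!fscache_use_cookie(object)) {
		/* n_active already reached 0: the netfs has relinquished the
		 * cookie and the relinquishment path has dealt with any
		 * pending netfs-visible work, so there is nothing to clear. */
		return;
	}

	/* The netfs pointers cannot be torn down under us now. */
	example_clear_pending_writes(object->cookie);

	fscache_unuse_cookie(object);
}
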
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-By: Milosz Tanski <milosz@adfin.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
parent caaef690
......@@ -212,20 +212,29 @@ static void cachefiles_update_object(struct fscache_object *_object)
object = container_of(_object, struct cachefiles_object, fscache);
cache = container_of(object->fscache.cache, struct cachefiles_cache,
cache);
if (!fscache_use_cookie(_object)) {
_leave(" [relinq]");
return;
}
cookie = object->fscache.cookie;
if (!cookie->def->get_aux) {
fscache_unuse_cookie(_object);
_leave(" [no aux]");
return;
}
auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp);
if (!auxdata) {
fscache_unuse_cookie(_object);
_leave(" [nomem]");
return;
}
auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511);
fscache_unuse_cookie(_object);
ASSERTCMP(auxlen, <, 511);
auxdata->len = auxlen + 1;
......@@ -263,7 +272,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
#endif
/* delete retired objects */
if (test_bit(FSCACHE_OBJECT_RETIRE, &object->fscache.flags) &&
if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
_object != cache->cache.fsdef
) {
_debug("- retire object OBJ%x", object->fscache.debug_id);
......
......@@ -109,13 +109,12 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
struct dentry *dentry = object->dentry;
int ret;
ASSERT(object->fscache.cookie);
ASSERT(dentry);
_enter("%p,#%d", object, auxdata->len);
/* attempt to install the cache metadata directly */
_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
_debug("SET #%u", auxdata->len);
ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
&auxdata->type, auxdata->len,
......@@ -138,13 +137,12 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
struct dentry *dentry = object->dentry;
int ret;
ASSERT(object->fscache.cookie);
ASSERT(dentry);
_enter("%p,#%d", object, auxdata->len);
/* attempt to install the cache metadata directly */
_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
_debug("SET #%u", auxdata->len);
ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
&auxdata->type, auxdata->len,
......
......@@ -95,6 +95,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
atomic_set(&cookie->usage, 1);
atomic_set(&cookie->n_children, 0);
/* We keep the active count elevated until relinquishment to prevent an
* attempt to wake up every time the object operations queue quiesces.
*/
atomic_set(&cookie->n_active, 1);
atomic_inc(&parent->usage);
atomic_inc(&parent->n_children);
......@@ -177,7 +182,6 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
cookie->flags =
(1 << FSCACHE_COOKIE_LOOKING_UP) |
(1 << FSCACHE_COOKIE_CREATING) |
(1 << FSCACHE_COOKIE_NO_DATA_YET);
/* ask the cache to allocate objects for this cookie and its parent
......@@ -467,7 +471,6 @@ EXPORT_SYMBOL(__fscache_update_cookie);
*/
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
struct fscache_cache *cache;
struct fscache_object *object;
fscache_stat(&fscache_n_relinquishes);
......@@ -480,8 +483,11 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
return;
}
_enter("%p{%s,%p},%d",
cookie, cookie->def->name, cookie->netfs_data, retire);
_enter("%p{%s,%p,%d},%d",
cookie, cookie->def->name, cookie->netfs_data,
atomic_read(&cookie->n_active), retire);
ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
if (atomic_read(&cookie->n_children) != 0) {
printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
......@@ -489,62 +495,28 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
BUG();
}
/* wait for the cookie to finish being instantiated (or to fail) */
if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
fscache_stat(&fscache_n_relinquishes_waitcrt);
wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
fscache_wait_bit, TASK_UNINTERRUPTIBLE);
}
/* No further netfs-accessing operations on this cookie permitted */
set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
if (retire)
set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
try_again:
spin_lock(&cookie->lock);
/* break links with all the active objects */
while (!hlist_empty(&cookie->backing_objects)) {
int n_reads;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object,
cookie_link);
_debug("RELEASE OBJ%x", object->debug_id);
set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
n_reads = atomic_read(&object->n_reads);
if (n_reads) {
int n_ops = object->n_ops;
int n_in_progress = object->n_in_progress;
spin_unlock(&cookie->lock);
printk(KERN_ERR "FS-Cache:"
" Cookie '%s' still has %d outstanding reads (%d,%d)\n",
cookie->def->name,
n_reads, n_ops, n_in_progress);
wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
fscache_wait_bit, TASK_UNINTERRUPTIBLE);
printk("Wait finished\n");
goto try_again;
}
/* detach each cache object from the object cookie */
spin_lock(&object->lock);
hlist_del_init(&object->cookie_link);
cache = object->cache;
object->cookie = NULL;
if (retire)
set_bit(FSCACHE_OBJECT_RETIRE, &object->flags);
hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
spin_unlock(&object->lock);
if (atomic_dec_and_test(&cookie->usage))
/* the cookie refcount shouldn't be reduced to 0 yet */
BUG();
}
spin_unlock(&cookie->lock);
/* Wait for cessation of activity requiring access to the netfs (when
* n_active reaches 0).
*/
if (!atomic_dec_and_test(&cookie->n_active))
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
/* detach pointers back to the netfs */
/* Clear pointers back to the netfs */
cookie->netfs_data = NULL;
cookie->def = NULL;
spin_unlock(&cookie->lock);
BUG_ON(cookie->stores.rnode);
if (cookie->parent) {
ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
......@@ -552,7 +524,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
atomic_dec(&cookie->parent->n_children);
}
/* finally dispose of the cookie */
/* Dispose of the netfs's link to the cookie */
ASSERTCMP(atomic_read(&cookie->usage), >, 0);
fscache_cookie_put(cookie);
......
......@@ -55,6 +55,7 @@ static struct fscache_cookie_def fscache_fsdef_index_def = {
struct fscache_cookie fscache_fsdef_index = {
.usage = ATOMIC_INIT(1),
.n_active = ATOMIC_INIT(1),
.lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
.backing_objects = HLIST_HEAD_INIT,
.def = &fscache_fsdef_index_def,
......
......@@ -93,6 +93,7 @@ static inline bool fscache_object_congested(void)
extern int fscache_wait_bit(void *);
extern int fscache_wait_bit_interruptible(void *);
extern int fscache_wait_atomic_t(atomic_t *);
/*
* object.c
......@@ -106,8 +107,10 @@ extern void fscache_enqueue_object(struct fscache_object *);
extern const struct file_operations fscache_objlist_fops;
extern void fscache_objlist_add(struct fscache_object *);
extern void fscache_objlist_remove(struct fscache_object *);
#else
#define fscache_objlist_add(object) do {} while(0)
#define fscache_objlist_remove(object) do {} while(0)
#endif
/*
......
......@@ -205,7 +205,6 @@ int fscache_wait_bit(void *flags)
schedule();
return 0;
}
EXPORT_SYMBOL(fscache_wait_bit);
/*
* wait_on_bit() sleep function for interruptible waiting
......@@ -215,4 +214,12 @@ int fscache_wait_bit_interruptible(void *flags)
schedule();
return signal_pending(current);
}
EXPORT_SYMBOL(fscache_wait_bit_interruptible);
/*
* wait_on_atomic_t() sleep function for uninterruptible waiting
*/
int fscache_wait_atomic_t(atomic_t *p)
{
schedule();
return 0;
}
......@@ -40,6 +40,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
/* initialise the primary index cookie */
atomic_set(&netfs->primary_index->usage, 1);
atomic_set(&netfs->primary_index->n_children, 0);
atomic_set(&netfs->primary_index->n_active, 1);
netfs->primary_index->def = &fscache_fsdef_netfs_def;
netfs->primary_index->parent = &fscache_fsdef_index;
......
......@@ -70,13 +70,10 @@ void fscache_objlist_add(struct fscache_object *obj)
write_unlock(&fscache_object_list_lock);
}
/**
* fscache_object_destroy - Note that a cache object is about to be destroyed
* @object: The object to be destroyed
*
* Note the imminent destruction and deallocation of a cache object record.
/*
* Remove an object from the object list.
*/
void fscache_object_destroy(struct fscache_object *obj)
void fscache_objlist_remove(struct fscache_object *obj)
{
write_lock(&fscache_object_list_lock);
......@@ -85,7 +82,6 @@ void fscache_object_destroy(struct fscache_object *obj)
write_unlock(&fscache_object_list_lock);
}
EXPORT_SYMBOL(fscache_object_destroy);
/*
* find the object in the tree on or after the specified index
......@@ -166,10 +162,9 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
{
struct fscache_objlist_data *data = m->private;
struct fscache_object *obj = v;
struct fscache_cookie *cookie;
unsigned long config = data->config;
uint16_t keylen, auxlen;
char _type[3], *type;
bool no_cookie;
u8 *buf = data->buf, *p;
if ((unsigned long) v == 1) {
......@@ -216,8 +211,9 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
} \
} while(0)
cookie = obj->cookie;
if (~config) {
FILTER(obj->cookie,
FILTER(cookie->def,
COOKIE, NOCOOKIE);
FILTER(fscache_object_is_active(obj) ||
obj->n_ops != 0 ||
......@@ -250,12 +246,10 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
obj->flags,
work_busy(&obj->work));
no_cookie = true;
keylen = auxlen = 0;
if (obj->cookie) {
spin_lock(&obj->lock);
if (obj->cookie) {
switch (obj->cookie->def->type) {
if (fscache_use_cookie(obj)) {
uint16_t keylen = 0, auxlen = 0;
switch (cookie->def->type) {
case 0:
type = "IX";
break;
......@@ -263,35 +257,29 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
type = "DT";
break;
default:
sprintf(_type, "%02u",
obj->cookie->def->type);
sprintf(_type, "%02u", cookie->def->type);
type = _type;
break;
}
seq_printf(m, "%-16s %s %2lx %16p",
obj->cookie->def->name,
cookie->def->name,
type,
obj->cookie->flags,
obj->cookie->netfs_data);
cookie->flags,
cookie->netfs_data);
if (obj->cookie->def->get_key &&
if (cookie->def->get_key &&
config & FSCACHE_OBJLIST_CONFIG_KEY)
keylen = obj->cookie->def->get_key(
obj->cookie->netfs_data,
keylen = cookie->def->get_key(cookie->netfs_data,
buf, 400);
if (obj->cookie->def->get_aux &&
if (cookie->def->get_aux &&
config & FSCACHE_OBJLIST_CONFIG_AUX)
auxlen = obj->cookie->def->get_aux(
obj->cookie->netfs_data,
auxlen = cookie->def->get_aux(cookie->netfs_data,
buf + keylen, 512 - keylen);
fscache_unuse_cookie(obj);
no_cookie = false;
}
spin_unlock(&obj->lock);
if (!no_cookie && (keylen > 0 || auxlen > 0)) {
if (keylen > 0 || auxlen > 0) {
seq_printf(m, " ");
for (p = buf; keylen > 0; keylen--)
seq_printf(m, "%02x", *p++);
......@@ -302,12 +290,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
seq_printf(m, "%02x", *p++);
}
}
}
if (no_cookie)
seq_printf(m, "<no_cookie>\n");
else
seq_printf(m, "\n");
} else {
seq_printf(m, "<no_netfs>\n");
}
return 0;
}
......
This diff is collapsed.
......@@ -424,14 +424,10 @@ void fscache_put_operation(struct fscache_operation *op)
object = op->object;
if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
if (atomic_dec_and_test(&object->n_reads)) {
clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
&object->cookie->flags);
wake_up_bit(&object->cookie->flags,
FSCACHE_COOKIE_WAITING_ON_READS);
}
}
if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
atomic_dec(&object->n_reads);
if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
fscache_unuse_cookie(object);
/* now... we may get called with the object spinlock held, so we
* complete the cleanup here only if we can immediately acquire the
......
......@@ -163,10 +163,12 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
fscache_stat(&fscache_n_attr_changed_calls);
if (fscache_object_is_active(object)) {
if (fscache_object_is_active(object) &&
fscache_use_cookie(object)) {
fscache_stat(&fscache_n_cop_attr_changed);
ret = object->cache->ops->attr_changed(object);
fscache_stat_d(&fscache_n_cop_attr_changed);
fscache_unuse_cookie(object);
if (ret < 0)
fscache_abort_object(object);
}
......@@ -246,6 +248,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
* allocate a retrieval op
*/
static struct fscache_retrieval *fscache_alloc_retrieval(
struct fscache_cookie *cookie,
struct address_space *mapping,
fscache_rw_complete_t end_io_func,
void *context)
......@@ -260,7 +263,10 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
}
fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
atomic_inc(&cookie->n_active);
op->op.flags = FSCACHE_OP_MYTHREAD |
(1UL << FSCACHE_OP_WAITING) |
(1UL << FSCACHE_OP_UNUSE_COOKIE);
op->mapping = mapping;
op->end_io_func = end_io_func;
op->context = context;
......@@ -394,7 +400,8 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
if (fscache_wait_for_deferred_lookup(cookie) < 0)
return -ERESTARTSYS;
op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
op = fscache_alloc_retrieval(cookie, page->mapping,
end_io_func,context);
if (!op) {
_leave(" = -ENOMEM");
return -ENOMEM;
......@@ -465,6 +472,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
atomic_dec(&object->n_reads);
nobufs_unlock:
spin_unlock(&cookie->lock);
atomic_dec(&cookie->n_active);
kfree(op);
nobufs:
fscache_stat(&fscache_n_retrievals_nobufs);
......@@ -522,7 +530,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
if (fscache_wait_for_deferred_lookup(cookie) < 0)
return -ERESTARTSYS;
op = fscache_alloc_retrieval(mapping, end_io_func, context);
op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
if (!op)
return -ENOMEM;
op->n_pages = *nr_pages;
......@@ -589,6 +597,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
atomic_dec(&object->n_reads);
nobufs_unlock:
spin_unlock(&cookie->lock);
atomic_dec(&cookie->n_active);
kfree(op);
nobufs:
fscache_stat(&fscache_n_retrievals_nobufs);
......@@ -631,7 +640,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
if (fscache_wait_for_deferred_lookup(cookie) < 0)
return -ERESTARTSYS;
op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
if (!op)
return -ENOMEM;
op->n_pages = 1;
......@@ -675,6 +684,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
nobufs_unlock:
spin_unlock(&cookie->lock);
atomic_dec(&cookie->n_active);
kfree(op);
nobufs:
fscache_stat(&fscache_n_allocs_nobufs);
......@@ -876,7 +886,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
fscache_operation_init(&op->op, fscache_write_op,
fscache_release_write_op);
op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
op->op.flags = FSCACHE_OP_ASYNC |
(1 << FSCACHE_OP_WAITING) |
(1 << FSCACHE_OP_UNUSE_COOKIE);
ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
if (ret < 0)
......@@ -922,6 +934,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
op->store_limit = object->store_limit;
atomic_inc(&cookie->n_active);
if (fscache_submit_op(object, &op->op) < 0)
goto submit_failed;
......@@ -948,6 +961,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
return 0;
submit_failed:
atomic_dec(&cookie->n_active);
spin_lock(&cookie->stores_lock);
radix_tree_delete(&cookie->stores, page->index);
spin_unlock(&cookie->stores_lock);
......
......@@ -97,7 +97,8 @@ struct fscache_operation {
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
#define FSCACHE_OP_KEEP_FLAGS 0x0070 /* flags to keep when repurposing an op */
#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
enum fscache_operation_state state;
atomic_t usage;
......@@ -314,6 +315,7 @@ struct fscache_cache_ops {
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
atomic_t n_active; /* number of active users of netfs ptrs */
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
......@@ -326,11 +328,11 @@ struct fscache_cookie {
unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
#define FSCACHE_COOKIE_UNAVAILABLE 3 /* T if cookie is unavailable (error, etc) */
#define FSCACHE_COOKIE_WAITING_ON_READS 4 /* T if cookie is waiting on reads */
#define FSCACHE_COOKIE_INVALIDATING 5 /* T if cookie is being invalidated */
#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
#define FSCACHE_COOKIE_RETIRED 5 /* T if cookie was retired */
};
extern struct fscache_cookie fscache_fsdef_index;
......@@ -392,10 +394,9 @@ struct fscache_object {
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
#define FSCACHE_OBJECT_RETIRE 3 /* T if object should be retired */
#define FSCACHE_OBJECT_IS_LIVE 4 /* T if object is not withdrawn or relinquished */
#define FSCACHE_OBJECT_IS_LOOKED_UP 5 /* T if object has been looked up */
#define FSCACHE_OBJECT_IS_AVAILABLE 6 /* T if object has become active */
#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
......@@ -415,16 +416,11 @@ struct fscache_object {
extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
struct fscache_cache *);
extern void fscache_object_destroy(struct fscache_object *);
extern void fscache_object_lookup_negative(struct fscache_object *object);
extern void fscache_obtained_object(struct fscache_object *object);
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern void fscache_object_destroy(struct fscache_object *object);
#else
#define fscache_object_destroy(object) do {} while(0)
#endif
static inline bool fscache_object_is_live(struct fscache_object *object)
{
return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
......@@ -512,6 +508,33 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
op->end_io_func(page, op->context, error);
}
/**
* fscache_use_cookie - Request usage of cookie attached to an object
* @object: Object description
*
* Request usage of the cookie attached to an object. NULL is returned if the
* relinquishment had reduced the cookie usage count to 0.
*/
static inline bool fscache_use_cookie(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
return atomic_inc_not_zero(&cookie->n_active) != 0;
}
/**
* fscache_unuse_cookie - Cease usage of cookie attached to an object
* @object: Object description
*
* Cease usage of the cookie attached to an object. When the users count
* reaches zero then the cookie relinquishment will be permitted to proceed.
*/
static inline void fscache_unuse_cookie(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
if (atomic_dec_and_test(&cookie->n_active))
wake_up_atomic_t(&cookie->n_active);
}
/*
* out-of-line cache backend functions
*/
......