Commit 6e80133f authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-fscache

* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-fscache: (31 commits)
  FS-Cache: Provide nop fscache_stat_d() if CONFIG_FSCACHE_STATS=n
  SLOW_WORK: Fix GFS2 to #include <linux/module.h> before using THIS_MODULE
  SLOW_WORK: Fix CIFS to pass THIS_MODULE to slow_work_register_user()
  CacheFiles: Don't log lookup/create failing with ENOBUFS
  CacheFiles: Catch an overly long wait for an old active object
  CacheFiles: Better showing of debugging information in active object problems
  CacheFiles: Mark parent directory locks as I_MUTEX_PARENT to keep lockdep happy
  CacheFiles: Handle truncate unlocking the page we're reading
  CacheFiles: Don't write a full page if there's only a partial page to cache
  FS-Cache: Actually requeue an object when requested
  FS-Cache: Start processing an object's operations on that object's death
  FS-Cache: Make sure FSCACHE_COOKIE_LOOKING_UP cleared on lookup failure
  FS-Cache: Add a retirement stat counter
  FS-Cache: Handle pages pending storage that get evicted under OOM conditions
  FS-Cache: Handle read request vs lookup, creation or other cache failure
  FS-Cache: Don't delete pending pages from the page-store tracking tree
  FS-Cache: Fix lock misorder in fscache_write_op()
  FS-Cache: The object-available state can't rely on the cookie to be available
  FS-Cache: Permit cache retrieval ops to be interrupted in the initial wait phase
  FS-Cache: Use radix tree preload correctly in tracking of pages to be stored
  ...
parents e3a41d7b 4fa9f4ed
@@ -235,6 +235,7 @@ proc files.
neg=N Number of negative lookups made
pos=N Number of positive lookups made
crt=N Number of objects created by lookup
tmo=N Number of lookups timed out and requeued
Updates n=N Number of update cookie requests seen
nul=N Number of upd reqs given a NULL parent
run=N Number of upd reqs granted CPU time
@@ -250,8 +251,10 @@ proc files.
ok=N Number of successful alloc reqs
wt=N Number of alloc reqs that waited on lookup completion
nbf=N Number of alloc reqs rejected -ENOBUFS
int=N Number of alloc reqs aborted -ERESTARTSYS
ops=N Number of alloc reqs submitted
owt=N Number of alloc reqs waited for CPU time
abt=N Number of alloc reqs aborted due to object death
Retrvls n=N Number of retrieval (read) requests seen
ok=N Number of successful retr reqs
wt=N Number of retr reqs that waited on lookup completion
@@ -261,6 +264,7 @@ proc files.
oom=N Number of retr reqs failed -ENOMEM
ops=N Number of retr reqs submitted
owt=N Number of retr reqs waited for CPU time
abt=N Number of retr reqs aborted due to object death
Stores n=N Number of storage (write) requests seen
ok=N Number of successful store reqs
agn=N Number of store reqs on a page already pending storage
@@ -268,12 +272,37 @@ proc files.
oom=N Number of store reqs failed -ENOMEM
ops=N Number of store reqs submitted
run=N Number of store reqs granted CPU time
pgs=N Number of pages given store req processing time
rxd=N Number of store reqs deleted from tracking tree
olm=N Number of store reqs over store limit
VmScan nos=N Number of release reqs against pages with no pending store
gon=N Number of release reqs against pages stored by time lock granted
bsy=N Number of release reqs ignored due to in-progress store
can=N Number of page stores cancelled due to release req
Ops pend=N Number of times async ops added to pending queues
run=N Number of times async ops given CPU time
enq=N Number of times async ops queued for processing
can=N Number of async ops cancelled
rej=N Number of async ops rejected due to object lookup/create failure
dfr=N Number of async ops queued for deferred release
rel=N Number of async ops released
gc=N Number of deferred-release async ops garbage collected
CacheOp alo=N Number of in-progress alloc_object() cache ops
luo=N Number of in-progress lookup_object() cache ops
luc=N Number of in-progress lookup_complete() cache ops
gro=N Number of in-progress grab_object() cache ops
upo=N Number of in-progress update_object() cache ops
dro=N Number of in-progress drop_object() cache ops
pto=N Number of in-progress put_object() cache ops
syn=N Number of in-progress sync_cache() cache ops
atc=N Number of in-progress attr_changed() cache ops
rap=N Number of in-progress read_or_alloc_page() cache ops
ras=N Number of in-progress read_or_alloc_pages() cache ops
alp=N Number of in-progress allocate_page() cache ops
als=N Number of in-progress allocate_pages() cache ops
wrp=N Number of in-progress write_page() cache ops
ucp=N Number of in-progress uncache_page() cache ops
dsp=N Number of in-progress dissociate_pages() cache ops
(*) /proc/fs/fscache/histogram
@@ -299,6 +328,87 @@ proc files.
jiffy range covered, and the SECS field the equivalent number of seconds.
===========
OBJECT LIST
===========
If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
list of all the objects currently allocated and allow them to be viewed
through:
/proc/fs/fscache/objects
This will look something like:
[root@andromeda ~]# head /proc/fs/fscache/objects
OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA OBJECT_KEY, AUX_DATA
======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
17e4b 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
1693a 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
where the first set of columns before the '|' describes the object:
COLUMN DESCRIPTION
======= ===============================================================
OBJECT Object debugging ID (appears as OBJ%x in some debug messages)
PARENT Debugging ID of parent object
STAT Object state
CHLDN Number of child objects of this object
OPS Number of outstanding operations on this object
OOP Number of outstanding child object management operations
IPR Number of in-progress operations on this object
EX Number of outstanding exclusive operations
READS Number of outstanding read operations
EM Object's event mask
EV Events raised on this object
F Object flags
S Object slow-work work item flags
and the second set of columns describes the object's cookie, if present:
COLUMN DESCRIPTION
=============== =======================================================
NETFS_COOKIE_DEF Name of netfs cookie definition
TY Cookie type (IX - index, DT - data, hex - special)
FL Cookie flags
NETFS_DATA Netfs private data stored in the cookie
OBJECT_KEY Object key } 1 column, with separating comma
AUX_DATA Object aux data } presence may be configured
The data shown may be filtered by attaching a key to an appropriate keyring
before viewing the file.  Something like:
keyctl add user fscache:objlist <restrictions> @s
where <restrictions> are a selection of the following letters:
K Show hexdump of object key (don't show if not given)
A Show hexdump of object aux data (don't show if not given)
and the following paired letters:
C Show objects that have a cookie
c Show objects that don't have a cookie
B Show objects that are busy
b Show objects that aren't busy
W Show objects that have pending writes
w Show objects that don't have pending writes
R Show objects that have outstanding reads
r Show objects that don't have outstanding reads
S Show objects that have slow work queued
s Show objects that don't have slow work queued
If neither side of a letter pair is given, then both are implied. For example:
keyctl add user fscache:objlist KB @s
shows objects that are busy, and lists their object keys, but does not dump
their auxiliary data. It also implies "CcWwRrSs", but as 'B' is given, 'b' is
not implied.
By default all objects and all fields will be shown.
=========
DEBUGGING
=========
...
@@ -641,7 +641,7 @@ data file must be retired (see the relinquish cookie function below).
Furthermore, note that this does not cancel the asynchronous read or write
operation started by the read/alloc and write functions, so the page
-invalidation and release functions must use:
invalidation functions must use:
bool fscache_check_page_write(struct fscache_cookie *cookie,
struct page *page);
@@ -654,6 +654,25 @@ to see if a page is being written to the cache, and:
to wait for it to finish if it is.
When releasepage() is being implemented, a special FS-Cache function exists to
manage the heuristics of coping with vmscan trying to eject pages, which may
conflict with the cache trying to write pages to the cache (which may itself
need to allocate memory):
bool fscache_maybe_release_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp);
This takes the netfs cookie, and the page and gfp arguments as supplied to
releasepage().  It will return false if the page cannot be released yet for
some reason; if it returns true, the page has been uncached and can now be
released.
To make a page available for release, this function may wait for an outstanding
storage request to complete, or it may attempt to cancel the storage request -
in which case the page will not be stored in the cache this time.
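As an illustrative sketch only (the my_* names are hypothetical and not part
of this commit; the afs and 9p conversions further down follow the same
pattern), a netfs's releasepage() then reduces to something like:

	static int my_netfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct my_netfs_inode *ino = MY_NETFS_I(page->mapping->host);

		/* FS-Cache may wait for, or cancel, a pending store here */
		if (!fscache_maybe_release_page(ino->cache, page, gfp))
			return 0;	/* page is still in use by the cache */

		/* release any other private state attached to the page */
		return 1;
	}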
==========================
INDEX AND DATA FILE UPDATE
==========================
...
@@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long.
Operations of both types may sleep during execution, thus tying up the thread
loaned to it.
A further class of work item is available, based on the slow work item class:
(*) Delayed slow work items.
These are slow work items that have a timer to defer queueing of the item for
a while.
THREAD-TO-CLASS ALLOCATION
--------------------------
@@ -64,9 +71,11 @@ USING SLOW WORK ITEMS
Firstly, a module or subsystem wanting to make use of slow work items must
register its interest:
-int ret = slow_work_register_user();
int ret = slow_work_register_user(struct module *module);
This will return 0 if successful, or a -ve error upon failure.  The module
pointer should be the module interested in using this facility (almost
certainly THIS_MODULE).
Slow work items may then be set up by:
@@ -91,6 +100,10 @@ Slow work items may then be set up by:
slow_work_init(&myitem, &myitem_ops);
or:
delayed_slow_work_init(&myitem, &myitem_ops);
or:
vslow_work_init(&myitem, &myitem_ops);
@@ -102,15 +115,92 @@ A suitably set up work item can then be enqueued for processing:
int ret = slow_work_enqueue(&myitem);
This will return a -ve error if the thread pool is unable to gain a reference
-on the item, 0 otherwise.
on the item, 0 otherwise, or (for delayed work):
int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
The items are reference counted, so there ought to be no need for a flush
-operation.  When all a module's slow work items have been processed, and the
operation.  But as the reference counting is optional, means to cancel
existing work items are also included:
cancel_slow_work(&myitem);
cancel_delayed_slow_work(&myitem);
can be used to cancel pending work.  These cancel functions wait for existing
work to have been executed (or prevent it from executing, depending on
timing).
When all a module's slow work items have been processed, and the module has no
further interest in the facility, it should unregister its interest:
-slow_work_unregister_user();
slow_work_unregister_user(struct module *module);
The module pointer is used to wait for all outstanding work items for that
module before completing the unregistration. This prevents the put_ref() code
from being taken away before it completes. module should almost certainly be
THIS_MODULE.
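Pulling the above together, a minimal user might look like the following
sketch (the my_* names are hypothetical; only the ->execute() op is supplied,
which the item operations section below permits):

	#include <linux/module.h>
	#include <linux/slow-work.h>

	static void my_work_execute(struct slow_work *work)
	{
		/* lengthy work: may sleep, do disk I/O and take locks */
	}

	static const struct slow_work_ops my_work_ops = {
		.owner	 = THIS_MODULE,
		.execute = my_work_execute,
	};

	static struct slow_work my_work;

	static int __init my_module_init(void)
	{
		int ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;
		slow_work_init(&my_work, &my_work_ops);
		ret = slow_work_enqueue(&my_work);
		if (ret < 0)
			slow_work_unregister_user(THIS_MODULE);
		return ret;
	}

	static void __exit my_module_exit(void)
	{
		cancel_slow_work(&my_work);
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(my_module_init);
	module_exit(my_module_exit);
	MODULE_LICENSE("GPL");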
================
HELPER FUNCTIONS
================
The slow-work facility provides a function by which it can be determined
whether or not an item is queued for later execution:
bool queued = slow_work_is_queued(struct slow_work *work);
If it returns false, then the item is not on the queue (it may be executing
with a requeue pending). This can be used to work out whether an item on which
another depends is on the queue, thus allowing a dependent item to be queued
after it.
If the above shows an item on which another depends not to be queued, then the
owner of the dependent item might need to wait.  However, to avoid locking up
the threads unnecessarily by sleeping in them, it can make sense under some
circumstances to return the work item to the queue, thus deferring it until
some other items have had a chance to make use of the yielded thread.
To yield a thread and defer an item, the work function should simply enqueue
the work item again and return. However, this doesn't work if there's nothing
actually on the queue, as the thread just vacated will jump straight back into
the item's work function, thus busy waiting on a CPU.
Instead, the item should use the thread to wait for the dependency to go away,
but rather than using schedule() or schedule_timeout() to sleep, it should use
the following function:
bool requeue = slow_work_sleep_till_thread_needed(
struct slow_work *work,
signed long *_timeout);
This will add a second wait and then sleep, such that it will be woken up if
either something appears on the queue that could usefully make use of the
thread - and behind which this item can be queued, or if the event the caller
set up to wait for happens.  True will be returned if something else appeared
on the queue and this work function should perhaps return, or false if
something else woke it up.  The timeout is as for schedule_timeout().
For example:
wq = bit_waitqueue(&my_flags, MY_BIT);
init_wait(&wait);
requeue = false;
do {
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(MY_BIT, &my_flags))
break;
requeue = slow_work_sleep_till_thread_needed(&my_work,
&timeout);
} while (timeout > 0 && !requeue);
finish_wait(wq, &wait);
if (!test_bit(MY_BIT, &my_flags))
goto do_my_thing;
if (requeue)
return; // to slow_work
===============
@@ -118,7 +208,8 @@ ITEM OPERATIONS
===============
Each work item requires a table of operations of type struct slow_work_ops.
-All members are required:
Only ->execute() is required; the getting and putting of a reference and the
describing of an item are all optional.
(*) Get a reference on an item:
@@ -148,6 +239,16 @@ All members are required:
This should perform the work required of the item.  It may sleep, it may
perform disk I/O and it may wait for locks.
(*) View an item through /proc:
void (*desc)(struct slow_work *work, struct seq_file *m);
If supplied, this should print to 'm' a small string describing the work
the item is to do. This should be no more than about 40 characters, and
shouldn't include a newline character.
See the 'Viewing executing and queued items' section below.
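As a hedged sketch of what such an op might look like (hypothetical names;
assumes CONFIG_SLOW_WORK_PROC=y, <linux/seq_file.h> included and the
my_work_execute op from the earlier example):

	static void my_work_desc(struct slow_work *work, struct seq_file *m)
	{
		/* one short line, no newline, about 40 characters or fewer */
		seq_puts(m, "MYMOD: do lengthy thing");
	}

	static const struct slow_work_ops my_work_ops = {
		.owner	 = THIS_MODULE,
		.execute = my_work_execute,
		.desc	 = my_work_desc,
	};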
==================
POOL CONFIGURATION
==================
@@ -172,3 +273,50 @@ The slow-work thread pool has a number of configurables:
is bounded to between 1 and one fewer than the number of active threads.
This ensures there is always at least one thread that can process very
slow work items, and always at least one thread that won't.
==================================
VIEWING EXECUTING AND QUEUED ITEMS
==================================
If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available:
/proc/slow_work_rq
through which the list of work items being executed and the queues of items to
be executed may be viewed. The owner of a work item is given the chance to
add some information of its own.
The contents look something like the following:
THR PID ITEM ADDR FL MARK DESC
=== ===== ================ == ===== ==========
0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK
1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK
3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN
4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN
vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK
vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK
vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK
vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK
vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK
vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK
vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK
vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK
In the 'THR' column, executing items show the thread they're occupying and
queued items indicate which queue they're on.  'PID' shows the process ID of
a slow-work thread that's executing something.  'FL' shows the work item flags.
'MARK' indicates how long since an item was queued or began executing.  Lastly,
the 'DESC' column permits the owner of an item to give some information.
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
BUG_ON(!vcookie->fscache);
-if (PageFsCache(page)) {
-if (fscache_check_page_write(vcookie->fscache, page)) {
-if (!(gfp & __GFP_WAIT))
-return 0;
-fscache_wait_on_page_write(vcookie->fscache, page);
-}
-fscache_uncache_page(vcookie->fscache, page);
-ClearPageFsCache(page);
-}
-return 1;
return fscache_maybe_release_page(vcookie->fscache, page, gfp);
}
void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
fscache_wait_on_page_write(vcookie->fscache, page);
BUG_ON(!PageLocked(page));
fscache_uncache_page(vcookie->fscache, page);
-ClearPageFsCache(page);
}
}
...
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
fscache_wait_on_page_write(vnode->cache, page);
fscache_uncache_page(vnode->cache, page);
-ClearPageFsCache(page);
}
#endif
@@ -349,18 +348,10 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
/* deny if page is being written to the cache and the caller hasn't
 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
-if (PageFsCache(page)) {
-if (fscache_check_page_write(vnode->cache, page)) {
-if (!(gfp_flags & __GFP_WAIT)) {
if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
_leave(" = F [cache busy]");
return 0;
}
-fscache_wait_on_page_write(vnode->cache, page);
-}
-fscache_uncache_page(vnode->cache, page);
-ClearPageFsCache(page);
-}
#endif
if (PagePrivate(page)) {
...
@@ -114,8 +114,9 @@ static struct fscache_object *cachefiles_alloc_object(
/*
 * attempt to look up the nominated node in this cache
 * - return -ETIMEDOUT to be scheduled again
 */
-static void cachefiles_lookup_object(struct fscache_object *_object)
static int cachefiles_lookup_object(struct fscache_object *_object)
{
struct cachefiles_lookup_data *lookup_data;
struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
cachefiles_attr_changed(&object->fscache);
-if (ret < 0) {
-printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n",
-ret);
if (ret < 0 && ret != -ETIMEDOUT) {
if (ret != -ENOBUFS)
printk(KERN_WARNING
"CacheFiles: Lookup failed error %d\n", ret);
fscache_object_lookup_error(&object->fscache);
}
_leave(" [%d]", ret);
return ret;
}
/*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
}
cache = object->fscache.cache;
fscache_object_destroy(&object->fscache);
kmem_cache_free(cachefiles_object_jar, object);
fscache_object_destroyed(cache);
}
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
if (oi_size == ni_size)
return 0;
-newattrs.ia_size = ni_size;
-newattrs.ia_valid = ATTR_SIZE;
cachefiles_begin_secure(cache, &saved_cred);
mutex_lock(&object->backer->d_inode->i_mutex);
/* if there's an extension to a partial page at the end of the backing
 * file, we need to discard the partial page so that we pick up new
 * data after it */
if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
_debug("discard tail %llx", oi_size);
newattrs.ia_valid = ATTR_SIZE;
newattrs.ia_size = oi_size & PAGE_MASK;
ret = notify_change(object->backer, &newattrs);
if (ret < 0)
goto truncate_failed;
}
newattrs.ia_valid = ATTR_SIZE;
newattrs.ia_size = ni_size;
ret = notify_change(object->backer, &newattrs);
truncate_failed:
mutex_unlock(&object->backer->d_inode->i_mutex);
cachefiles_end_secure(cache, saved_cred);
...
@@ -21,16 +21,80 @@
#include <linux/security.h>
#include "internal.h"
-static int cachefiles_wait_bit(void *flags)
-{
-schedule();
-return 0;
-}
#define CACHEFILES_KEYBUF_SIZE 512
/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
const char *prefix,
u8 *keybuf)
{
struct fscache_cookie *cookie;
unsigned keylen, loop;
printk(KERN_ERR "%sobject: OBJ%x\n",
prefix, object->fscache.debug_id);
printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
prefix, fscache_object_states[object->fscache.state],
object->fscache.flags, object->fscache.work.flags,
object->fscache.events,
object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
prefix, object->fscache.n_ops, object->fscache.n_in_progress,
object->fscache.n_exclusive);
printk(KERN_ERR "%sparent=%p\n",
prefix, object->fscache.parent);
spin_lock(&object->fscache.lock);
cookie = object->fscache.cookie;
if (cookie) {
printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
prefix,
object->fscache.cookie,
object->fscache.cookie->parent,
object->fscache.cookie->netfs_data,
object->fscache.cookie->flags);
if (keybuf)
keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
CACHEFILES_KEYBUF_SIZE);
else
keylen = 0;
} else {
printk(KERN_ERR "%scookie=NULL\n", prefix);
keylen = 0;
}
spin_unlock(&object->fscache.lock);
if (keylen) {
printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
for (loop = 0; loop < keylen; loop++)
printk("%02x", keybuf[loop]);
printk("'\n");
}
}
/*
* dump debugging info about a pair of objects
*/
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
struct cachefiles_object *xobject)
{
u8 *keybuf;
keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
if (object)
__cachefiles_printk_object(object, "", keybuf);
if (xobject)
__cachefiles_printk_object(xobject, "x", keybuf);
kfree(keybuf);
}
/*
 * record the fact that an object is now active
 */
-static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
struct cachefiles_object *object)
{
struct cachefiles_object *xobject;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
try_again:
write_lock(&cache->active_lock);
-if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
printk(KERN_ERR "CacheFiles: Error: Object already active\n");
cachefiles_printk_object(object, NULL);
BUG();
}
dentry = object->dentry;
_p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
rb_insert_color(&object->active_node, &cache->active_nodes);
write_unlock(&cache->active_lock);
-_leave("");
-return;
_leave(" = 0");
return 0;
/* an old object from a previous incarnation is hogging the slot - we
 * need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
printk(KERN_ERR "\n");
printk(KERN_ERR "CacheFiles: Error:"
" Unexpected object collision\n");
-printk(KERN_ERR "xobject: OBJ%x\n",
-xobject->fscache.debug_id);
-printk(KERN_ERR "xobjstate=%s\n",
-fscache_object_states[xobject->fscache.state]);
-printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
-printk(KERN_ERR "xobjevent=%lx [%lx]\n",
-xobject->fscache.events, xobject->fscache.event_mask);
-printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
-xobject->fscache.n_ops, xobject->fscache.n_in_progress,
-xobject->fscache.n_exclusive);
-printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
-xobject->fscache.cookie,
-xobject->fscache.cookie->parent,
-xobject->fscache.cookie->netfs_data,
-xobject->fscache.cookie->flags);
-printk(KERN_ERR "xparent=%p\n",
-xobject->fscache.parent);
-printk(KERN_ERR "object: OBJ%x\n",
-object->fscache.debug_id);
-printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
-object->fscache.cookie,
-object->fscache.cookie->parent,
-object->fscache.cookie->netfs_data,
-object->fscache.cookie->flags);
-printk(KERN_ERR "parent=%p\n",
-object->fscache.parent);
cachefiles_printk_object(object, xobject);
BUG();
}
atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock);
-_debug(">>> wait");
-wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE,
-cachefiles_wait_bit, TASK_UNINTERRUPTIBLE);
-_debug("<<< waited");
if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
wait_queue_head_t *wq;
signed long timeout = 60 * HZ;
wait_queue_t wait;
bool requeue;
/* if the object we're waiting for is queued for processing,
* then just put ourselves on the queue behind it */
if (slow_work_is_queued(&xobject->fscache.work)) {
_debug("queue OBJ%x behind OBJ%x immediately",
object->fscache.debug_id,
xobject->fscache.debug_id);
goto requeue;
}
/* otherwise we sleep until either the object we're waiting for
* is done, or the slow-work facility wants the thread back to
* do other work */
wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
init_wait(&wait);
requeue = false;
do {
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
break;
requeue = slow_work_sleep_till_thread_needed(
&object->fscache.work, &timeout);
} while (timeout > 0 && !requeue);
finish_wait(wq, &wait);
if (requeue &&
test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
_debug("queue OBJ%x behind OBJ%x after wait",
object->fscache.debug_id,
xobject->fscache.debug_id);
goto requeue;
}
if (timeout <= 0) {
printk(KERN_ERR "\n");
printk(KERN_ERR "CacheFiles: Error: Overlong"
" wait for old active object to go away\n");
cachefiles_printk_object(object, xobject);
goto requeue;
}
}
ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
cache->cache.ops->put_object(&xobject->fscache);
goto try_again;
requeue:
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
cache->cache.ops->put_object(&xobject->fscache);
_leave(" = -ETIMEDOUT");
return -ETIMEDOUT;
}
/*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
dir = dget_parent(object->dentry);
-mutex_lock(&dir->d_inode->i_mutex);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
ret = cachefiles_bury_object(cache, dir, object->dentry);
dput(dir);
@@ -307,7 +400,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
/* search the current directory for the element name */
_debug("lookup '%s'", name);
-mutex_lock(&dir->d_inode->i_mutex);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
}
/* note that we're now using this object */
-cachefiles_mark_object_active(cache, object);
ret = cachefiles_mark_object_active(cache, object);
mutex_unlock(&dir->d_inode->i_mutex);
dput(dir);
dir = NULL;
if (ret == -ETIMEDOUT)
goto mark_active_timed_out;
_debug("=== OBTAINED_OBJECT ===");
if (object->new) {
@@ -467,6 +563,10 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
cachefiles_io_error(cache, "Create/mkdir failed");
goto error;
mark_active_timed_out:
_debug("mark active timed out");
goto release_dentry;
check_error:
_debug("check error %d", ret);
write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
write_unlock(&cache->active_lock);
release_dentry:
dput(object->dentry);
object->dentry = NULL;
goto error_out;
@@ -495,9 +595,6 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
error_out2:
dput(dir);
error_out:
-if (ret == -ENOSPC)
-ret = -ENOBUFS;
_leave(" = error %d", -ret);
return ret;
}
...
@@ -40,8 +40,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
_debug("--- monitor %p %lx ---", page, page->flags);
-if (!PageUptodate(page) && !PageError(page))
-dump_stack();
if (!PageUptodate(page) && !PageError(page)) {
/* unlocked, not uptodate and not erroneous? */
_debug("page probably truncated");
}
/* remove from the waitqueue */
list_del(&wait->task_list);
@@ -60,6 +62,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
return 0;
}
/*
* handle a probably truncated page
* - check to see if the page is still relevant and reissue the read if
* possible
* - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
* must wait again and 0 if successful
*/
static int cachefiles_read_reissue(struct cachefiles_object *object,
struct cachefiles_one_read *monitor)
{
struct address_space *bmapping = object->backer->d_inode->i_mapping;
struct page *backpage = monitor->back_page, *backpage2;
int ret;
kenter("{ino=%lx},{%lx,%lx}",
object->backer->d_inode->i_ino,
backpage->index, backpage->flags);
/* skip if the page was truncated away completely */
if (backpage->mapping != bmapping) {
kleave(" = -ENODATA [mapping]");
return -ENODATA;
}
backpage2 = find_get_page(bmapping, backpage->index);
if (!backpage2) {
kleave(" = -ENODATA [gone]");
return -ENODATA;
}
if (backpage != backpage2) {
put_page(backpage2);
kleave(" = -ENODATA [different]");
return -ENODATA;
}
/* the page is still there and we already have a ref on it, so we don't
* need a second */
put_page(backpage2);
INIT_LIST_HEAD(&monitor->op_link);
add_page_wait_queue(backpage, &monitor->monitor);
if (trylock_page(backpage)) {
ret = -EIO;
if (PageError(backpage))
goto unlock_discard;
ret = 0;
if (PageUptodate(backpage))
goto unlock_discard;
kdebug("reissue read");
ret = bmapping->a_ops->readpage(NULL, backpage);
if (ret < 0)
goto unlock_discard;
}
/* but the page may have been read before the monitor was installed, so
* the monitor may miss the event - so we have to ensure that we do get
* one in such a case */
if (trylock_page(backpage)) {
_debug("jumpstart %p {%lx}", backpage, backpage->flags);
unlock_page(backpage);
}
/* it'll reappear on the todo list */
kleave(" = -EINPROGRESS");
return -EINPROGRESS;
unlock_discard:
unlock_page(backpage);
spin_lock_irq(&object->work_lock);
list_del(&monitor->op_link);
spin_unlock_irq(&object->work_lock);
kleave(" = %d", ret);
return ret;
}
/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
@@ -92,20 +172,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
_debug("- copy {%lu}", monitor->back_page->index);
-error = -EIO;
recheck:
if (PageUptodate(monitor->back_page)) {
copy_highpage(monitor->netfs_page, monitor->back_page);
pagevec_add(&pagevec, monitor->netfs_page);
fscache_mark_pages_cached(monitor->op, &pagevec);
error = 0;
-}
-if (error)
} else if (!PageError(monitor->back_page)) {
/* the page has probably been truncated */
error = cachefiles_read_reissue(object, monitor);
if (error == -EINPROGRESS)
goto next;
goto recheck;
} else {
cachefiles_io_error_obj(
object,
"Readpage failed on backing file %lx",
(unsigned long) monitor->back_page->flags);
error = -EIO;
}
page_cache_release(monitor->back_page);
@@ -114,6 +200,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
fscache_put_retrieval(op);
kfree(monitor);
next:
/* let the thread pool have some air occasionally */
max--;
if (max < 0 || need_resched()) {
@@ -333,7 +420,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
-op->op.flags = FSCACHE_OP_FAST;
op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
op->op.flags |= FSCACHE_OP_FAST;
op->op.processor = cachefiles_read_copier;
pagevec_init(&pagevec, 0);
@@ -639,7 +727,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
pagevec_init(&pagevec, 0);
-op->op.flags = FSCACHE_OP_FAST;
op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
op->op.flags |= FSCACHE_OP_FAST;
op->op.processor = cachefiles_read_copier;
INIT_LIST_HEAD(&backpages);
@@ -801,7 +890,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
struct cachefiles_cache *cache;
mm_segment_t old_fs;
struct file *file;
-loff_t pos;
loff_t pos, eof;
size_t len;
void *data;
int ret;
@@ -835,15 +925,29 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
ret = -EIO;
if (file->f_op->write) {
pos = (loff_t) page->index << PAGE_SHIFT;
/* we mustn't write more data than we have, so we have
* to beware of a partial page at EOF */
eof = object->fscache.store_limit_l;
len = PAGE_SIZE;
if (eof & ~PAGE_MASK) {
ASSERTCMP(pos, <, eof);
if (eof - pos < PAGE_SIZE) {
_debug("cut short %llx to %llx",
pos, eof);
len = eof - pos;
ASSERTCMP(pos + len, ==, eof);
}
}
data = kmap(page);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = file->f_op->write(
-file, (const void __user *) data, PAGE_SIZE,
-&pos);
file, (const void __user *) data, len, &pos);
set_fs(old_fs);
kunmap(page);
-if (ret != PAGE_SIZE)
if (ret != len)
ret = -EIO;
}
fput(file);
...
@@ -1037,7 +1037,7 @@ init_cifs(void)
if (rc)
goto out_unregister_key_type;
#endif
-rc = slow_work_register_user();
rc = slow_work_register_user(THIS_MODULE);
if (rc)
goto out_unregister_resolver_key;
...
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
enabled by setting bits in /sys/modules/fscache/parameter/debug.
See Documentation/filesystems/caching/fscache.txt for more information.
config FSCACHE_OBJECT_LIST
bool "Maintain global object list for debugging purposes"
depends on FSCACHE && PROC_FS
help
Maintain a global list of active fscache objects that can be
retrieved through /proc/fs/fscache/objects for debugging purposes
@@ -15,5 +15,6 @@ fscache-y := \
fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
obj-$(CONFIG_FSCACHE) := fscache.o
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
spin_lock(&cache->object_list_lock);
list_add_tail(&ifsdef->cache_link, &cache->object_list);
spin_unlock(&cache->object_list_lock);
fscache_objlist_add(ifsdef);
/* add the cache's netfs definition index object to the top level index
 * cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
/* make sure all pages pinned by operations on behalf of the netfs are
 * written to disk */
fscache_stat(&fscache_n_cop_sync_cache);
cache->ops->sync_cache(cache);
fscache_stat_d(&fscache_n_cop_sync_cache);
/* dissociate all the netfs pages backed by this cache from the block
 * mappings in the cache */
fscache_stat(&fscache_n_cop_dissociate_pages);
cache->ops->dissociate_pages(cache);
fscache_stat_d(&fscache_n_cop_dissociate_pages);
/* we now have to destroy all the active objects pertaining to this
 * cache - which we do by passing them off to thread pool to be
...
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
memset(cookie, 0, sizeof(*cookie));
spin_lock_init(&cookie->lock);
spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects);
}
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
cookie->netfs_data = netfs_data;
cookie->flags = 0;
-INIT_RADIX_TREE(&cookie->stores, GFP_NOFS);
/* radix tree insertion won't use the preallocation pool unless it's
 * told it may not wait */
INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
switch (cookie->def->type) {
case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
/* ask the cache to allocate an object (we may end up with duplicate
 * objects at this stage, but we sort that out later) */
fscache_stat(&fscache_n_cop_alloc_object);
object = cache->ops->alloc_object(cache, cookie);
fscache_stat_d(&fscache_n_cop_alloc_object);
if (IS_ERR(object)) {
fscache_stat(&fscache_n_object_no_alloc);
ret = PTR_ERR(object);
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
/* only attach if we managed to allocate all we needed, otherwise
 * discard the object we just allocated and instead use the one
 * attached to the cookie */
-if (fscache_attach_object(cookie, object) < 0)
if (fscache_attach_object(cookie, object) < 0) {
fscache_stat(&fscache_n_cop_put_object);
cache->ops->put_object(object);
fscache_stat_d(&fscache_n_cop_put_object);
}
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
...@@ -287,7 +295,9 @@ static int fscache_alloc_object(struct fscache_cache *cache, ...@@ -287,7 +295,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
return 0; return 0;
error_put: error_put:
fscache_stat(&fscache_n_cop_put_object);
cache->ops->put_object(object); cache->ops->put_object(object);
fscache_stat_d(&fscache_n_cop_put_object);
error: error:
_leave(" = %d", ret); _leave(" = %d", ret);
return ret; return ret;
...@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie, ...@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
object->cookie = cookie; object->cookie = cookie;
atomic_inc(&cookie->usage); atomic_inc(&cookie->usage);
hlist_add_head(&object->cookie_link, &cookie->backing_objects); hlist_add_head(&object->cookie_link, &cookie->backing_objects);
fscache_objlist_add(object);
ret = 0;
cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
unsigned long event;
fscache_stat(&fscache_n_relinquishes);
if (retire)
fscache_stat(&fscache_n_relinquishes_retire);
if (!cookie) {
fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
-/* detach pointers back to the netfs */
spin_lock(&cookie->lock);
-cookie->netfs_data = NULL;
-cookie->def = NULL;
/* break links with all the active objects */
while (!hlist_empty(&cookie->backing_objects)) {
object = hlist_entry(cookie->backing_objects.first,
BUG();
}
/* detach pointers back to the netfs */
cookie->netfs_data = NULL;
cookie->def = NULL;
spin_unlock(&cookie->lock);
if (cookie->parent) {
...
@@ -17,6 +17,7 @@
 * - cache->object_list_lock
 * - object->lock
 * - object->parent->lock
 * - cookie->stores_lock
 * - fscache_thread_lock
 *
 */
@@ -88,10 +89,23 @@ extern int fscache_wait_bit_interruptible(void *);
/*
 * object.c
 */
extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
extern void fscache_withdrawing_object(struct fscache_cache *,
struct fscache_object *);
extern void fscache_enqueue_object(struct fscache_object *);
/*
* object-list.c
*/
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern const struct file_operations fscache_objlist_fops;
extern void fscache_objlist_add(struct fscache_object *);
#else
#define fscache_objlist_add(object) do {} while(0)
#endif
/*
 * operation.c
 */
@@ -99,6 +113,7 @@ extern int fscache_submit_exclusive_op(struct fscache_object *,
struct fscache_operation *);
extern int fscache_submit_op(struct fscache_object *,
struct fscache_operation *);
extern int fscache_cancel_op(struct fscache_operation *);
extern void fscache_abort_object(struct fscache_object *);
extern void fscache_start_operations(struct fscache_object *);
extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
extern atomic_t fscache_n_op_deferred_release;
extern atomic_t fscache_n_op_release;
extern atomic_t fscache_n_op_gc;
extern atomic_t fscache_n_op_cancelled;
extern atomic_t fscache_n_op_rejected;
extern atomic_t fscache_n_attr_changed;
extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
extern atomic_t fscache_n_allocs_ok;
extern atomic_t fscache_n_allocs_wait;
extern atomic_t fscache_n_allocs_nobufs;
extern atomic_t fscache_n_allocs_intr;
extern atomic_t fscache_n_allocs_object_dead;
extern atomic_t fscache_n_alloc_ops;
extern atomic_t fscache_n_alloc_op_waits;
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
extern atomic_t fscache_n_retrievals_nobufs;
extern atomic_t fscache_n_retrievals_intr;
extern atomic_t fscache_n_retrievals_nomem;
extern atomic_t fscache_n_retrievals_object_dead;
extern atomic_t fscache_n_retrieval_ops;
extern atomic_t fscache_n_retrieval_op_waits;
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
extern atomic_t fscache_n_stores_oom;
extern atomic_t fscache_n_store_ops;
extern atomic_t fscache_n_store_calls;
extern atomic_t fscache_n_store_pages;
extern atomic_t fscache_n_store_radix_deletes;
extern atomic_t fscache_n_store_pages_over_limit;
extern atomic_t fscache_n_store_vmscan_not_storing;
extern atomic_t fscache_n_store_vmscan_gone;
extern atomic_t fscache_n_store_vmscan_busy;
extern atomic_t fscache_n_store_vmscan_cancelled;
extern atomic_t fscache_n_marks;
extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_null;
extern atomic_t fscache_n_relinquishes_waitcrt;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_cookie_index;
extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
extern atomic_t fscache_n_object_lookups;
extern atomic_t fscache_n_object_lookups_negative;
extern atomic_t fscache_n_object_lookups_positive;
extern atomic_t fscache_n_object_lookups_timed_out;
extern atomic_t fscache_n_object_created;
extern atomic_t fscache_n_object_avail;
extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
extern atomic_t fscache_n_checkaux_update;
extern atomic_t fscache_n_checkaux_obsolete;
extern atomic_t fscache_n_cop_alloc_object;
extern atomic_t fscache_n_cop_lookup_object;
extern atomic_t fscache_n_cop_lookup_complete;
extern atomic_t fscache_n_cop_grab_object;
extern atomic_t fscache_n_cop_update_object;
extern atomic_t fscache_n_cop_drop_object;
extern atomic_t fscache_n_cop_put_object;
extern atomic_t fscache_n_cop_sync_cache;
extern atomic_t fscache_n_cop_attr_changed;
extern atomic_t fscache_n_cop_read_or_alloc_page;
extern atomic_t fscache_n_cop_read_or_alloc_pages;
extern atomic_t fscache_n_cop_allocate_page;
extern atomic_t fscache_n_cop_allocate_pages;
extern atomic_t fscache_n_cop_write_page;
extern atomic_t fscache_n_cop_uncache_page;
extern atomic_t fscache_n_cop_dissociate_pages;
static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}
static inline void fscache_stat_d(atomic_t *stat)
{
atomic_dec(stat);
}
#define __fscache_stat(stat) (stat)
extern const struct file_operations fscache_stats_fops;
#else
#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)
#endif
/*
...
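The fscache_stat_d() decrement added above pairs with fscache_stat() so that a counter can serve as a gauge of calls currently in progress rather than a running total. A minimal sketch of the bracketing pattern, taken from the way this series wraps the cache operations:

	/* count lookup_object() calls in flight: increment on entry to the
	 * backend call, decrement again when it returns */
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

With CONFIG_FSCACHE_STATS=n both calls compile away to no-ops, so the bracketing costs nothing in that configuration.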
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
{
	int ret;
	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		goto error_slow_work;
@@ -80,7 +80,7 @@ static int __init fscache_init(void)
error_cookie_jar:
	fscache_proc_cleanup();
error_proc:
	slow_work_unregister_user(THIS_MODULE);
error_slow_work:
	return ret;
}
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
	kobject_put(fscache_root);
	kmem_cache_destroy(fscache_cookie_jar);
	fscache_proc_cleanup();
	slow_work_unregister_user(THIS_MODULE);
	printk(KERN_NOTICE "FS-Cache: Unloaded\n");
}
...
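With this change a slow-work user identifies itself by module, so the core can pin the module while executing its items. A minimal sketch of the updated pairing in a module's init and exit paths (the function names are illustrative):

	static int __init example_init(void)
	{
		int ret;

		/* tell the slow-work core who we are so it can take a
		 * reference on this module while running our items */
		ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;
		return 0;
	}

	static void __exit example_exit(void)
	{
		slow_work_unregister_user(THIS_MODULE);
	}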
@@ -14,9 +14,10 @@
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/seq_file.h>
#include "internal.h"
const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
	[FSCACHE_OBJECT_INIT] = "OBJECT_INIT",
	[FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP",
	[FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
};
EXPORT_SYMBOL(fscache_object_states);
const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
[FSCACHE_OBJECT_INIT] = "INIT",
[FSCACHE_OBJECT_LOOKING_UP] = "LOOK",
[FSCACHE_OBJECT_CREATING] = "CRTN",
[FSCACHE_OBJECT_AVAILABLE] = "AVBL",
[FSCACHE_OBJECT_ACTIVE] = "ACTV",
[FSCACHE_OBJECT_UPDATING] = "UPDT",
[FSCACHE_OBJECT_DYING] = "DYNG",
[FSCACHE_OBJECT_LC_DYING] = "LCDY",
[FSCACHE_OBJECT_ABORT_INIT] = "ABTI",
[FSCACHE_OBJECT_RELEASING] = "RELS",
[FSCACHE_OBJECT_RECYCLING] = "RCYC",
[FSCACHE_OBJECT_WITHDRAWING] = "WTHD",
[FSCACHE_OBJECT_DEAD] = "DEAD",
};
static void fscache_object_slow_work_put_ref(struct slow_work *);
static int fscache_object_slow_work_get_ref(struct slow_work *);
static void fscache_object_slow_work_execute(struct slow_work *);
#ifdef CONFIG_SLOW_WORK_PROC
static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
#endif
static void fscache_initialise_object(struct fscache_object *);
static void fscache_lookup_object(struct fscache_object *);
static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
static void fscache_dequeue_object(struct fscache_object *);
const struct slow_work_ops fscache_object_slow_work_ops = {
.owner = THIS_MODULE,
	.get_ref = fscache_object_slow_work_get_ref,
	.put_ref = fscache_object_slow_work_put_ref,
	.execute = fscache_object_slow_work_execute,
#ifdef CONFIG_SLOW_WORK_PROC
.desc = fscache_object_slow_work_desc,
#endif
};
EXPORT_SYMBOL(fscache_object_slow_work_ops);
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
static void fscache_object_state_machine(struct fscache_object *object)
{
	enum fscache_object_state new_state;
	struct fscache_cookie *cookie;
	ASSERT(object != NULL);
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
	case FSCACHE_OBJECT_UPDATING:
		clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
		fscache_stat(&fscache_n_updates_run);
		fscache_stat(&fscache_n_cop_update_object);
		object->cache->ops->update_object(object);
		fscache_stat_d(&fscache_n_cop_update_object);
		goto active_transit;
	/* handle an object dying during lookup or creation */
	case FSCACHE_OBJECT_LC_DYING:
		object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
		fscache_stat(&fscache_n_cop_lookup_complete);
		object->cache->ops->lookup_complete(object);
		fscache_stat_d(&fscache_n_cop_lookup_complete);
		spin_lock(&object->lock);
		object->state = FSCACHE_OBJECT_DYING;
cookie = object->cookie;
if (cookie) {
if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
&cookie->flags))
wake_up_bit(&cookie->flags,
FSCACHE_COOKIE_LOOKING_UP);
			if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
					       &cookie->flags))
				wake_up_bit(&cookie->flags,
					    FSCACHE_COOKIE_CREATING);
}
		spin_unlock(&object->lock);
		fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
		}
		spin_unlock(&object->lock);
		fscache_enqueue_dependents(object);
		fscache_start_operations(object);
		goto terminal_transit;
	/* handle an abort during initialisation */
@@ -316,15 +353,30 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
	_enter("{OBJ%x}", object->debug_id);
	clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
	start = jiffies;
	fscache_object_state_machine(object);
	fscache_hist(fscache_objs_histogram, start);
	if (object->events & object->event_mask)
		fscache_enqueue_object(object);
	clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
}
/*
* describe an object for slow-work debugging
*/
#ifdef CONFIG_SLOW_WORK_PROC
static void fscache_object_slow_work_desc(struct slow_work *work,
struct seq_file *m)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
seq_printf(m, "FSC: OBJ%x: %s",
object->debug_id,
fscache_object_states_short[object->state]);
}
#endif
/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
			 * binding on to us, so we need to make sure we don't
			 * add ourself to the list multiple times */
			if (list_empty(&object->dep_link)) {
				fscache_stat(&fscache_n_cop_grab_object);
				object->cache->ops->grab_object(object);
				fscache_stat_d(&fscache_n_cop_grab_object);
				list_add(&object->dep_link,
					 &parent->dependents);
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent;
	int ret;
	_enter("");
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
	       object->cache->tag->name);
	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);
	if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
		set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
if (ret == -ETIMEDOUT) {
/* probably stuck behind another object, so move this one to
* the back of the queue */
fscache_stat(&fscache_n_object_lookups_timed_out);
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
}
_leave(""); _leave("");
} }
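lookup_object() is now allowed to give up and return -ETIMEDOUT, in which case the code above sets FSCACHE_OBJECT_EV_REQUEUE and the object goes to the back of the slow-work queue instead of monopolising a thread. A hedged sketch of the backend side, with an illustrative wait helper and timeout that are not part of this patch:

	static int example_lookup_object(struct fscache_object *object)
	{
		/* don't hog a slow-work thread if the lookup is stuck behind
		 * another object; ask to be requeued instead */
		if (!example_wait_for_lookup_slot(object, 10 * HZ))
			return -ETIMEDOUT;

		example_do_lookup(object);
		return 0;
	}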
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
	spin_lock(&object->lock);
	if (object->cookie &&
	    test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
		wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
	fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
	}
	spin_unlock(&object->lock);
	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);
	fscache_enqueue_dependents(object);
	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
	_enter("{OBJ%x,%d}", object->debug_id, object->n_children);
	ASSERTCMP(object->cookie, ==, NULL);
	ASSERT(hlist_unhashed(&object->cookie_link));
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);
	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);
	if (parent) {
		_debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
	}
	/* this just shifts the object release to the slow work processor */
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
	_leave("");
}
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
{
	struct fscache_object *object =
		container_of(work, struct fscache_object, work);
	int ret;
	fscache_stat(&fscache_n_cop_grab_object);
	ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
	fscache_stat_d(&fscache_n_cop_grab_object);
	return ret;
}
/*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
	struct fscache_object *object =
		container_of(work, struct fscache_object, work);
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
}
/*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
		/* sort onto appropriate lists */
		fscache_enqueue_object(dep);
		fscache_stat(&fscache_n_cop_put_object);
		dep->cache->ops->put_object(dep);
		fscache_stat_d(&fscache_n_cop_put_object);
		if (!list_empty(&object->dependents))
			cond_resched_lock(&object->lock);
...
@@ -13,6 +13,7 @@
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t fscache_op_debug_id;
@@ -31,11 +32,14 @@ void fscache_enqueue_operation(struct fscache_operation *op)
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
	fscache_set_op_state(op, "EnQ");
	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_FAST:
		_debug("queue fast");
@@ -56,8 +60,6 @@ void fscache_enqueue_operation(struct fscache_operation *op)
		BUG();
		break;
	}
	fscache_stat(&fscache_n_op_enqueue);
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	fscache_set_op_state(op, "Run");
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
	fscache_set_op_state(op, "SubmitX");
	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));
	ret = -ENOBUFS;
	if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	fscache_set_op_state(op, "Submit");
	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));
	ostate = object->state;
	smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
} else if (object->state == FSCACHE_OBJECT_DYING ||
object->state == FSCACHE_OBJECT_LC_DYING ||
object->state == FSCACHE_OBJECT_WITHDRAWING) {
fscache_stat(&fscache_n_op_rejected);
ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
if (op->processor)
fscache_enqueue_operation(op);
		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
@@ -281,6 +291,36 @@ void fscache_start_operations(struct fscache_object *object)
	       object->n_in_progress, object->debug_id);
}
/*
* cancel an operation that's pending on an object
*/
int fscache_cancel_op(struct fscache_operation *op)
{
struct fscache_object *object = op->object;
int ret;
_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
spin_lock(&object->lock);
ret = -EBUSY;
if (!list_empty(&op->pend_link)) {
fscache_stat(&fscache_n_op_cancelled);
list_del_init(&op->pend_link);
object->n_ops--;
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
object->n_exclusive--;
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
fscache_put_operation(op);
ret = 0;
}
spin_unlock(&object->lock);
_leave(" = %d", ret);
return ret;
}
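fscache_cancel_op() lets a caller that submitted an operation back out before the operation is granted CPU time; -EBUSY means the op has already left the pending queue and must be allowed to run. A hedged sketch of the intended calling pattern on an interruptible wait (the wait-bit action helpers are illustrative):

	static int example_wait_for_op(struct fscache_operation *op)
	{
		/* wait for a submitted op to start; bail out if signalled */
		if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
				example_wait_bit_interruptible,
				TASK_INTERRUPTIBLE) != 0) {
			/* interrupted: try to take the op back off the
			 * object's pending queue */
			if (fscache_cancel_op(op) == 0)
				return -ERESTARTSYS;

			/* too late to cancel - wait uninterruptibly instead */
			wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
				    example_wait_bit, TASK_UNINTERRUPTIBLE);
		}
		return 0;
	}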
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
	if (!atomic_dec_and_test(&op->usage))
		return;
	fscache_set_op_state(op, "Put");
	_debug("PUT OP");
	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
		BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
	object = op->object;
	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);
	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
	_leave("");
}
/*
* describe an operation for slow-work debugging
*/
#ifdef CONFIG_SLOW_WORK_PROC
static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
{
struct fscache_operation *op =
container_of(work, struct fscache_operation, slow_work);
seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
op->object->debug_id, op->debug_id,
op->name, op->state, op->flags);
}
#endif
const struct slow_work_ops fscache_op_slow_work_ops = {
	.owner = THIS_MODULE,
	.get_ref = fscache_op_get_ref,
	.put_ref = fscache_op_put_ref,
	.execute = fscache_op_execute,
#ifdef CONFIG_SLOW_WORK_PROC
.desc = fscache_op_desc,
#endif
};
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
		goto error_histogram;
#endif
#ifdef CONFIG_FSCACHE_OBJECT_LIST
if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
&fscache_objlist_fops))
goto error_objects;
#endif
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
error_objects:
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
	remove_proc_entry("fs/fscache/histogram", NULL);
error_histogram:
#endif
#ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ int __init fscache_proc_init(void)
 */
void fscache_proc_cleanup(void)
{
#ifdef CONFIG_FSCACHE_OBJECT_LIST
remove_proc_entry("fs/fscache/objects", NULL);
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
	remove_proc_entry("fs/fscache/histogram", NULL);
#endif
...
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
atomic_t fscache_n_op_deferred_release;
atomic_t fscache_n_op_release;
atomic_t fscache_n_op_gc;
atomic_t fscache_n_op_cancelled;
atomic_t fscache_n_op_rejected;
atomic_t fscache_n_attr_changed;
atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
atomic_t fscache_n_allocs_ok;
atomic_t fscache_n_allocs_wait;
atomic_t fscache_n_allocs_nobufs;
atomic_t fscache_n_allocs_intr;
atomic_t fscache_n_allocs_object_dead;
atomic_t fscache_n_alloc_ops;
atomic_t fscache_n_alloc_op_waits;
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
atomic_t fscache_n_retrievals_nobufs;
atomic_t fscache_n_retrievals_intr;
atomic_t fscache_n_retrievals_nomem;
atomic_t fscache_n_retrievals_object_dead;
atomic_t fscache_n_retrieval_ops;
atomic_t fscache_n_retrieval_op_waits;
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
atomic_t fscache_n_stores_oom;
atomic_t fscache_n_store_ops;
atomic_t fscache_n_store_calls;
atomic_t fscache_n_store_pages;
atomic_t fscache_n_store_radix_deletes;
atomic_t fscache_n_store_pages_over_limit;
atomic_t fscache_n_store_vmscan_not_storing;
atomic_t fscache_n_store_vmscan_gone;
atomic_t fscache_n_store_vmscan_busy;
atomic_t fscache_n_store_vmscan_cancelled;
atomic_t fscache_n_marks;
atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
atomic_t fscache_n_relinquishes;
atomic_t fscache_n_relinquishes_null;
atomic_t fscache_n_relinquishes_waitcrt;
atomic_t fscache_n_relinquishes_retire;
atomic_t fscache_n_cookie_index;
atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
atomic_t fscache_n_object_lookups;
atomic_t fscache_n_object_lookups_negative;
atomic_t fscache_n_object_lookups_positive;
atomic_t fscache_n_object_lookups_timed_out;
atomic_t fscache_n_object_created;
atomic_t fscache_n_object_avail;
atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
atomic_t fscache_n_checkaux_update;
atomic_t fscache_n_checkaux_obsolete;
atomic_t fscache_n_cop_alloc_object;
atomic_t fscache_n_cop_lookup_object;
atomic_t fscache_n_cop_lookup_complete;
atomic_t fscache_n_cop_grab_object;
atomic_t fscache_n_cop_update_object;
atomic_t fscache_n_cop_drop_object;
atomic_t fscache_n_cop_put_object;
atomic_t fscache_n_cop_sync_cache;
atomic_t fscache_n_cop_attr_changed;
atomic_t fscache_n_cop_read_or_alloc_page;
atomic_t fscache_n_cop_read_or_alloc_pages;
atomic_t fscache_n_cop_allocate_page;
atomic_t fscache_n_cop_allocate_pages;
atomic_t fscache_n_cop_write_page;
atomic_t fscache_n_cop_uncache_page;
atomic_t fscache_n_cop_dissociate_pages;
/*
 * display the general statistics
 */
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
		   atomic_read(&fscache_n_acquires_nobufs),
		   atomic_read(&fscache_n_acquires_oom));
	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
		   atomic_read(&fscache_n_object_lookups),
		   atomic_read(&fscache_n_object_lookups_negative),
		   atomic_read(&fscache_n_object_lookups_positive),
atomic_read(&fscache_n_object_lookups_timed_out),
		   atomic_read(&fscache_n_object_created));
	seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
		   atomic_read(&fscache_n_updates_null),
		   atomic_read(&fscache_n_updates_run));
	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
		   atomic_read(&fscache_n_relinquishes),
		   atomic_read(&fscache_n_relinquishes_null),
		   atomic_read(&fscache_n_relinquishes_waitcrt),
		   atomic_read(&fscache_n_relinquishes_retire));
	seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
		   atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
		   atomic_read(&fscache_n_attr_changed_nomem),
		   atomic_read(&fscache_n_attr_changed_calls));
	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
		   atomic_read(&fscache_n_allocs),
		   atomic_read(&fscache_n_allocs_ok),
		   atomic_read(&fscache_n_allocs_wait),
		   atomic_read(&fscache_n_allocs_nobufs),
		   atomic_read(&fscache_n_allocs_intr));
	seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
		   atomic_read(&fscache_n_alloc_ops),
		   atomic_read(&fscache_n_alloc_op_waits),
		   atomic_read(&fscache_n_allocs_object_dead));
seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
" int=%u oom=%u\n", " int=%u oom=%u\n",
...@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v) ...@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_retrievals_nobufs), atomic_read(&fscache_n_retrievals_nobufs),
atomic_read(&fscache_n_retrievals_intr), atomic_read(&fscache_n_retrievals_intr),
atomic_read(&fscache_n_retrievals_nomem)); atomic_read(&fscache_n_retrievals_nomem));
seq_printf(m, "Retrvls: ops=%u owt=%u\n", seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
atomic_read(&fscache_n_retrieval_ops), atomic_read(&fscache_n_retrieval_ops),
atomic_read(&fscache_n_retrieval_op_waits)); atomic_read(&fscache_n_retrieval_op_waits),
atomic_read(&fscache_n_retrievals_object_dead));
seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
atomic_read(&fscache_n_stores), atomic_read(&fscache_n_stores),
...@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v) ...@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_stores_again), atomic_read(&fscache_n_stores_again),
atomic_read(&fscache_n_stores_nobufs), atomic_read(&fscache_n_stores_nobufs),
atomic_read(&fscache_n_stores_oom)); atomic_read(&fscache_n_stores_oom));
seq_printf(m, "Stores : ops=%u run=%u\n", seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
atomic_read(&fscache_n_store_ops), atomic_read(&fscache_n_store_ops),
atomic_read(&fscache_n_store_calls)); atomic_read(&fscache_n_store_calls),
atomic_read(&fscache_n_store_pages),
atomic_read(&fscache_n_store_radix_deletes),
atomic_read(&fscache_n_store_pages_over_limit));
seq_printf(m, "Ops : pend=%u run=%u enq=%u\n", seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
atomic_read(&fscache_n_store_vmscan_not_storing),
atomic_read(&fscache_n_store_vmscan_gone),
atomic_read(&fscache_n_store_vmscan_busy),
atomic_read(&fscache_n_store_vmscan_cancelled));
seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
		   atomic_read(&fscache_n_op_pend),
		   atomic_read(&fscache_n_op_run),
		   atomic_read(&fscache_n_op_enqueue),
atomic_read(&fscache_n_op_cancelled),
atomic_read(&fscache_n_op_rejected));
seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
atomic_read(&fscache_n_op_deferred_release), atomic_read(&fscache_n_op_deferred_release),
atomic_read(&fscache_n_op_release), atomic_read(&fscache_n_op_release),
atomic_read(&fscache_n_op_gc)); atomic_read(&fscache_n_op_gc));
seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
atomic_read(&fscache_n_cop_alloc_object),
atomic_read(&fscache_n_cop_lookup_object),
atomic_read(&fscache_n_cop_lookup_complete),
atomic_read(&fscache_n_cop_grab_object));
seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
atomic_read(&fscache_n_cop_update_object),
atomic_read(&fscache_n_cop_drop_object),
atomic_read(&fscache_n_cop_put_object),
atomic_read(&fscache_n_cop_attr_changed),
atomic_read(&fscache_n_cop_sync_cache));
seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
atomic_read(&fscache_n_cop_read_or_alloc_page),
atomic_read(&fscache_n_cop_read_or_alloc_pages),
atomic_read(&fscache_n_cop_allocate_page),
atomic_read(&fscache_n_cop_allocate_pages),
atomic_read(&fscache_n_cop_write_page),
atomic_read(&fscache_n_cop_uncache_page),
atomic_read(&fscache_n_cop_dissociate_pages));
	return 0;
}
...
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
	if (error)
		goto fail_unregister;
	error = slow_work_register_user(THIS_MODULE);
	if (error)
		goto fail_slow;
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
	gfs2_unregister_debugfs();
	unregister_filesystem(&gfs2_fs_type);
	unregister_filesystem(&gfs2meta_fs_type);
	slow_work_unregister_user(THIS_MODULE);
	kmem_cache_destroy(gfs2_quotad_cachep);
	kmem_cache_destroy(gfs2_rgrpd_cachep);
...
@@ -7,6 +7,7 @@
 * of the GNU General Public License version 2.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
@@ -593,6 +594,7 @@ static void gfs2_recover_work(struct slow_work *work)
}
struct slow_work_ops gfs2_recover_ops = {
	.owner = THIS_MODULE,
	.get_ref = gfs2_recover_get_ref,
	.put_ref = gfs2_recover_put_ref,
	.execute = gfs2_recover_work,
...
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
	BUG_ON(!cookie);
if (fscache_check_page_write(cookie, page)) {
if (!(gfp & __GFP_WAIT))
return 0;
fscache_wait_on_page_write(cookie, page);
}
	if (PageFsCache(page)) {
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, nfsi);
		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;
		nfs_add_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
	}
...
@@ -91,6 +91,8 @@ struct fscache_operation {
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
	atomic_t usage;
	unsigned debug_id; /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {
	/* operation releaser */
	fscache_operation_release_t release;
#ifdef CONFIG_SLOW_WORK_PROC
const char *name; /* operation name */
const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
#else
#define fscache_set_op_name(OP, N) do { } while(0)
#define fscache_set_op_state(OP, S) do { } while(0)
#endif
};
extern atomic_t fscache_op_debug_id;
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
fscache_set_op_state(op, "Init");
}
/**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
	struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
					       struct fscache_cookie *cookie);
	/* look up the object for a cookie
	 * - return -ETIMEDOUT to be requeued
*/
int (*lookup_object)(struct fscache_object *object);
	/* finished looking up */
	void (*lookup_complete)(struct fscache_object *object);
@@ -297,12 +312,14 @@ struct fscache_cookie {
	atomic_t usage; /* number of users of this cookie */
	atomic_t n_children; /* number of children of this cookie */
	spinlock_t lock;
	spinlock_t stores_lock; /* lock on page store tree */
	struct hlist_head backing_objects; /* object(s) backing this file/index */
	const struct fscache_cookie_def *def; /* definition */
	struct fscache_cookie *parent; /* parent of this entry */
	void *netfs_data; /* back pointer to netfs */
	struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
	unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
@@ -337,6 +354,7 @@ struct fscache_object {
	FSCACHE_OBJECT_RECYCLING, /* retiring object */
	FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
	FSCACHE_OBJECT_DEAD, /* object is now dead */
FSCACHE_OBJECT__NSTATES
	} state;
	int debug_id; /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
	int n_obj_ops; /* number of object ops outstanding on object */
	int n_in_progress; /* number of ops in progress */
	int n_exclusive; /* number of exclusive ops queued */
atomic_t n_reads; /* number of read ops in progress */
	spinlock_t lock; /* state and operations lock */
	unsigned long lookup_jif; /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/
	unsigned long flags;
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
@@ -373,7 +393,11 @@ struct fscache_object {
	struct list_head dependents; /* FIFO of dependent objects */
	struct list_head dep_link; /* link in parent's dependents list */
	struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
	pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};
extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
	(obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
	(obj)->state < FSCACHE_OBJECT_DYING)
#define fscache_object_is_dead(obj) \
(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_DYING)
extern const struct slow_work_ops fscache_object_slow_work_ops;
/**
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
	object->events = object->event_mask = 0;
	object->flags = 0;
	object->store_limit = 0;
object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
extern void fscache_object_lookup_negative(struct fscache_object *object);
extern void fscache_obtained_object(struct fscache_object *object);
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern void fscache_object_destroy(struct fscache_object *object);
#else
#define fscache_object_destroy(object) do {} while(0)
#endif
/**
 * fscache_object_destroyed - Note destruction of an object in a cache
 * @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
static inline
void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
{
object->store_limit_l = i_size;
	object->store_limit = i_size >> PAGE_SHIFT;
	if (i_size & ~PAGE_MASK)
		object->store_limit++;
...
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
gfp_t);
/**
 * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
	__fscache_wait_on_page_write(cookie, page);
}
/**
* fscache_maybe_release_page - Consider releasing a page, cancelling a store
* @cookie: The cookie representing the cache object
* @page: The netfs page that is being cached.
* @gfp: The gfp flags passed to releasepage()
*
* Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
 * releasepage() call. A storage request on the page may be cancelled if it is
* not currently being processed.
*
* The function returns true if the page no longer has a storage request on it,
* and false if a storage request is left in place. If true is returned, the
* page will have been passed to fscache_uncache_page(). If false is returned
* the page cannot be freed yet.
*/
static inline
bool fscache_maybe_release_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie) && PageFsCache(page))
return __fscache_maybe_release_page(cookie, page, gfp);
return false;
}
#endif /* _LINUX_FSCACHE_H */
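The NFS hunk above shows a real conversion; generically, a netfs releasepage() implementation now reduces to something like the following sketch (the cookie accessor is illustrative):

	static int example_releasepage(struct page *page, gfp_t gfp)
	{
		struct fscache_cookie *cookie = example_cookie_of_page(page);

		if (PageFsCache(page)) {
			/* false means a store couldn't be cancelled and is
			 * still using the page, so it can't be released yet */
			if (!fscache_maybe_release_page(cookie, page, gfp))
				return 0;
		}
		return 1;
	}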
@@ -17,13 +17,20 @@
#ifdef CONFIG_SLOW_WORK
#include <linux/sysctl.h>
#include <linux/timer.h>
struct slow_work;
#ifdef CONFIG_SLOW_WORK_PROC
struct seq_file;
#endif
/*
 * The operations used to support slow work items
 */
struct slow_work_ops {
/* owner */
struct module *owner;
	/* get a ref on a work item
	 * - return 0 if successful, -ve if not
	 */
@@ -34,6 +41,11 @@ struct slow_work_ops {
	/* execute a work item */
	void (*execute)(struct slow_work *work);
#ifdef CONFIG_SLOW_WORK_PROC
/* describe a work item for /proc */
void (*desc)(struct slow_work *work, struct seq_file *m);
#endif
};
/*
@@ -42,13 +54,24 @@ struct slow_work_ops {
 * queued
 */
struct slow_work {
struct module *owner; /* the owning module */
	unsigned long flags;
#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
#define SLOW_WORK_EXECUTING 1 /* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
	const struct slow_work_ops *ops; /* operations table for this item */
	struct list_head link; /* link in queue */
#ifdef CONFIG_SLOW_WORK_PROC
struct timespec mark; /* jiffies at which queued or exec begun */
#endif
};
struct delayed_slow_work {
struct slow_work work;
struct timer_list timer;
};
/**
@@ -66,6 +89,20 @@ static inline void slow_work_init(struct slow_work *work,
	INIT_LIST_HEAD(&work->link);
}
/**
 * delayed_slow_work_init - Initialise a delayed slow work item
 * @dwork: The delayed work item to initialise
* @ops: The operations to use to handle the slow work item
*
* Initialise a delayed slow work item.
*/
static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
const struct slow_work_ops *ops)
{
init_timer(&dwork->timer);
slow_work_init(&dwork->work, ops);
}
/**
 * vslow_work_init - Initialise a very slow work item
 * @work: The work item to initialise
@@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work,
	INIT_LIST_HEAD(&work->link);
}
/**
* slow_work_is_queued - Determine if a slow work item is on the work queue
 * @work: The work item to test
*
* Determine if the specified slow-work item is on the work queue. This
* returns true if it is actually on the queue.
*
* If the item is executing and has been marked for requeue when execution
* finishes, then false will be returned.
*
* Anyone wishing to wait for completion of execution can wait on the
* SLOW_WORK_EXECUTING bit.
*/
static inline bool slow_work_is_queued(struct slow_work *work)
{
unsigned long flags = work->flags;
return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
}
extern int slow_work_enqueue(struct slow_work *work);
extern void slow_work_cancel(struct slow_work *work);
extern int slow_work_register_user(struct module *owner);
extern void slow_work_unregister_user(struct module *owner);
extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
unsigned long delay);
static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
{
slow_work_cancel(&dwork->work);
}
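Putting the delayed pieces together: a user embeds a delayed_slow_work, initialises it once, and enqueues it with a jiffies delay; cancellation stops the timer and dequeues the item as appropriate. A hedged sketch (the ops table and delay are illustrative):

	static struct delayed_slow_work example_dwork;

	static void example_kick(void)
	{
		int ret;

		delayed_slow_work_init(&example_dwork, &example_slow_work_ops);

		/* run example_slow_work_ops.execute() roughly five seconds
		 * from now */
		ret = delayed_slow_work_enqueue(&example_dwork, 5 * HZ);
		if (ret < 0)
			printk(KERN_WARNING "example: enqueue failed %d\n", ret);
	}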
extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
signed long *_timeout);
#ifdef CONFIG_SYSCTL
extern ctl_table slow_work_sysctls[];
...
@@ -1098,6 +1098,16 @@ config SLOW_WORK
	  See Documentation/slow-work.txt.
config SLOW_WORK_PROC
bool "Slow work debugging through /proc"
default n
depends on SLOW_WORK && PROC_FS
help
Display the contents of the slow work run queue through /proc,
including items currently executing.
See Documentation/slow-work.txt.
endmenu # General setup

config HAVE_GENERIC_DMA_COHERENT
...
@@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
...
/* Slow work debugging
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/seq_file.h>
#include "slow-work.h"
#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
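The iterator packs a phase selector into the top four bits of the seq_file position and a counter into the rest; on a 64-bit machine ITERATOR_SHIFT is 60. An illustrative decode:

	/*
	 * pos = 2                 -> selector 0x0: the "===" ruler line
	 * pos = 7                 -> selector 0x0: executing-thread slot 4
	 * pos = (0x1UL << 60) | 5 -> selector 0x1: 6th item on slow_work_queue
	 * pos = (0x2UL << 60)     -> selector 0x2: head of vslow_work_queue
	 */
	unsigned long selector = pos >> ITERATOR_SHIFT;	 /* which phase */
	unsigned long counter  = pos & ITERATOR_COUNTER; /* index within it */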
void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
{
seq_puts(m, "Slow-work: New thread");
}
/*
* Render the time mark field on a work item into a 5-char time with units plus
* a space
*/
static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
{
struct timespec now, diff;
now = CURRENT_TIME;
diff = timespec_sub(now, work->mark);
if (diff.tv_sec < 0)
seq_puts(m, " -ve ");
else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
seq_printf(m, "%3luns ", diff.tv_nsec);
else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
else if (diff.tv_sec <= 1)
seq_puts(m, " 1s ");
else if (diff.tv_sec < 60)
seq_printf(m, "%4lus ", diff.tv_sec);
else if (diff.tv_sec < 60 * 60)
seq_printf(m, "%4lum ", diff.tv_sec / 60);
else if (diff.tv_sec < 60 * 60 * 24)
seq_printf(m, "%4luh ", diff.tv_sec / 3600);
else
seq_puts(m, "exces ");
}
/*
* Describe a slow work item for /proc
*/
static int slow_work_runqueue_show(struct seq_file *m, void *v)
{
struct slow_work *work;
struct list_head *p = v;
unsigned long id;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
return 0;
case 2:
seq_puts(m, "=== ===== ================ == ===== ==========\n");
return 0;
case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
id = (unsigned long) v - 3;
read_lock(&slow_work_execs_lock);
work = slow_work_execs[id];
if (work) {
smp_read_barrier_depends();
seq_printf(m, "%3lu %5d %16p %2lx ",
id, slow_work_pids[id], work, work->flags);
slow_work_print_mark(m, work);
if (work->ops->desc)
work->ops->desc(work, m);
seq_putc(m, '\n');
}
read_unlock(&slow_work_execs_lock);
return 0;
default:
work = list_entry(p, struct slow_work, link);
seq_printf(m, "%3s - %16p %2lx ",
work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
work, work->flags);
slow_work_print_mark(m, work);
if (work->ops->desc)
work->ops->desc(work, m);
seq_putc(m, '\n');
return 0;
}
}
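For reference, the resulting file might read as follows (addresses, PIDs, flag values and timings are illustrative; queued items show "sq"/"vsq" in place of a thread ID):

THR PID ITEM ADDR FL MARK DESC
=== ===== ================ == ===== ==========
  0  4215 ffff88003c1a9e38  2  34ms Slow-work: New thread
 sq -     ffff88003d8b4d20  1 483us (item description from ops->desc)
vsq -     ffff88003e0c1118  9    2s (item description from ops->desc)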
/*
* map the iterator to a work item
*/
static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
{
struct list_head *p;
unsigned long count, id;
switch (*_pos >> ITERATOR_SHIFT) {
case 0x0:
if (*_pos == 0)
*_pos = 1;
if (*_pos < 3)
return (void *)(unsigned long) *_pos;
if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
for (id = *_pos - 3;
id < SLOW_WORK_THREAD_LIMIT;
id++, (*_pos)++)
if (slow_work_execs[id])
return (void *)(unsigned long) *_pos;
*_pos = 0x1UL << ITERATOR_SHIFT;
/* fall through */
case 0x1:
count = *_pos & ITERATOR_COUNTER;
list_for_each(p, &slow_work_queue) {
if (count == 0)
return p;
count--;
}
*_pos = 0x2UL << ITERATOR_SHIFT;
/* fall through */
case 0x2:
count = *_pos & ITERATOR_COUNTER;
list_for_each(p, &vslow_work_queue) {
if (count == 0)
return p;
count--;
}
*_pos = 0x3UL << ITERATOR_SHIFT;
/* fall through */
default:
return NULL;
}
}
/*
* set up the iterator to start reading from the first line
*/
static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
{
spin_lock_irq(&slow_work_queue_lock);
return slow_work_runqueue_index(m, _pos);
}
/*
* move to the next line
*/
static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
{
struct list_head *p = v;
unsigned long selector = *_pos >> ITERATOR_SHIFT;
(*_pos)++;
switch (selector) {
case 0x0:
return slow_work_runqueue_index(m, _pos);
case 0x1:
if (*_pos >> ITERATOR_SHIFT == 0x1) {
p = p->next;
if (p != &slow_work_queue)
return p;
}
*_pos = 0x2UL << ITERATOR_SHIFT;
p = &vslow_work_queue;
/* fall through */
case 0x2:
if (*_pos >> ITERATOR_SHIFT == 0x2) {
p = p->next;
if (p != &vslow_work_queue)
return p;
}
*_pos = 0x3UL << ITERATOR_SHIFT;
/* fall through */
default:
return NULL;
}
}
/*
* clean up after reading
*/
static void slow_work_runqueue_stop(struct seq_file *m, void *v)
{
spin_unlock_irq(&slow_work_queue_lock);
}
static const struct seq_operations slow_work_runqueue_ops = {
.start = slow_work_runqueue_start,
.stop = slow_work_runqueue_stop,
.next = slow_work_runqueue_next,
.show = slow_work_runqueue_show,
};
/*
* open "/proc/slow_work_rq" to list queue contents
*/
static int slow_work_runqueue_open(struct inode *inode, struct file *file)
{
return seq_open(file, &slow_work_runqueue_ops);
}
const struct file_operations slow_work_runqueue_fops = {
.owner = THIS_MODULE,
.open = slow_work_runqueue_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
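Registration of the proc entry itself lives in kernel/slow-work.c, whose (large) diff is elided here; presumably it looks something like the following sketch, with the file name taken from the comment above:

#include <linux/proc_fs.h>

static int __init slow_work_proc_init(void)
{
	/* read-only; the seq iterators take slow_work_queue_lock themselves */
	if (!proc_create("slow_work_rq", S_IFREG | 0400, NULL,
			 &slow_work_runqueue_fops))
		return -ENOMEM;
	return 0;
}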
...
/* Slow work private definitions
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
* things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
* OOM */
#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
/*
* slow-work.c
*/
#ifdef CONFIG_SLOW_WORK_PROC
extern struct slow_work *slow_work_execs[];
extern pid_t slow_work_pids[];
extern rwlock_t slow_work_execs_lock;
#endif
extern struct list_head slow_work_queue;
extern struct list_head vslow_work_queue;
extern spinlock_t slow_work_queue_lock;
/*
* slow-work-proc.c
*/
#ifdef CONFIG_SLOW_WORK_PROC
extern const struct file_operations slow_work_runqueue_fops;
extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
#endif
/*
* Helper functions
*/
static inline void slow_work_set_thread_pid(int id, pid_t pid)
{
#ifdef CONFIG_SLOW_WORK_PROC
slow_work_pids[id] = pid;
#endif
}
static inline void slow_work_mark_time(struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_PROC
work->mark = CURRENT_TIME;
#endif
}
static inline void slow_work_begin_exec(int id, struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_PROC
slow_work_execs[id] = work;
#endif
}
static inline void slow_work_end_exec(int id, struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_PROC
write_lock(&slow_work_execs_lock);
slow_work_execs[id] = NULL;
write_unlock(&slow_work_execs_lock);
#endif
}
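A sketch of how the execution path in slow-work.c would be expected to use these helpers around a work item's execute op (id is the executing thread's slot; the surrounding thread loop is elided):

	slow_work_mark_time(work);	/* stamp start of execution */
	slow_work_begin_exec(id, work);	/* publish for the /proc view */
	work->ops->execute(work);
	slow_work_end_exec(id, work);	/* retract under execs_lock */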
@@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node)
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*
* To make use of this facility, the radix tree must be initialised without
* __GFP_WAIT being passed to INIT_RADIX_TREE().
*/
int radix_tree_preload(gfp_t gfp_mask)
{
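The pattern this comment prescribes, as a sketch (my_tree, my_lock, index and item are hypothetical; because the insert runs under a spinlock, the tree must have been initialised without __GFP_WAIT, as the comment says):

	int error;

	error = radix_tree_preload(GFP_NOFS);
	if (error < 0)
		return error;

	spin_lock(&my_lock);
	error = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return error;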
@@ -543,7 +546,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_tag_clear);
#ifndef __KERNEL__ /* Only the test harness uses this at present */
/**
* radix_tree_tag_get - get a tag on a radix tree node
* @root: radix tree root
@@ -606,7 +608,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
}
}
EXPORT_SYMBOL(radix_tree_tag_get);
#endif
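With the #ifndef gone, kernel code can now test a tag without modifying it; a sketch (my_tree, index and MY_TAG are hypothetical; FS-Cache uses this sort of check on pages still pending storage):

	int pending = radix_tree_tag_get(&my_tree, index, MY_TAG);
	if (pending) {
		/* the entry at index still carries MY_TAG */
	}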
/**
* radix_tree_next_hole - find the next hole (not-present entry)
...