Commit cda35204 authored by NeilBrown, committed by Greg Kroah-Hartman

staging: lustre: remove l_wait_event from ptlrpc_set_wait

This is the last remaining use of l_wait_event().
It is the only use of LWI_TIMEOUT_INTR_ALL(), which
means that the timeout itself can be interrupted.
Only interruption by "fatal" signals is allowed, so
introduce l_wait_event_abortable_timeout() to
support this.
Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: Patrick Farrell <paf@cray.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9fc53ff2
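
The semantics of the new helper can be illustrated outside the kernel. The following is a minimal userspace sketch, not part of the patch: it blocks every signal except a "fatal" set around a timed sleep, so only those signals can interrupt the wait, then restores the original mask, mirroring what l_wait_event_abortable_timeout() does with cfs_block_sigsinv()/cfs_restore_sigs(). The handler name and signal list are illustrative; SIGKILL is omitted because userspace can neither catch nor block it.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

/* Interrupt-only handler: installed without SA_RESTART so that
 * nanosleep() returns -1/EINTR when a "fatal" signal arrives. */
static void fatal_handler(int sig) { (void)sig; }

int main(void)
{
	struct sigaction sa = { .sa_handler = fatal_handler };
	int fatal[] = { SIGINT, SIGTERM, SIGQUIT, SIGALRM };
	struct timespec timeout = { .tv_sec = 10, .tv_nsec = 0 };
	sigset_t mask, old;
	size_t i;

	/* Block everything except the fatal set: the userspace
	 * analogue of cfs_block_sigsinv(LUSTRE_FATAL_SIGS). */
	sigfillset(&mask);
	for (i = 0; i < sizeof(fatal) / sizeof(fatal[0]); i++) {
		sigdelset(&mask, fatal[i]);
		sigaction(fatal[i], &sa, NULL);
	}
	sigprocmask(SIG_SETMASK, &mask, &old);

	/* The timed wait: only the fatal signals can end it early. */
	if (nanosleep(&timeout, NULL) == -1 && errno == EINTR)
		printf("aborted by a fatal signal\n");	/* rc < 0 case */
	else
		printf("timed out normally\n");		/* rc == 0 case */

	/* Restore the caller's mask: analogue of cfs_restore_sigs(). */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}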
@@ -196,6 +196,10 @@ struct l_wait_info {
 #define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) |	\
			    sigmask(SIGTERM) | sigmask(SIGQUIT) |	\
			    sigmask(SIGALRM))
+static inline int l_fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && sigtestsetmask(&p->pending.signal, LUSTRE_FATAL_SIGS);
+}
 /**
  * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively
@@ -347,6 +351,16 @@ do { \
 	__ret;								  \
 })
+
+#define l_wait_event_abortable_timeout(wq, condition, timeout)		  \
+({									  \
+	sigset_t __blocked;						  \
+	int __ret = 0;							  \
+	__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);		  \
+	__ret = wait_event_interruptible_timeout(wq, condition, timeout);\
+	cfs_restore_sigs(__blocked);					  \
+	__ret;								  \
+})
 
 #define l_wait_event_abortable_exclusive(wq, condition)		  \
 ({									  \
	sigset_t __blocked;						  \
@@ -1774,7 +1774,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 			}
 
 			/*
-			 * ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
+			 * ptlrpc_set_wait allow signal to abort the timeout
 			 * so it sets rq_intr regardless of individual rpc
 			 * timeouts. The synchronous IO waiting path sets
 			 * rq_intr irrespective of whether ptlrpcd
@@ -2122,8 +2122,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
 
 /**
  * Time out all uncompleted requests in request set pointed by \a data
- * Callback used when waiting on sets with l_wait_event.
- * Always returns 1.
+ * Called when wait_event_idle_timeout times out.
  */
 void ptlrpc_expired_set(struct ptlrpc_request_set *set)
 {
@@ -2156,18 +2155,6 @@ void ptlrpc_expired_set(struct ptlrpc_request_set *set)
 		ptlrpc_expire_one_request(req, 1);
 	}
 }
-
-static int ptlrpc_expired_set_void(void *data)
-{
-	struct ptlrpc_request_set *set = data;
-	ptlrpc_expired_set(set);
-	/*
-	 * When waiting for a whole set, we always break out of the
-	 * sleep so we can recalculate the timeout, or enable interrupts
-	 * if everyone's timed out.
-	 */
-	return 1;
-}
 
 /**
  * Sets rq_intr flag in \a req under spinlock.
@@ -2182,11 +2169,10 @@ EXPORT_SYMBOL(ptlrpc_mark_interrupted);
 
 /**
  * Interrupts (sets interrupted flag) all uncompleted requests in
- * a set \a data. Callback for l_wait_event for interruptible waits.
+ * a set \a data. Called when l_wait_event_abortable_timeout receives signal.
  */
-static void ptlrpc_interrupted_set(void *data)
+static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
 {
-	struct ptlrpc_request_set *set = data;
 	struct list_head *tmp;
 
 	CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
@@ -2256,7 +2242,6 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 {
 	struct list_head *tmp;
 	struct ptlrpc_request *req;
-	struct l_wait_info lwi;
 	int rc, timeout;
 
 	if (set->set_producer)
@@ -2282,46 +2267,47 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 		CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
 		       set, timeout);
 
-		if (timeout == 0 && !signal_pending(current))
+		if (timeout == 0 && !signal_pending(current)) {
 			/*
 			 * No requests are in-flight (ether timed out
 			 * or delayed), so we can allow interrupts.
 			 * We still want to block for a limited time,
 			 * so we allow interrupts during the timeout.
 			 */
-			lwi = LWI_TIMEOUT_INTR_ALL(HZ,
-						   ptlrpc_expired_set_void,
-						   ptlrpc_interrupted_set, set);
-		else
+			rc = l_wait_event_abortable_timeout(set->set_waitq,
+							    ptlrpc_check_set(NULL, set),
+							    HZ);
+			if (rc == 0) {
+				rc = -ETIMEDOUT;
+				ptlrpc_expired_set(set);
+			} else if (rc < 0) {
+				rc = -EINTR;
+				ptlrpc_interrupted_set(set);
+			} else
+				rc = 0;
+		} else {
 			/*
 			 * At least one request is in flight, so no
 			 * interrupts are allowed. Wait until all
 			 * complete, or an in-flight req times out.
 			 */
-			lwi = LWI_TIMEOUT((timeout ? timeout : 1) * HZ,
-					  ptlrpc_expired_set_void, set);
-
-		rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
-
-		/*
-		 * LU-769 - if we ignored the signal because it was already
-		 * pending when we started, we need to handle it now or we risk
-		 * it being ignored forever
-		 */
-		if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
-		    signal_pending(current)) {
-			sigset_t blocked_sigs =
-				cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-			/*
-			 * In fact we only interrupt for the "fatal" signals
-			 * like SIGINT or SIGKILL. We still ignore less
-			 * important signals since ptlrpc set is not easily
-			 * reentrant from userspace again
-			 */
-			if (signal_pending(current))
-				ptlrpc_interrupted_set(set);
-			cfs_restore_sigs(blocked_sigs);
+			rc = wait_event_idle_timeout(set->set_waitq,
+						     ptlrpc_check_set(NULL, set),
+						     (timeout ? timeout : 1) * HZ);
+			if (rc == 0) {
+				ptlrpc_expired_set(set);
+				rc = -ETIMEDOUT;
+				/*
+				 * LU-769 - if we ignored the signal
+				 * because it was already pending when
+				 * we started, we need to handle it
+				 * now or we risk it being ignored
+				 * forever
+				 */
+				if (l_fatal_signal_pending(current))
+					ptlrpc_interrupted_set(set);
+			} else
+				rc = 0;
 		}
 
 		LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
@@ -2528,7 +2514,7 @@ static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 		return 0;
 
 	/*
-	 * We have to l_wait_event() whatever the result, to give liblustre
+	 * We have to wait_event_idle_timeout() whatever the result, to give liblustre
 	 * a chance to run reply_in_callback(), and to make sure we've
 	 * unlinked before returning a req to the pool.
 	 */