Commit c2242d14 authored by Niu Yawei, committed by Greg Kroah-Hartman

staging: lustre: ldlm: reclaim granted locks defensively

It was discovered that too many ldlm locks were being
created on the server side, to the point of memory
exhaustion. The work of LU-6529 introduced watermarks
to avoid this memory exhaustion. This is the client-side
part of that work for the upstream client.

Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6529
Reviewed-on: http://review.whamcloud.com/14931
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6929
Reviewed-on: http://review.whamcloud.com/15813
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 115ee9d0
drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -705,7 +705,7 @@ void
 cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
 int
 cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
-			 void *data);
+			 void *data, int start);
 int
 cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
 			void *data);
drivers/staging/lustre/lnet/libcfs/hash.c
@@ -1552,7 +1552,7 @@ EXPORT_SYMBOL(cfs_hash_size_get);
  */
 static int
 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
-			void *data)
+			void *data, int start)
 {
 	struct hlist_node *hnode;
 	struct hlist_node *tmp;
@@ -1560,18 +1560,25 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 	__u32 version;
 	int count = 0;
 	int stop_on_change;
-	int rc;
+	int end = -1;
+	int rc = 0;
 	int i;
 
 	stop_on_change = cfs_hash_with_rehash_key(hs) ||
 			 !cfs_hash_with_no_itemref(hs) ||
 			 !hs->hs_ops->hs_put_locked;
 	cfs_hash_lock(hs, 0);
+again:
 	LASSERT(!cfs_hash_is_rehashing(hs));
 
 	cfs_hash_for_each_bucket(hs, &bd, i) {
 		struct hlist_head *hhead;
 
+		if (i < start)
+			continue;
+		else if (end > 0 && i >= end)
+			break;
+
 		cfs_hash_bd_lock(hs, &bd, 0);
 		version = cfs_hash_bd_version_get(&bd);
 
@@ -1611,14 +1618,19 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		if (rc) /* callback wants to break iteration */
 			break;
 	}
 
-	cfs_hash_unlock(hs, 0);
+	if (start > 0 && !rc) {
+		end = start;
+		start = 0;
+		goto again;
+	}
+	cfs_hash_unlock(hs, 0);
 	return count;
 }
 
 int
 cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
-			 void *data)
+			 void *data, int start)
 {
 	if (cfs_hash_with_no_lock(hs) ||
 	    cfs_hash_with_rehash_key(hs) ||
@@ -1630,7 +1642,7 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		return -EOPNOTSUPP;
 
 	cfs_hash_for_each_enter(hs);
-	cfs_hash_for_each_relax(hs, func, data);
+	cfs_hash_for_each_relax(hs, func, data, start);
 	cfs_hash_for_each_exit(hs);
 
 	return 0;
@@ -1662,7 +1674,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		return -EOPNOTSUPP;
 
 	cfs_hash_for_each_enter(hs);
-	while (cfs_hash_for_each_relax(hs, func, data)) {
+	while (cfs_hash_for_each_relax(hs, func, data, 0)) {
 		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
 		       hs->hs_name, i++);
 	}
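The start/end/again logic above turns cfs_hash_for_each_relax() into a
wrap-around scan: buckets start..N-1 are visited first, then the scan
restarts at bucket 0 and stops where it originally began, so a caller can
begin anywhere in the table without missing a bucket. A minimal standalone
sketch of the same pattern (plain C with illustrative names, not the
Lustre implementation; it drops the rc bookkeeping of the real function):

#include <stdio.h>

#define NBUCKETS 8

/* Visit every bucket exactly once, beginning at 'start' and wrapping. */
static void scan_from(int start)
{
	int end = -1;
	int i;

again:
	for (i = 0; i < NBUCKETS; i++) {
		if (i < start)
			continue;		/* skip the prefix on the first pass */
		else if (end > 0 && i >= end)
			break;			/* second pass stops where we began */
		printf("visit bucket %d\n", i);
	}
	if (start > 0 && end < 0) {
		end = start;	/* remember where the first pass began */
		start = 0;	/* wrap around to cover the skipped prefix */
		goto again;
	}
}

int main(void)
{
	scan_from(5);	/* visits 5, 6, 7, then wraps to 0..4 */
	return 0;
}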
drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -1729,7 +1729,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
 					       opaque);
 	} else {
 		cfs_hash_for_each_nolock(ns->ns_rs_hash,
-					 ldlm_cli_hash_cancel_unused, &arg);
+					 ldlm_cli_hash_cancel_unused, &arg, 0);
 		return ELDLM_OK;
 	}
 }
@@ -1802,7 +1802,7 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
 	};
 
 	cfs_hash_for_each_nolock(ns->ns_rs_hash,
-				 ldlm_res_iter_helper, &helper);
+				 ldlm_res_iter_helper, &helper, 0);
 }
 
 /* non-blocking function to manipulate a lock whose cb_data is being put away.
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -855,8 +855,10 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
 		return ELDLM_OK;
 	}
 
-	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
-	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
+	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
+				 &flags, 0);
+	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
+				 NULL, 0);
 	return ELDLM_OK;
 }
 EXPORT_SYMBOL(ldlm_namespace_cleanup);
@@ -1352,7 +1354,7 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
 	cfs_hash_for_each_nolock(ns->ns_rs_hash,
 				 ldlm_res_hash_dump,
-				 (void *)(unsigned long)level);
+				 (void *)(unsigned long)level, 0);
 
 	spin_lock(&ns->ns_lock);
 	ns->ns_next_dump = cfs_time_shift(10);
 	spin_unlock(&ns->ns_lock);
drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -760,12 +760,6 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	if (req && it && it->it_op & IT_CREAT)
-		/* ask ptlrpc not to resend on EINPROGRESS since we have our own
-		 * retry logic
-		 */
-		req->rq_no_retry_einprogress = 1;
-
 	if (resends) {
 		req->rq_generation_set = 1;
 		req->rq_import_generation = generation;
@@ -823,11 +817,12 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 	lockrep->lock_policy_res2 =
 		ptlrpc_status_ntoh(lockrep->lock_policy_res2);
 
-	/* Retry the create infinitely when we get -EINPROGRESS from
-	 * server. This is required by the new quota design.
+	/*
+	 * Retry infinitely when the server returns -EINPROGRESS for the
+	 * intent operation, when server returns -EINPROGRESS for acquiring
+	 * intent lock, we'll retry in after_reply().
 	 */
-	if (it->it_op & IT_CREAT &&
-	    (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
+	if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
 		mdc_clear_replay_flag(req, rc);
 		ptlrpc_req_finished(req);
 		resends++;
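With rq_no_retry_einprogress gone, ptlrpc's after_reply() now handles
-EINPROGRESS resends for acquiring the intent lock itself, while the branch
above still loops inside mdc_enqueue() whenever the intent result
(lock_policy_res2) carries -EINPROGRESS, and it does so for any intent
operation rather than only IT_CREAT. A self-contained sketch of that retry
shape (plain C with a stubbed RPC, not the Lustre code):

#include <errno.h>
#include <stdio.h>

/* Stand-in for the intent enqueue RPC; reports -EINPROGRESS twice
 * (e.g. the server is still setting things up) before succeeding. */
static int fake_intent_rpc(void)
{
	static int calls;

	return ++calls < 3 ? -EINPROGRESS : 0;
}

/* Reissue the request for as long as the server answers -EINPROGRESS. */
static int enqueue_with_retry(void)
{
	int rc;
	int resends = 0;

	while ((rc = fake_intent_rpc()) == -EINPROGRESS) {
		resends++;	/* a real caller rebuilds the request here */
		fprintf(stderr, "resend #%d after -EINPROGRESS\n", resends);
	}
	return rc;	/* success, or a hard error worth surfacing */
}

int main(void)
{
	return enqueue_with_retry() ? 1 : 0;
}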