Commit a01fa108 authored by Amir Shehata, committed by Greg Kroah-Hartman

staging: lustre: make ko2iblnd connect parameters persistent

Store the map-on-demand and peer-TX-credit values in the peer structure,
since the peer is persistent. Also ensure that when the parameters
received on the connection are assigned to the peer structure at
creation time, they are likewise copied to any duplicate peer that
another thread managed to add before we grabbed the lock.
Signed-off-by: Amir Shehata <amir.shehata@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3322
Reviewed-on: http://review.whamcloud.com/17074
Reviewed-by: Doug Oucharek <doug.s.oucharek@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2fb44f2b
...@@ -335,6 +335,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) ...@@ -335,6 +335,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
peer->ibp_nid = nid; peer->ibp_nid = nid;
peer->ibp_error = 0; peer->ibp_error = 0;
peer->ibp_last_alive = 0; peer->ibp_last_alive = 0;
peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
...@@ -631,7 +633,7 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) ...@@ -631,7 +633,7 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
} }
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
int state, int version, kib_connparams_t *cp) int state, int version)
{ {
/* /*
* CAVEAT EMPTOR: * CAVEAT EMPTOR:
...@@ -685,14 +687,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, ...@@ -685,14 +687,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
conn->ibc_peer = peer; /* I take the caller's ref */ conn->ibc_peer = peer; /* I take the caller's ref */
cmid->context = conn; /* for future CM callbacks */ cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid; conn->ibc_cmid = cmid;
conn->ibc_max_frags = peer->ibp_max_frags;
if (!cp) { conn->ibc_queue_depth = peer->ibp_queue_depth;
conn->ibc_max_frags = IBLND_CFG_RDMA_FRAGS;
conn->ibc_queue_depth = *kiblnd_tunables.kib_peertxcredits;
} else {
conn->ibc_max_frags = cp->ibcp_max_frags;
conn->ibc_queue_depth = cp->ibcp_queue_depth;
}
INIT_LIST_HEAD(&conn->ibc_early_rxs); INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops); INIT_LIST_HEAD(&conn->ibc_tx_noops);
......
...@@ -586,6 +586,10 @@ typedef struct kib_peer { ...@@ -586,6 +586,10 @@ typedef struct kib_peer {
int ibp_error; /* errno on closing this peer */ int ibp_error; /* errno on closing this peer */
unsigned long ibp_last_alive; /* when (in jiffies) I was last alive unsigned long ibp_last_alive; /* when (in jiffies) I was last alive
*/ */
/* max map_on_demand */
__u16 ibp_max_frags;
/* max_peer_credits */
__u16 ibp_queue_depth;
} kib_peer_t; } kib_peer_t;
extern kib_data_t kiblnd_data; extern kib_data_t kiblnd_data;
...@@ -946,7 +950,7 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer, ...@@ -946,7 +950,7 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why); int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
int state, int version, kib_connparams_t *cp); int state, int version);
void kiblnd_destroy_conn(kib_conn_t *conn); void kiblnd_destroy_conn(kib_conn_t *conn);
void kiblnd_close_conn(kib_conn_t *conn, int error); void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error); void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
......
...@@ -2322,6 +2322,10 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) ...@@ -2322,6 +2322,10 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed; goto failed;
} }
/* We have validated the peer's parameters so use those */
peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
write_lock_irqsave(g_lock, flags); write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid); peer2 = kiblnd_find_peer_locked(nid);
...@@ -2360,6 +2364,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) ...@@ -2360,6 +2364,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
peer2->ibp_accepting++; peer2->ibp_accepting++;
kiblnd_peer_addref(peer2); kiblnd_peer_addref(peer2);
/**
* Race with kiblnd_launch_tx (active connect) to create peer
* so copy validated parameters since we now know what the
* peer's limits are
*/
peer2->ibp_max_frags = peer->ibp_max_frags;
peer2->ibp_queue_depth = peer->ibp_queue_depth;
write_unlock_irqrestore(g_lock, flags); write_unlock_irqrestore(g_lock, flags);
kiblnd_peer_decref(peer); kiblnd_peer_decref(peer);
peer = peer2; peer = peer2;
...@@ -2382,8 +2394,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) ...@@ -2382,8 +2394,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
write_unlock_irqrestore(g_lock, flags); write_unlock_irqrestore(g_lock, flags);
} }
conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version, conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT,
&reqmsg->ibm_u.connparams); version);
if (!conn) { if (!conn) {
kiblnd_peer_connect_failed(peer, 0, -ENOMEM); kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
kiblnd_peer_decref(peer); kiblnd_peer_decref(peer);
...@@ -2396,8 +2408,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) ...@@ -2396,8 +2408,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
* CM callback doesn't destroy cmid. * CM callback doesn't destroy cmid.
*/ */
conn->ibc_incarnation = reqmsg->ibm_srcstamp; conn->ibc_incarnation = reqmsg->ibm_srcstamp;
conn->ibc_credits = reqmsg->ibm_u.connparams.ibcp_queue_depth; conn->ibc_credits = conn->ibc_queue_depth;
conn->ibc_reserved_credits = reqmsg->ibm_u.connparams.ibcp_queue_depth; conn->ibc_reserved_credits = conn->ibc_queue_depth;
LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn)); IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
...@@ -2406,10 +2418,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) ...@@ -2406,10 +2418,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
sizeof(ackmsg->ibm_u.connparams)); sizeof(ackmsg->ibm_u.connparams));
ackmsg->ibm_u.connparams.ibcp_queue_depth = ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
reqmsg->ibm_u.connparams.ibcp_queue_depth; ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
ackmsg->ibm_u.connparams.ibcp_max_frags =
reqmsg->ibm_u.connparams.ibcp_max_frags;
ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
...@@ -2494,6 +2504,9 @@ kiblnd_reconnect(kib_conn_t *conn, int version, ...@@ -2494,6 +2504,9 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
break; break;
case IBLND_REJECT_RDMA_FRAGS: case IBLND_REJECT_RDMA_FRAGS:
if (!cp)
goto failed;
if (conn->ibc_max_frags <= cp->ibcp_max_frags) { if (conn->ibc_max_frags <= cp->ibcp_max_frags) {
CNETERR("Unsupported max frags, peer supports %d\n", CNETERR("Unsupported max frags, peer supports %d\n",
cp->ibcp_max_frags); cp->ibcp_max_frags);
...@@ -2503,18 +2516,21 @@ kiblnd_reconnect(kib_conn_t *conn, int version, ...@@ -2503,18 +2516,21 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
goto failed; goto failed;
} }
conn->ibc_max_frags = cp->ibcp_max_frags; peer->ibp_max_frags = cp->ibcp_max_frags;
reason = "rdma fragments"; reason = "rdma fragments";
break; break;
case IBLND_REJECT_MSG_QUEUE_SIZE: case IBLND_REJECT_MSG_QUEUE_SIZE:
if (!cp)
goto failed;
if (conn->ibc_queue_depth <= cp->ibcp_queue_depth) { if (conn->ibc_queue_depth <= cp->ibcp_queue_depth) {
CNETERR("Unsupported queue depth, peer supports %d\n", CNETERR("Unsupported queue depth, peer supports %d\n",
cp->ibcp_queue_depth); cp->ibcp_queue_depth);
goto failed; goto failed;
} }
conn->ibc_queue_depth = cp->ibcp_queue_depth; peer->ibp_queue_depth = cp->ibcp_queue_depth;
reason = "queue depth"; reason = "queue depth";
break; break;
...@@ -2795,7 +2811,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) ...@@ -2795,7 +2811,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
version, NULL); version);
if (!conn) { if (!conn) {
kiblnd_peer_connect_failed(peer, 1, -ENOMEM); kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
kiblnd_peer_decref(peer); /* lose cmid's ref */ kiblnd_peer_decref(peer); /* lose cmid's ref */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment