Commit 52361a06 authored by Alan Brady, committed by Tony Nguyen

idpf: refactor queue related virtchnl messages

This reworks the queue-specific virtchnl messages to use the added
transaction API.  It is fairly mechanical and generally makes the
functions using it simpler.  Functions converted to the transaction API
no longer need to take vc_buf_lock, since they no longer use it.  After
filling out an idpf_vc_xn_params struct, idpf_vc_xn_exec takes care of
the send and recv handling.

This also converts those functions, where appropriate, to use
automatically freed variables (__free(kfree)) instead of calling
kfree() manually.  This greatly simplifies the memory allocation paths
and makes them less prone to memory leaks.
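
For illustration, a condensed sketch of the resulting call pattern (the
function below is hypothetical; the params struct, fields, and helpers
are the ones used in the diff):

static int idpf_send_example_msg(struct idpf_vport *vport)
{
	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	ctq = kzalloc(sizeof(*ctq), GFP_KERNEL);
	if (!ctq)
		return -ENOMEM;

	/* Fill out the transaction parameters... */
	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = ctq;
	xn_params.send_buf.iov_len = sizeof(*ctq);

	/* ...and let idpf_vc_xn_exec() handle send, wait and reply matching.
	 * No vc_buf_lock is taken, and ctq is freed automatically on return.
	 */
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
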
Tested-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Igor Bagnucki <igor.bagnucki@intel.com>
Signed-off-by: Alan Brady <alan.brady@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 8c49e68f
@@ -553,7 +553,7 @@ struct idpf_vector_lifo {
 struct idpf_vport_config {
 	struct idpf_vport_user_config_data user_config;
 	struct idpf_vport_max_q max_q;
-	void *req_qs_chunks;
+	struct virtchnl2_add_queues *req_qs_chunks;
 	spinlock_t mac_filter_list_lock;
 	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
 };
...
@@ -904,47 +904,15 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
 	case VIRTCHNL2_OP_ENABLE_VPORT:
 	case VIRTCHNL2_OP_DISABLE_VPORT:
 	case VIRTCHNL2_OP_DESTROY_VPORT:
-		err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
-		break;
 	case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_CONFIG_TXQ,
-				   IDPF_VC_CONFIG_TXQ_ERR);
-		break;
 	case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_CONFIG_RXQ,
-				   IDPF_VC_CONFIG_RXQ_ERR);
-		break;
 	case VIRTCHNL2_OP_ENABLE_QUEUES:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_ENA_QUEUES,
-				   IDPF_VC_ENA_QUEUES_ERR);
-		break;
 	case VIRTCHNL2_OP_DISABLE_QUEUES:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_DIS_QUEUES,
-				   IDPF_VC_DIS_QUEUES_ERR);
-		break;
 	case VIRTCHNL2_OP_ADD_QUEUES:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_ADD_QUEUES,
-				   IDPF_VC_ADD_QUEUES_ERR);
-		break;
 	case VIRTCHNL2_OP_DEL_QUEUES:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_DEL_QUEUES,
-				   IDPF_VC_DEL_QUEUES_ERR);
-		break;
 	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_MAP_IRQ,
-				   IDPF_VC_MAP_IRQ_ERR);
-		break;
 	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
-		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
-				   IDPF_VC_UNMAP_IRQ,
-				   IDPF_VC_UNMAP_IRQ_ERR);
+		err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
 		break;
 	case VIRTCHNL2_OP_GET_STATS:
 		idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
@@ -1879,11 +1847,13 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
  */
 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 {
-	struct virtchnl2_config_tx_queues *ctq;
+	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
 	u32 config_sz, chunk_sz, buf_sz;
 	int totqs, num_msgs, num_chunks;
-	struct virtchnl2_txq_info *qi;
-	int err = 0, i, k = 0;
+	ssize_t reply_sz;
+	int i, k = 0;
 
 	totqs = vport->num_txq + vport->num_complq;
 	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1944,10 +1914,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 	}
 
 	/* Make sure accounting agrees */
-	if (k != totqs) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (k != totqs)
+		return -EINVAL;
 
 	/* Chunk up the queue contexts into multiple messages to avoid
 	 * sending a control queue message buffer that is too large
@@ -1961,12 +1929,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 
 	buf_sz = struct_size(ctq, qinfo, num_chunks);
 	ctq = kzalloc(buf_sz, GFP_KERNEL);
-	if (!ctq) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!ctq)
+		return -ENOMEM;
 
-	mutex_lock(&vport->vc_buf_lock);
+	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
 
 	for (i = 0, k = 0; i < num_msgs; i++) {
 		memset(ctq, 0, buf_sz);
@@ -1974,17 +1941,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 		ctq->num_qinfo = cpu_to_le16(num_chunks);
 		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
 
-		err = idpf_send_mb_msg(vport->adapter,
-				       VIRTCHNL2_OP_CONFIG_TX_QUEUES,
-				       buf_sz, (u8 *)ctq, 0);
-		if (err)
-			goto mbx_error;
-
-		err = idpf_wait_for_event(vport->adapter, vport,
-					  IDPF_VC_CONFIG_TXQ,
-					  IDPF_VC_CONFIG_TXQ_ERR);
-		if (err)
-			goto mbx_error;
+		xn_params.send_buf.iov_base = ctq;
+		xn_params.send_buf.iov_len = buf_sz;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;
 
 		k += num_chunks;
 		totqs -= num_chunks;
@@ -1993,13 +1954,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 		buf_sz = struct_size(ctq, qinfo, num_chunks);
 	}
 
-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(ctq);
-error:
-	kfree(qi);
-
-	return err;
+	return 0;
 }
 
 /**
@@ -2011,11 +1966,13 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
  */
 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
 {
-	struct virtchnl2_config_rx_queues *crq;
+	struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
 	u32 config_sz, chunk_sz, buf_sz;
 	int totqs, num_msgs, num_chunks;
-	struct virtchnl2_rxq_info *qi;
-	int err = 0, i, k = 0;
+	ssize_t reply_sz;
+	int i, k = 0;
 
 	totqs = vport->num_rxq + vport->num_bufq;
 	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -2096,10 +2053,8 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
 	}
 
 	/* Make sure accounting agrees */
-	if (k != totqs) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (k != totqs)
+		return -EINVAL;
 
 	/* Chunk up the queue contexts into multiple messages to avoid
 	 * sending a control queue message buffer that is too large
@@ -2113,12 +2068,11 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
 
 	buf_sz = struct_size(crq, qinfo, num_chunks);
 	crq = kzalloc(buf_sz, GFP_KERNEL);
-	if (!crq) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!crq)
+		return -ENOMEM;
 
-	mutex_lock(&vport->vc_buf_lock);
+	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
 
 	for (i = 0, k = 0; i < num_msgs; i++) {
 		memset(crq, 0, buf_sz);
@@ -2126,17 +2080,11 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
 		crq->num_qinfo = cpu_to_le16(num_chunks);
 		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
 
-		err = idpf_send_mb_msg(vport->adapter,
-				       VIRTCHNL2_OP_CONFIG_RX_QUEUES,
-				       buf_sz, (u8 *)crq, 0);
-		if (err)
-			goto mbx_error;
-
-		err = idpf_wait_for_event(vport->adapter, vport,
-					  IDPF_VC_CONFIG_RXQ,
-					  IDPF_VC_CONFIG_RXQ_ERR);
-		if (err)
-			goto mbx_error;
+		xn_params.send_buf.iov_base = crq;
+		xn_params.send_buf.iov_len = buf_sz;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;
 
 		k += num_chunks;
 		totqs -= num_chunks;
@@ -2145,42 +2093,28 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
 		buf_sz = struct_size(crq, qinfo, num_chunks);
 	}
 
-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(crq);
-error:
-	kfree(qi);
-
-	return err;
+	return 0;
 }
 
 /**
  * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
  * queues message
  * @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: if true enable, false disable
  *
  * Send enable or disable queues virtchnl message. Returns 0 on success,
  * negative on failure.
  */
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
 {
+	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
 	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_del_ena_dis_queues *eq;
+	struct idpf_vc_xn_params xn_params = {};
 	struct virtchnl2_queue_chunks *qcs;
-	struct virtchnl2_queue_chunk *qc;
 	u32 config_sz, chunk_sz, buf_sz;
-	int i, j, k = 0, err = 0;
-
-	/* validate virtchnl op */
-	switch (vc_op) {
-	case VIRTCHNL2_OP_ENABLE_QUEUES:
-	case VIRTCHNL2_OP_DISABLE_QUEUES:
-		break;
-	default:
-		return -EINVAL;
-	}
+	ssize_t reply_sz;
+	int i, j, k = 0;
 
 	num_txq = vport->num_txq + vport->num_complq;
 	num_rxq = vport->num_rxq + vport->num_bufq;
@@ -2199,10 +2133,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
 		}
 	}
-	if (vport->num_txq != k) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_txq != k)
+		return -EINVAL;
 
 	if (!idpf_is_queue_model_split(vport->txq_model))
 		goto setup_rx;
@@ -2214,10 +2146,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
 		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
 	}
-	if (vport->num_complq != (k - vport->num_txq)) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_complq != (k - vport->num_txq))
+		return -EINVAL;
 
 setup_rx:
 	for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -2243,10 +2173,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
 		}
 	}
-	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+		return -EINVAL;
 
 	if (!idpf_is_queue_model_split(vport->rxq_model))
 		goto send_msg;
@@ -2265,10 +2193,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 	}
 	if (vport->num_bufq != k - (vport->num_txq +
 				    vport->num_complq +
-				    vport->num_rxq)) {
-		err = -EINVAL;
-		goto error;
-	}
+				    vport->num_rxq))
+		return -EINVAL;
 
 send_msg:
 	/* Chunk up the queue info into multiple messages */
@@ -2281,12 +2207,16 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 
 	buf_sz = struct_size(eq, chunks.chunks, num_chunks);
 	eq = kzalloc(buf_sz, GFP_KERNEL);
-	if (!eq) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!eq)
+		return -ENOMEM;
 
-	mutex_lock(&vport->vc_buf_lock);
+	if (ena) {
+		xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	} else {
+		xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	}
 
 	for (i = 0, k = 0; i < num_msgs; i++) {
 		memset(eq, 0, buf_sz);
@@ -2295,20 +2225,11 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 		qcs = &eq->chunks;
 		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
 
-		err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq, 0);
-		if (err)
-			goto mbx_error;
-
-		if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
-			err = idpf_wait_for_event(adapter, vport,
-						  IDPF_VC_ENA_QUEUES,
-						  IDPF_VC_ENA_QUEUES_ERR);
-		else
-			err = idpf_min_wait_for_event(adapter, vport,
-						      IDPF_VC_DIS_QUEUES,
-						      IDPF_VC_DIS_QUEUES_ERR);
-		if (err)
-			goto mbx_error;
+		xn_params.send_buf.iov_base = eq;
+		xn_params.send_buf.iov_len = buf_sz;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;
 
 		k += num_chunks;
 		num_q -= num_chunks;
@@ -2317,13 +2238,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
 	}
 
-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(eq);
-error:
-	kfree(qc);
-
-	return err;
+	return 0;
 }
 
 /**
@@ -2337,12 +2252,13 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
  */
 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 {
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_queue_vector_maps *vqvm;
-	struct virtchnl2_queue_vector *vqv;
+	struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
 	u32 config_sz, chunk_sz, buf_sz;
 	u32 num_msgs, num_chunks, num_q;
-	int i, j, k = 0, err = 0;
+	ssize_t reply_sz;
+	int i, j, k = 0;
 
 	num_q = vport->num_txq + vport->num_rxq;
@@ -2372,10 +2288,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 		}
 	}
 
-	if (vport->num_txq != k) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_txq != k)
+		return -EINVAL;
 
 	for (i = 0; i < vport->num_rxq_grp; i++) {
 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -2402,15 +2316,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 	}
 
 	if (idpf_is_queue_model_split(vport->txq_model)) {
-		if (vport->num_rxq != k - vport->num_complq) {
-			err = -EINVAL;
-			goto error;
-		}
+		if (vport->num_rxq != k - vport->num_complq)
+			return -EINVAL;
 	} else {
-		if (vport->num_rxq != k - vport->num_txq) {
-			err = -EINVAL;
-			goto error;
-		}
+		if (vport->num_rxq != k - vport->num_txq)
+			return -EINVAL;
 	}
 
 	/* Chunk up the vector info into multiple messages */
@@ -2423,39 +2333,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 
 	buf_sz = struct_size(vqvm, qv_maps, num_chunks);
 	vqvm = kzalloc(buf_sz, GFP_KERNEL);
-	if (!vqvm) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!vqvm)
+		return -ENOMEM;
 
-	mutex_lock(&vport->vc_buf_lock);
+	if (map) {
+		xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	} else {
+		xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	}
 
 	for (i = 0, k = 0; i < num_msgs; i++) {
 		memset(vqvm, 0, buf_sz);
+		xn_params.send_buf.iov_base = vqvm;
+		xn_params.send_buf.iov_len = buf_sz;
 
 		vqvm->vport_id = cpu_to_le32(vport->vport_id);
 		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
 		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
 
-		if (map) {
-			err = idpf_send_mb_msg(adapter,
-					       VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
-					       buf_sz, (u8 *)vqvm, 0);
-			if (!err)
-				err = idpf_wait_for_event(adapter, vport,
-							  IDPF_VC_MAP_IRQ,
-							  IDPF_VC_MAP_IRQ_ERR);
-		} else {
-			err = idpf_send_mb_msg(adapter,
-					       VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
-					       buf_sz, (u8 *)vqvm, 0);
-			if (!err)
-				err =
-				idpf_min_wait_for_event(adapter, vport,
-							IDPF_VC_UNMAP_IRQ,
-							IDPF_VC_UNMAP_IRQ_ERR);
-		}
-		if (err)
-			goto mbx_error;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;
 
 		k += num_chunks;
 		num_q -= num_chunks;
@@ -2464,13 +2363,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
 	}
 
-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(vqvm);
-error:
-	kfree(vqv);
-
-	return err;
+	return 0;
 }
 
 /**
@@ -2482,7 +2375,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
  */
 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
 {
-	return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+	return idpf_send_ena_dis_queues_msg(vport, true);
 }
 
 /**
@@ -2496,7 +2389,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
 {
 	int err, i;
 
-	err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+	err = idpf_send_ena_dis_queues_msg(vport, false);
 	if (err)
 		return err;
@@ -2542,22 +2435,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
  */
 int idpf_send_delete_queues_msg(struct idpf_vport *vport)
 {
-	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
 	struct virtchnl2_create_vport *vport_params;
 	struct virtchnl2_queue_reg_chunks *chunks;
-	struct virtchnl2_del_ena_dis_queues *eq;
+	struct idpf_vc_xn_params xn_params = {};
 	struct idpf_vport_config *vport_config;
 	u16 vport_idx = vport->idx;
-	int buf_size, err;
+	ssize_t reply_sz;
 	u16 num_chunks;
+	int buf_size;
 
-	vport_config = adapter->vport_config[vport_idx];
+	vport_config = vport->adapter->vport_config[vport_idx];
 	if (vport_config->req_qs_chunks) {
-		struct virtchnl2_add_queues *vc_aq =
-			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
-		chunks = &vc_aq->chunks;
+		chunks = &vport_config->req_qs_chunks->chunks;
 	} else {
-		vport_params = adapter->vport_params_recvd[vport_idx];
+		vport_params = vport->adapter->vport_params_recvd[vport_idx];
 		chunks = &vport_params->chunks;
 	}
 
@@ -2574,21 +2466,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
 					 num_chunks);
 
-	mutex_lock(&vport->vc_buf_lock);
+	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
 
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
-			       buf_size, (u8 *)eq, 0);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
-				      IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(eq);
+	xn_params.send_buf.iov_base = eq;
+	xn_params.send_buf.iov_len = buf_size;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
 
-	return err;
+	return reply_sz < 0 ? reply_sz : 0;
 }
 
 /**
@@ -2623,14 +2507,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
 			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
 {
-	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
 	struct idpf_vport_config *vport_config;
-	struct virtchnl2_add_queues aq = { };
-	struct virtchnl2_add_queues *vc_msg;
+	struct virtchnl2_add_queues aq = {};
 	u16 vport_idx = vport->idx;
-	int size, err;
+	ssize_t reply_sz;
+	int size;
 
-	vport_config = adapter->vport_config[vport_idx];
+	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+	if (!vc_msg)
+		return -ENOMEM;
+
+	vport_config = vport->adapter->vport_config[vport_idx];
+	kfree(vport_config->req_qs_chunks);
+	vport_config->req_qs_chunks = NULL;
 
 	aq.vport_id = cpu_to_le32(vport->vport_id);
 	aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2638,47 +2529,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
 	aq.num_rx_q = cpu_to_le16(num_rx_q);
 	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
 
-	mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
+	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	xn_params.send_buf.iov_base = &aq;
+	xn_params.send_buf.iov_len = sizeof(aq);
+	xn_params.recv_buf.iov_base = vc_msg;
+	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
 
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
-			       sizeof(struct virtchnl2_add_queues), (u8 *)&aq, 0);
-	if (err)
-		goto rel_lock;
-
-	/* We want vport to be const to prevent incidental code changes making
-	 * changes to the vport config. We're making a special exception here
-	 * to discard const to use the virtchnl.
-	 */
-	err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
-				  IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
-	if (err)
-		goto rel_lock;
-
-	kfree(vport_config->req_qs_chunks);
-	vport_config->req_qs_chunks = NULL;
-
-	vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
-
 	/* compare vc_msg num queues with vport num queues */
 	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
 	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
 	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
-	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
-		err = -EINVAL;
-		goto rel_lock;
-	}
+	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+		return -EINVAL;
 
 	size = struct_size(vc_msg, chunks.chunks,
 			   le16_to_cpu(vc_msg->chunks.num_chunks));
-	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
-	if (!vport_config->req_qs_chunks) {
-		err = -ENOMEM;
-		goto rel_lock;
-	}
+	if (reply_sz < size)
+		return -EIO;
 
-rel_lock:
-	mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+	if (!vport_config->req_qs_chunks)
+		return -ENOMEM;
 
-	return err;
+	return 0;
 }
 
 /**
...