Commit b6ee566c authored by Dany Madden, committed by David S. Miller

ibmvnic: Update driver return codes

Update return codes to be more informative.
Signed-off-by: Jacob Root <otis@otisroot.com>
Signed-off-by: Dany Madden <drt@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dc91e3be
...@@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, ...@@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) { if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map LTB, rc = %d\n", dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc); adapter->fw_done_rc);
rc = -1; rc = -EIO;
goto out; goto out;
} }
rc = 0; rc = 0;
...@@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) ...@@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
{ {
struct device *dev = &adapter->vdev->dev; struct device *dev = &adapter->vdev->dev;
dma_addr_t stok; dma_addr_t stok;
int rc;
stok = dma_map_single(dev, &adapter->stats, stok = dma_map_single(dev, &adapter->stats,
sizeof(struct ibmvnic_statistics), sizeof(struct ibmvnic_statistics),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (dma_mapping_error(dev, stok)) { rc = dma_mapping_error(dev, stok);
dev_err(dev, "Couldn't map stats buffer\n"); if (rc) {
return -1; dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
return rc;
} }
adapter->stats_token = stok; adapter->stats_token = stok;
...@@ -655,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev) ...@@ -655,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev)
u64 num_pools; u64 num_pools;
u64 pool_size; /* # of buffers in one pool */ u64 pool_size; /* # of buffers in one pool */
u64 buff_size; u64 buff_size;
int i, j; int i, j, rc;
pool_size = adapter->req_rx_add_entries_per_subcrq; pool_size = adapter->req_rx_add_entries_per_subcrq;
num_pools = adapter->req_rx_queues; num_pools = adapter->req_rx_queues;
...@@ -674,7 +676,7 @@ static int init_rx_pools(struct net_device *netdev) ...@@ -674,7 +676,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL); GFP_KERNEL);
if (!adapter->rx_pool) { if (!adapter->rx_pool) {
dev_err(dev, "Failed to allocate rx pools\n"); dev_err(dev, "Failed to allocate rx pools\n");
return -1; return -ENOMEM;
} }
/* Set num_active_rx_pools early. If we fail below after partial /* Set num_active_rx_pools early. If we fail below after partial
...@@ -697,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev) ...@@ -697,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL); GFP_KERNEL);
if (!rx_pool->free_map) { if (!rx_pool->free_map) {
dev_err(dev, "Couldn't alloc free_map %d\n", i); dev_err(dev, "Couldn't alloc free_map %d\n", i);
rc = -ENOMEM;
goto out_release; goto out_release;
} }
...@@ -705,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev) ...@@ -705,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL); GFP_KERNEL);
if (!rx_pool->rx_buff) { if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n"); dev_err(dev, "Couldn't alloc rx buffers\n");
rc = -ENOMEM;
goto out_release; goto out_release;
} }
} }
...@@ -718,8 +722,9 @@ static int init_rx_pools(struct net_device *netdev) ...@@ -718,8 +722,9 @@ static int init_rx_pools(struct net_device *netdev)
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size); i, rx_pool->size, rx_pool->buff_size);
if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
rx_pool->size * rx_pool->buff_size)) rx_pool->size * rx_pool->buff_size);
if (rc)
goto out; goto out;
for (j = 0; j < rx_pool->size; ++j) { for (j = 0; j < rx_pool->size; ++j) {
...@@ -756,7 +761,7 @@ static int init_rx_pools(struct net_device *netdev) ...@@ -756,7 +761,7 @@ static int init_rx_pools(struct net_device *netdev)
/* We failed to allocate one or more LTBs or map them on the VIOS. /* We failed to allocate one or more LTBs or map them on the VIOS.
* Hold onto the pools and any LTBs that we did allocate/map. * Hold onto the pools and any LTBs that we did allocate/map.
*/ */
return -1; return rc;
} }
static void release_vpd_data(struct ibmvnic_adapter *adapter) static void release_vpd_data(struct ibmvnic_adapter *adapter)
...@@ -817,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev, ...@@ -817,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev,
sizeof(struct ibmvnic_tx_buff), sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL); GFP_KERNEL);
if (!tx_pool->tx_buff) if (!tx_pool->tx_buff)
return -1; return -ENOMEM;
tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map) { if (!tx_pool->free_map) {
kfree(tx_pool->tx_buff); kfree(tx_pool->tx_buff);
tx_pool->tx_buff = NULL; tx_pool->tx_buff = NULL;
return -1; return -ENOMEM;
} }
for (i = 0; i < pool_size; i++) for (i = 0; i < pool_size; i++)
...@@ -914,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev) ...@@ -914,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tx_pool = kcalloc(num_pools, adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool) if (!adapter->tx_pool)
return -1; return -ENOMEM;
adapter->tso_pool = kcalloc(num_pools, adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
...@@ -924,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev) ...@@ -924,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tso_pool) { if (!adapter->tso_pool) {
kfree(adapter->tx_pool); kfree(adapter->tx_pool);
adapter->tx_pool = NULL; adapter->tx_pool = NULL;
return -1; return -ENOMEM;
} }
/* Set num_active_tx_pools early. If we fail below after partial /* Set num_active_tx_pools early. If we fail below after partial
...@@ -1113,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev) ...@@ -1113,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev)
retry = false; retry = false;
if (retry_count > retries) { if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n"); netdev_warn(netdev, "Login attempts exceeded\n");
return -1; return -EACCES;
} }
adapter->init_done_rc = 0; adapter->init_done_rc = 0;
...@@ -1154,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev) ...@@ -1154,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev)
timeout)) { timeout)) {
netdev_warn(netdev, netdev_warn(netdev,
"Capabilities query timed out\n"); "Capabilities query timed out\n");
return -1; return -ETIMEDOUT;
} }
rc = init_sub_crqs(adapter); rc = init_sub_crqs(adapter);
if (rc) { if (rc) {
netdev_warn(netdev, netdev_warn(netdev,
"SCRQ initialization failed\n"); "SCRQ initialization failed\n");
return -1; return rc;
} }
rc = init_sub_crq_irqs(adapter); rc = init_sub_crq_irqs(adapter);
if (rc) { if (rc) {
netdev_warn(netdev, netdev_warn(netdev,
"SCRQ irq initialization failed\n"); "SCRQ irq initialization failed\n");
return -1; return rc;
} }
} else if (adapter->init_done_rc) { } else if (adapter->init_done_rc) {
netdev_warn(netdev, "Adapter login failed\n"); netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
return -1; adapter->init_done_rc);
return -EIO;
} }
} while (retry); } while (retry);
...@@ -1231,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) ...@@ -1231,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
if (!wait_for_completion_timeout(&adapter->init_done, if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) { timeout)) {
netdev_err(netdev, "timeout setting link state\n"); netdev_err(netdev, "timeout setting link state\n");
return -1; return -ETIMEDOUT;
} }
if (adapter->init_done_rc == PARTIALSUCCESS) { if (adapter->init_done_rc == PARTIALSUCCESS) {
...@@ -2288,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, ...@@ -2288,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* If someone else changed the adapter state /* If someone else changed the adapter state
* when we dropped the rtnl, fail the reset * when we dropped the rtnl, fail the reset
*/ */
rc = -1; rc = -EAGAIN;
goto out; goto out;
} }
adapter->state = VNIC_CLOSED; adapter->state = VNIC_CLOSED;
...@@ -2330,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, ...@@ -2330,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
} }
rc = ibmvnic_reset_init(adapter, true); rc = ibmvnic_reset_init(adapter, true);
if (rc) { if (rc)
rc = IBMVNIC_INIT_FAILED;
goto out; goto out;
}
/* If the adapter was in PROBE or DOWN state prior to the reset, /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here. * exit here.
...@@ -3763,7 +3767,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) ...@@ -3763,7 +3767,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues) if (!allqueues)
return -1; return -ENOMEM;
for (i = 0; i < total_queues; i++) { for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter); allqueues[i] = init_sub_crq_queue(adapter);
...@@ -3832,7 +3836,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) ...@@ -3832,7 +3836,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < registered_queues; i++) for (i = 0; i < registered_queues; i++)
release_sub_crq_queue(adapter, allqueues[i], 1); release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues); kfree(allqueues);
return -1; return -ENOMEM;
} }
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
...@@ -4191,7 +4195,7 @@ static int send_login(struct ibmvnic_adapter *adapter) ...@@ -4191,7 +4195,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq) { if (!adapter->tx_scrq || !adapter->rx_scrq) {
netdev_err(adapter->netdev, netdev_err(adapter->netdev,
"RX or TX queues are not allocated, device login failed\n"); "RX or TX queues are not allocated, device login failed\n");
return -1; return -ENOMEM;
} }
release_login_buffer(adapter); release_login_buffer(adapter);
...@@ -4311,7 +4315,7 @@ static int send_login(struct ibmvnic_adapter *adapter) ...@@ -4311,7 +4315,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
kfree(login_buffer); kfree(login_buffer);
adapter->login_buf = NULL; adapter->login_buf = NULL;
buf_alloc_failed: buf_alloc_failed:
return -1; return -ENOMEM;
} }
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
...@@ -5632,7 +5636,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) ...@@ -5632,7 +5636,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n"); dev_err(dev, "Initialization sequence timed out\n");
return -1; return -ETIMEDOUT;
} }
if (adapter->init_done_rc) { if (adapter->init_done_rc) {
...@@ -5643,7 +5647,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) ...@@ -5643,7 +5647,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->from_passive_init) { if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN; adapter->state = VNIC_OPEN;
adapter->from_passive_init = false; adapter->from_passive_init = false;
return -1; return -EINVAL;
} }
if (reset && if (reset &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment