Commit d43732ce authored by Christoph Hellwig

ibmveth: properly unwind on init errors

That way the driver doesn't have to rely on DMA_ERROR_CODE, which
is not a public API and is going away.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David S. Miller <davem@davemloft.net>
parent 54ed8121
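[Editor's note: the error-handling shape this patch adopts is the standard kernel goto-unwind ladder: every acquisition gets a matching release label, and a failure at step N jumps to the label that tears down steps N-1 through 1 in reverse order. A minimal, self-contained sketch of the idiom follows; it uses plain C stand-ins (malloc/free) and hypothetical names, not the driver code itself.]

#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
	void *c;
};

/* Acquire three resources; on failure, release only what was acquired. */
static int ctx_init(struct ctx *ctx)
{
	ctx->a = malloc(64);
	if (!ctx->a)
		goto out;

	ctx->b = malloc(64);
	if (!ctx->b)
		goto out_free_a;

	ctx->c = malloc(64);
	if (!ctx->c)
		goto out_free_b;

	return 0;

out_free_b:
	free(ctx->b);
out_free_a:
	free(ctx->a);
out:
	return -1;	/* a kernel function would return -ENOMEM here */
}

[The labels read top to bottom in the reverse order of acquisition, which is exactly how the new out_* labels in ibmveth_open() below are arranged.]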
@@ -467,56 +467,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 	}
 }
 
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
-	int i;
-	struct device *dev = &adapter->vdev->dev;
-
-	if (adapter->buffer_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
-			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
-					DMA_BIDIRECTIONAL);
-			adapter->buffer_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->buffer_list_addr);
-		adapter->buffer_list_addr = NULL;
-	}
-
-	if (adapter->filter_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
-			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
-					DMA_BIDIRECTIONAL);
-			adapter->filter_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->filter_list_addr);
-		adapter->filter_list_addr = NULL;
-	}
-
-	if (adapter->rx_queue.queue_addr != NULL) {
-		dma_free_coherent(dev, adapter->rx_queue.queue_len,
-				  adapter->rx_queue.queue_addr,
-				  adapter->rx_queue.queue_dma);
-		adapter->rx_queue.queue_addr = NULL;
-	}
-
-	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
-		if (adapter->rx_buff_pool[i].active)
-			ibmveth_free_buffer_pool(adapter,
-						 &adapter->rx_buff_pool[i]);
-
-	if (adapter->bounce_buffer != NULL) {
-		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->bounce_buffer_dma,
-					adapter->netdev->mtu + IBMVETH_BUFF_OH,
-					DMA_BIDIRECTIONAL);
-			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
-		}
-		kfree(adapter->bounce_buffer);
-		adapter->bounce_buffer = NULL;
-	}
-}
-
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 		union ibmveth_buf_desc rxq_desc, u64 mac_address)
 {
@@ -573,14 +523,17 @@ static int ibmveth_open(struct net_device *netdev)
 	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;
 
+	rc = -ENOMEM;
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-
-	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
-		netdev_err(netdev, "unable to allocate filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	if (!adapter->buffer_list_addr) {
+		netdev_err(netdev, "unable to allocate list pages\n");
+		goto out;
+	}
+
+	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->filter_list_addr) {
+		netdev_err(netdev, "unable to allocate filter pages\n");
+		goto out_free_buffer_list;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -590,22 +543,21 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.queue_addr =
 		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
 				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
-	if (!adapter->rx_queue.queue_addr) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
+	if (!adapter->rx_queue.queue_addr)
+		goto out_free_filter_list;
 
 	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+		netdev_err(netdev, "unable to map buffer list pages\n");
+		goto out_free_queue_mem;
+	}
+
 	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
-	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
-		netdev_err(netdev, "unable to map filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+		netdev_err(netdev, "unable to map filter list pages\n");
+		goto out_unmap_buffer_list;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -636,7 +588,7 @@ static int ibmveth_open(struct net_device *netdev)
 			     rxq_desc.desc,
 			     mac_address);
 		rc = -ENONET;
-		goto err_out;
+		goto out_unmap_filter_list;
 	}
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -646,7 +598,7 @@ static int ibmveth_open(struct net_device *netdev)
 			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
 			rc = -ENOMEM;
-			goto err_out;
+			goto out_free_buffer_pools;
 		}
 	}
 
@@ -660,22 +612,21 @@ static int ibmveth_open(struct net_device *netdev)
 			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
-		goto err_out;
+		goto out_free_buffer_pools;
 	}
 
+	rc = -ENOMEM;
 	adapter->bounce_buffer =
 	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
-	if (!adapter->bounce_buffer) {
-		rc = -ENOMEM;
-		goto err_out_free_irq;
-	}
+	if (!adapter->bounce_buffer)
+		goto out_free_irq;
+
 	adapter->bounce_buffer_dma =
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		netdev_err(netdev, "unable to map bounce buffer\n");
-		rc = -ENOMEM;
-		goto err_out_free_irq;
+		goto out_free_bounce_buffer;
 	}
 
 	netdev_dbg(netdev, "initial replenish cycle\n");
@@ -687,10 +638,31 @@ static int ibmveth_open(struct net_device *netdev)
 
 	return 0;
 
-err_out_free_irq:
+out_free_bounce_buffer:
+	kfree(adapter->bounce_buffer);
+out_free_irq:
 	free_irq(netdev->irq, netdev);
-err_out:
-	ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+	while (--i >= 0) {
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+	}
+out_unmap_filter_list:
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+out_free_filter_list:
+	free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+	free_page((unsigned long)adapter->buffer_list_addr);
+out:
 	napi_disable(&adapter->napi);
 	return rc;
 }
@@ -698,7 +670,9 @@ static int ibmveth_open(struct net_device *netdev)
 
 static int ibmveth_close(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
+	struct device *dev = &adapter->vdev->dev;
 	long lpar_rc;
+	int i;
 
 	netdev_dbg(netdev, "close starting\n");
@@ -722,7 +696,27 @@ static int ibmveth_close(struct net_device *netdev)
 
 	ibmveth_update_rx_no_buffer(adapter);
 
-	ibmveth_cleanup(adapter);
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->buffer_list_addr);
+
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->filter_list_addr);
+
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+
+	dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+			 adapter->netdev->mtu + IBMVETH_BUFF_OH,
+			 DMA_BIDIRECTIONAL);
+	kfree(adapter->bounce_buffer);
 
 	netdev_dbg(netdev, "close complete\n");
@@ -1648,11 +1642,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
-	adapter->buffer_list_dma = DMA_ERROR_CODE;
-	adapter->filter_list_dma = DMA_ERROR_CODE;
-	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
 	netdev_dbg(netdev, "registering netdev...\n");
 
 	ibmveth_set_features(netdev, netdev->features);
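[Editor's note: a side effect of the unwind ladder is that DMA handles no longer need to be pre-seeded with DMA_ERROR_CODE in ibmveth_probe() and re-tested by a catch-all cleanup: each dma_map_single() result is checked once, immediately, with dma_mapping_error(). A kernel-style sketch of that pattern follows; the helper is hypothetical, and only the standard <linux/dma-mapping.h> API is assumed.]

#include <linux/dma-mapping.h>

/* Map one buffer and validate the handle right away; the caller can
 * then unwind via goto labels without ever needing a sentinel
 * "unmapped" value like DMA_ERROR_CODE. Hypothetical helper. */
static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}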
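[Editor's note: one detail worth calling out in the new ibmveth_open() error path is that out_free_buffer_pools uses while (--i >= 0), so only the buffer pools set up before the failing iteration are freed. A minimal stand-in sketch of that partial-unwind loop, with hypothetical names and ordinary malloc/free in place of the driver's pool helpers:]

#include <stdlib.h>

#define NPOOLS 8	/* stand-in for IBMVETH_NUM_BUFF_POOLS */

/* Set up NPOOLS entries; on failure at index i, walk back over
 * slots i-1 .. 0 and release only those, skipping the failed slot. */
static int setup_pools(void *pools[NPOOLS])
{
	int i;

	for (i = 0; i < NPOOLS; i++) {
		pools[i] = malloc(128);
		if (!pools[i])
			goto out_free_pools;
	}
	return 0;

out_free_pools:
	while (--i >= 0)	/* frees entries i-1 .. 0 only */
		free(pools[i]);
	return -1;
}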