Commit 517e80e6 authored by Santiago Leon, committed by David S. Miller

ibmveth: Some formatting fixes

IbmVethNumBufferPools -> IBMVETH_NUM_BUFF_POOLS

Also change IBMVETH_MAX_MTU -> IBMVETH_MIN_MTU, since it refers to the minimum
size, not the maximum.
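
For context, the rename makes the MTU validation in ibmveth_change_mtu() read
the way it behaves. A minimal sketch of that check under the new names,
simplified from the diff below (ibmveth_mtu_ok and the trimmed structs are
illustrative stand-ins, not driver code; new_mtu_oh is the MTU plus
IBMVETH_BUFF_OH bytes of per-buffer overhead):

    #include <errno.h>

    #define IBMVETH_NUM_BUFF_POOLS 5  /* was IbmVethNumBufferPools */
    #define IBMVETH_MIN_MTU 68        /* was, misleadingly, IBMVETH_MAX_MTU */
    #define IBMVETH_BUFF_OH 22        /* 14 ethernet header + 8 opaque handle */

    /* Stand-ins with only the fields this sketch touches. */
    struct ibmveth_buff_pool { unsigned int buff_size; };
    struct ibmveth_adapter {
            struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
    };

    static int ibmveth_mtu_ok(struct ibmveth_adapter *adapter, int new_mtu)
    {
            int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
            int i;

            /* The MTU must be at least the minimum, hence MIN, not MAX. */
            if (new_mtu < IBMVETH_MIN_MTU)
                    return -EINVAL;

            /* Some buffer pool must be big enough for MTU plus overhead. */
            for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                    if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
                            break;
            if (i == IBMVETH_NUM_BUFF_POOLS)
                    return -EINVAL;

            return 0;
    }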
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 21c2dece
@@ -312,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 	adapter->replenish_task_cycles++;

-	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) {
+	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
 		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

 		if (pool->active &&
@@ -364,7 +364,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 	unsigned int free_index;
 	struct sk_buff *skb;

-	ibmveth_assert(pool < IbmVethNumBufferPools);
+	ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
 	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

 	skb = adapter->rx_buff_pool[pool].skbuff[index];
@@ -397,7 +397,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;

-	ibmveth_assert(pool < IbmVethNumBufferPools);
+	ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
 	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

 	return adapter->rx_buff_pool[pool].skbuff[index];
@@ -413,7 +413,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	union ibmveth_buf_desc desc;
 	unsigned long lpar_rc;

-	ibmveth_assert(pool < IbmVethNumBufferPools);
+	ibmveth_assert(pool < IBMVETH_NUM_BUFF_POOLS);
 	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

 	if(!adapter->rx_buff_pool[pool].active) {
@@ -487,7 +487,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->rx_queue.queue_addr = NULL;
 	}

-	for(i = 0; i<IbmVethNumBufferPools; i++)
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
@@ -545,7 +545,7 @@ static int ibmveth_open(struct net_device *netdev)
 	napi_enable(&adapter->napi);

-	for(i = 0; i<IbmVethNumBufferPools; i++)
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;

 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -621,7 +621,7 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENONET;
 	}

-	for(i = 0; i<IbmVethNumBufferPools; i++) {
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		if(!adapter->rx_buff_pool[i].active)
 			continue;
 		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
@@ -1248,14 +1248,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	int i, rc;
 	int need_restart = 0;

-	if (new_mtu < IBMVETH_MAX_MTU)
+	if (new_mtu < IBMVETH_MIN_MTU)
 		return -EINVAL;

-	for (i = 0; i < IbmVethNumBufferPools; i++)
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
 			break;

-	if (i == IbmVethNumBufferPools)
+	if (i == IBMVETH_NUM_BUFF_POOLS)
 		return -EINVAL;

 	/* Deactivate all the buffer pools so that the next loop can activate
@@ -1268,7 +1268,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	}

 	/* Look for an active buffer pool that can hold the new MTU */
-	for(i = 0; i<IbmVethNumBufferPools; i++) {
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		adapter->rx_buff_pool[i].active = 1;

 		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
@@ -1322,7 +1322,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
 	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

-	for (i = 0; i < IbmVethNumBufferPools; i++) {
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		/* add the size of the active receive buffers */
 		if (adapter->rx_buff_pool[i].active)
 			ret +=
@@ -1416,7 +1416,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

-	for(i = 0; i<IbmVethNumBufferPools; i++) {
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
 		int error;
@@ -1458,7 +1458,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	int i;

-	for(i = 0; i<IbmVethNumBufferPools; i++)
+	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		kobject_put(&adapter->rx_buff_pool[i].kobj);

 	unregister_netdev(netdev);
@@ -1522,7 +1522,7 @@ const char * buf, size_t count)
 		int i;
 		/* Make sure there is a buffer pool with buffers that
 		   can hold a packet of the size of the MTU */
-		for (i = 0; i < IbmVethNumBufferPools; i++) {
+		for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 			if (pool == &adapter->rx_buff_pool[i])
 				continue;
 			if (!adapter->rx_buff_pool[i].active)
@@ -1531,7 +1531,7 @@ const char * buf, size_t count)
 				break;
 		}

-		if (i == IbmVethNumBufferPools) {
+		if (i == IBMVETH_NUM_BUFF_POOLS) {
 			netdev_err(netdev, "no active pool >= MTU\n");
 			return -EPERM;
 		}
...
@@ -92,10 +92,10 @@ static inline long h_illan_attributes(unsigned long unit_address,
 #define h_change_logical_lan_mac(ua, mac) \
 	plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)

-#define IbmVethNumBufferPools 5
+#define IBMVETH_NUM_BUFF_POOLS 5
 #define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
-#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MIN_MTU 68
 #define IBMVETH_MAX_POOL_COUNT 4096
 #define IBMVETH_BUFF_LIST_SIZE 4096
 #define IBMVETH_FILT_LIST_SIZE 4096
@@ -142,7 +142,7 @@ struct ibmveth_adapter {
 	void * filter_list_addr;
 	dma_addr_t buffer_list_dma;
 	dma_addr_t filter_list_dma;
-	struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
+	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
 	struct ibmveth_rx_q rx_queue;
 	int pool_config;
 	int rx_csum;
...
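
A side note on the bounds these asserts enforce: the receive queue identifies
each buffer with a 64-bit correlator that packs the pool number into the upper
32 bits and the buffer index into the lower 32, as ibmveth_rxq_get_buffer()
above decodes it. A hedged sketch of that encoding (the helper names are made
up for illustration; only the shift/mask logic comes from the diff):

    #include <stdint.h>

    /* Pack a pool number and buffer index into one correlator word. */
    static inline uint64_t veth_make_correlator(unsigned int pool,
                                                unsigned int index)
    {
            return ((uint64_t)pool << 32) | index;  /* pool high, index low */
    }

    /* Recover both halves; the driver then asserts pool <
     * IBMVETH_NUM_BUFF_POOLS and index < rx_buff_pool[pool].size. */
    static inline void veth_split_correlator(uint64_t correlator,
                                             unsigned int *pool,
                                             unsigned int *index)
    {
            *pool  = correlator >> 32;
            *index = correlator & 0xffffffffUL;
    }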