Commit 7c734359 authored by Ron Mercer, committed by David S. Miller

qlge: Size RX buffers based on MTU.

Change the RX large buffer size based on the MTU. If the page size is
larger than the MTU, the page is divided into multiple chunks and passed
to the hardware. When the page size is smaller than the MTU, each RX
buffer is comprised of up to 2 pages.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent eb2ff967
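
As a quick illustration of the sizing rule described above (and applied in ql_configure_rings() and ql_change_rx_buffers() further down), here is a minimal standalone C sketch; the hard-coded 4 KB PAGE_SIZE, the userspace get_order() stand-in, and the main() driver are assumptions for illustration only, not driver code:

#include <stdio.h>

/* Illustrative stand-ins for the kernel constants used by the patch. */
#define PAGE_SIZE              4096u    /* assumed 4 KB pages */
#define PAGE_SHIFT             12
#define LARGE_BUFFER_MAX_SIZE  8192u
#define LARGE_BUFFER_MIN_SIZE  2048u

/* Rough userspace equivalent of the kernel's get_order(): the power-of-two
 * number of PAGE_SIZE pages needed to hold 'size' bytes. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 9000 };

	for (int i = 0; i < 2; i++) {
		unsigned int mtu = mtus[i];
		/* Same selection the patch makes in ql_configure_rings(). */
		unsigned int lbq_buf_len = (mtu > 1500) ?
			LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
		unsigned int order = get_order(lbq_buf_len);

		printf("mtu %u: lbq_buf_size %u, order %u, block %u bytes, %u chunk(s)/block\n",
		       mtu, lbq_buf_len, order,
		       PAGE_SIZE << order, (PAGE_SIZE << order) / lbq_buf_len);
	}
	return 0;
}

With these assumptions the standard MTU gets a 2048-byte buffer (two chunks carved from each 4 KB page), while jumbo MTU gets an 8192-byte buffer backed by an order-1, two-page allocation, which matches the two cases the commit message describes.
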
@@ -56,7 +56,8 @@
 	MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
 #define SMALL_BUFFER_SIZE 512
 #define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
-#define LARGE_BUFFER_SIZE PAGE_SIZE
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
@@ -1201,9 +1202,17 @@ struct tx_ring_desc {
 	struct tx_ring_desc *next;
 };

+struct page_chunk {
+	struct page *page;	/* master page */
+	char *va;		/* virt addr for this chunk */
+	u64 map;		/* mapping for master */
+	unsigned int offset;	/* offset for this chunk */
+	unsigned int last_flag;	/* flag set for last chunk in page */
+};
+
 struct bq_desc {
 	union {
-		struct page *lbq_page;
+		struct page_chunk pg_chunk;
 		struct sk_buff *skb;
 	} p;
 	__le64 *addr;
@@ -1272,6 +1281,7 @@ struct rx_ring {
 	dma_addr_t lbq_base_dma;
 	void *lbq_base_indirect;
 	dma_addr_t lbq_base_indirect_dma;
+	struct page_chunk pg_chunk; /* current page for chunks */
 	struct bq_desc *lbq;	/* array of control blocks */
 	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
 	u32 lbq_prod_idx;	/* current sw prod idx */
@@ -1526,6 +1536,7 @@ struct ql_adapter {
 	struct rx_ring rx_ring[MAX_RX_RINGS];
 	struct tx_ring tx_ring[MAX_TX_RINGS];
+	unsigned int lbq_buf_order;
 	int rx_csum;
 	u32 default_rx_queue;
...
@@ -1025,6 +1025,11 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
 	return status;
 }

+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+	return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
 /* Get the next large buffer. */
 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 	return lbq_desc;
 }

+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+					pci_unmap_addr(lbq_desc, mapaddr),
+					rx_ring->lbq_buf_size,
+					PCI_DMA_FROMDEVICE);
+
+	/* If it's the last chunk of our master page then
+	 * we unmap it.
+	 */
+	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+					== ql_lbq_block_size(qdev))
+		pci_unmap_page(qdev->pdev,
+				lbq_desc->p.pg_chunk.map,
+				ql_lbq_block_size(qdev),
+				PCI_DMA_FROMDEVICE);
+	return lbq_desc;
+}
+
 /* Get the next small buffer. */
 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
 }

+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+						struct bq_desc *lbq_desc)
+{
+	if (!rx_ring->pg_chunk.page) {
+		u64 map;
+		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+						GFP_ATOMIC,
+						qdev->lbq_buf_order);
+		if (unlikely(!rx_ring->pg_chunk.page)) {
+			QPRINTK(qdev, DRV, ERR,
+				"page allocation failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.offset = 0;
+		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+					0, ql_lbq_block_size(qdev),
+					PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(qdev->pdev, map)) {
+			__free_pages(rx_ring->pg_chunk.page,
+					qdev->lbq_buf_order);
+			QPRINTK(qdev, DRV, ERR,
+				"PCI mapping failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.map = map;
+		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+	}
+
+	/* Copy the current master pg_chunk info
+	 * to the current descriptor.
+	 */
+	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+	/* Adjust the master page chunk for next
+	 * buffer get.
+	 */
+	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+		rx_ring->pg_chunk.page = NULL;
+		lbq_desc->p.pg_chunk.last_flag = 1;
+	} else {
+		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+		get_page(rx_ring->pg_chunk.page);
+		lbq_desc->p.pg_chunk.last_flag = 0;
+	}
+	return 0;
+}
+
 /* Process (refill) a large buffer queue. */
 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	u64 map;
 	int i;

-	while (rx_ring->lbq_free_cnt > 16) {
+	while (rx_ring->lbq_free_cnt > 32) {
 		for (i = 0; i < 16; i++) {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"lbq: try cleaning clean_idx = %d.\n",
 				clean_idx);
 			lbq_desc = &rx_ring->lbq[clean_idx];
-			if (lbq_desc->p.lbq_page == NULL) {
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"lbq: getting new page for index %d.\n",
-					lbq_desc->index);
-				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
-				if (lbq_desc->p.lbq_page == NULL) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"Couldn't get a page.\n");
-					return;
-				}
-				map = pci_map_page(qdev->pdev,
-						   lbq_desc->p.lbq_page,
-						   0, PAGE_SIZE,
-						   PCI_DMA_FROMDEVICE);
-				if (pci_dma_mapping_error(qdev->pdev, map)) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					put_page(lbq_desc->p.lbq_page);
-					lbq_desc->p.lbq_page = NULL;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"PCI mapping failed.\n");
-					return;
-				}
-				pci_unmap_addr_set(lbq_desc, mapaddr, map);
-				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-				*lbq_desc->addr = cpu_to_le64(map);
-			}
+			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Could not get a page chunk.\n");
+				return;
+			}
+
+			map = lbq_desc->p.pg_chunk.map +
+				lbq_desc->p.pg_chunk.offset;
+			pci_unmap_addr_set(lbq_desc, mapaddr, map);
+			pci_unmap_len_set(lbq_desc, maplen,
+					rx_ring->lbq_buf_size);
+			*lbq_desc->addr = cpu_to_le64(map);
+
+			pci_dma_sync_single_for_device(qdev->pdev, map,
+						rx_ring->lbq_buf_size,
+						PCI_DMA_FROMDEVICE);
+
 			clean_idx++;
 			if (clean_idx == rx_ring->lbq_len)
 				clean_idx = 0;
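
The refill loop above gets its buffers from ql_get_next_chunk(), which carves one higher-order page allocation into lbq_buf_size-sized chunks. The small userspace model below walks one block the same way; the constants (4 KB page, 2048-byte buffers for a 1500 MTU) and the explicit refcount variable are illustrative assumptions, not driver state:

#include <stdio.h>

/* Userspace model of the chunking done by ql_get_next_chunk(): a block of
 * PAGE_SIZE << lbq_buf_order bytes is handed out lbq_buf_size bytes at a
 * time; every chunk but the last takes an extra page reference, and the
 * last chunk is flagged so the consumer knows when to unmap the block. */
#define PAGE_SIZE     4096u
#define LBQ_BUF_SIZE  2048u	/* buffer size chosen for MTU 1500 */
#define LBQ_BUF_ORDER 0		/* get_order(2048) */

int main(void)
{
	unsigned int block_size = PAGE_SIZE << LBQ_BUF_ORDER;
	unsigned int offset = 0;
	unsigned int refcount = 1;	/* alloc_pages() starts at 1 */

	while (1) {
		int last = (offset + LBQ_BUF_SIZE) == block_size;

		if (!last)
			refcount++;	/* models get_page() on the master page */
		printf("chunk at offset %4u, last_flag=%d, page refcount=%u\n",
		       offset, last, refcount);
		if (last)
			break;	/* the next call would allocate a fresh block */
		offset += LBQ_BUF_SIZE;
	}
	return 0;
}

Every chunk except the last takes an extra reference via get_page(), so the master page is only freed once each chunk's reference (held by the skb it was attached to, or dropped in ql_free_lbq_buffers()) has been released; the chunk carrying last_flag is also the point at which ql_get_curr_lchunk() unmaps the whole block.
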
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * chain it to the header buffer's skb and let
 		 * it rip.
 		 */
-		lbq_desc = ql_get_curr_lbuf(rx_ring);
-		pci_unmap_page(qdev->pdev,
-			       pci_unmap_addr(lbq_desc,
-					      mapaddr),
-			       pci_unmap_len(lbq_desc, maplen),
-			       PCI_DMA_FROMDEVICE);
+		lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 		QPRINTK(qdev, RX_STATUS, DEBUG,
-			"Chaining page to skb.\n");
-		skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-				   0, length);
+			"Chaining page at offset = %d,"
+			"for %d bytes to skb.\n",
+			lbq_desc->p.pg_chunk.offset, length);
+		skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+					lbq_desc->p.pg_chunk.offset,
+					length);
 		skb->len += length;
 		skb->data_len += length;
 		skb->truesize += length;
-		lbq_desc->p.lbq_page = NULL;
 	} else {
 		/*
 		 * The headers and data are in a single large buffer. We
 		 * copy it to a new skb and let it go. This can happen with
 		 * jumbo mtu on a non-TCP/UDP frame.
 		 */
-		lbq_desc = ql_get_curr_lbuf(rx_ring);
+		lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 		skb = netdev_alloc_skb(qdev->ndev, length);
 		if (skb == NULL) {
 			QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		skb_reserve(skb, NET_IP_ALIGN);
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
-		skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-				   0, length);
+		skb_fill_page_desc(skb, 0,
+					lbq_desc->p.pg_chunk.page,
+					lbq_desc->p.pg_chunk.offset,
+					length);
 		skb->len += length;
 		skb->data_len += length;
 		skb->truesize += length;
 		length -= length;
-		lbq_desc->p.lbq_page = NULL;
 		__pskb_pull_tail(skb,
 			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
 			VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * frames. If the MTU goes up we could
 		 * eventually be in trouble.
 		 */
-		int size, offset, i = 0;
-		__le64 *bq, bq_array[8];
+		int size, i = 0;
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"%d bytes of headers & data in chain of large.\n", length);
 			skb = sbq_desc->p.skb;
-			bq = &bq_array[0];
-			memcpy(bq, skb->data, sizeof(bq_array));
 			sbq_desc->p.skb = NULL;
 			skb_reserve(skb, NET_IP_ALIGN);
-		} else {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"Headers in small, %d bytes of data in chain of large.\n", length);
-			bq = (__le64 *)sbq_desc->p.skb->data;
 		}
 		while (length > 0) {
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc,
-						      mapaddr),
-				       pci_unmap_len(lbq_desc,
-						     maplen),
-				       PCI_DMA_FROMDEVICE);
-			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
-			offset = 0;
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+			size = (length < rx_ring->lbq_buf_size) ? length :
+				rx_ring->lbq_buf_size;

 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"Adding page %d to skb for %d bytes.\n",
 				i, size);
-			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
-					   offset, size);
+			skb_fill_page_desc(skb, i,
+						lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						size);
 			skb->len += size;
 			skb->data_len += size;
 			skb->truesize += size;
 			length -= size;
-			lbq_desc->p.lbq_page = NULL;
-			bq++;
 			i++;
 		}
 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -2305,20 +2353,29 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
-	int i;
 	struct bq_desc *lbq_desc;
+	uint32_t curr_idx, clean_idx;

-	for (i = 0; i < rx_ring->lbq_len; i++) {
-		lbq_desc = &rx_ring->lbq[i];
-		if (lbq_desc->p.lbq_page) {
+	curr_idx = rx_ring->lbq_curr_idx;
+	clean_idx = rx_ring->lbq_clean_idx;
+	while (curr_idx != clean_idx) {
+		lbq_desc = &rx_ring->lbq[curr_idx];
+		if (lbq_desc->p.pg_chunk.last_flag) {
 			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc, mapaddr),
-				       pci_unmap_len(lbq_desc, maplen),
-				       PCI_DMA_FROMDEVICE);
-
-			put_page(lbq_desc->p.lbq_page);
-			lbq_desc->p.lbq_page = NULL;
+				lbq_desc->p.pg_chunk.map,
+				ql_lbq_block_size(qdev),
+				PCI_DMA_FROMDEVICE);
+			lbq_desc->p.pg_chunk.last_flag = 0;
 		}
+
+		put_page(lbq_desc->p.pg_chunk.page);
+		lbq_desc->p.pg_chunk.page = NULL;
+
+		if (++curr_idx == rx_ring->lbq_len)
+			curr_idx = 0;
 	}
 }
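
The reworked free routine only walks the descriptors that were filled but not yet consumed, the window between lbq_curr_idx and lbq_clean_idx, wrapping at lbq_len. A tiny standalone model of that wrapping walk follows; the ring length and index values are made-up for illustration:

#include <stdio.h>

/* Model of the index walk in the reworked ql_free_lbq_buffers(): only the
 * descriptors between lbq_curr_idx (next to be consumed) and lbq_clean_idx
 * (next to be refilled) still hold page chunks, so only that window is
 * visited, with wraparound at the ring length. */
#define LBQ_LEN 8

int main(void)
{
	unsigned int curr_idx = 6;	/* pretend consumer position */
	unsigned int clean_idx = 3;	/* pretend refill position */

	while (curr_idx != clean_idx) {
		/* Here the driver drops the chunk's page reference and, when
		 * last_flag is set, unmaps the whole block. */
		printf("freeing lbq descriptor %u\n", curr_idx);
		if (++curr_idx == LBQ_LEN)
			curr_idx = 0;
	}
	return 0;
}

Inside the real loop the driver also unmaps the block when it reaches the descriptor holding last_flag, mirroring what ql_get_curr_lchunk() does on the hot path.
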
@@ -2616,6 +2673,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	/* Set up the shadow registers for this ring. */
 	rx_ring->prod_idx_sh_reg = shadow_reg;
 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	*rx_ring->prod_idx_sh_reg = 0;
 	shadow_reg += sizeof(u64);
 	shadow_reg_dma += sizeof(u64);
 	rx_ring->lbq_base_indirect = shadow_reg;
@@ -3496,6 +3554,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 	struct rx_ring *rx_ring;
 	struct tx_ring *tx_ring;
 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+	qdev->lbq_buf_order = get_order(lbq_buf_len);

 	/* In a perfect world we have one RSS ring for each CPU
 	 * and each has it's own vector. To do that we ask for
@@ -3543,7 +3605,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
 				rx_ring->lbq_len * sizeof(__le64);
-			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+			QPRINTK(qdev, IFUP, DEBUG,
+				"lbq_buf_size %d, order = %d\n",
+				rx_ring->lbq_buf_size, qdev->lbq_buf_order);
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
 				rx_ring->sbq_len * sizeof(__le64);
@@ -3593,14 +3658,63 @@ static int qlge_open(struct net_device *ndev)
 	return err;
 }

+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+	struct rx_ring *rx_ring;
+	int i, status;
+	u32 lbq_buf_len;
+
+	/* Wait for an oustanding reset to complete. */
+	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+		int i = 3;
+		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Waiting for adapter UP...\n");
+			ssleep(1);
+		}
+
+		if (!i) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Timed out waiting for adapter UP\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	/* Get the new rx buffer size. */
+	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+	for (i = 0; i < qdev->rss_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Set the new size. */
+		rx_ring->lbq_buf_size = lbq_buf_len;
+	}
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	return status;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device.\n");
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	dev_close(qdev->ndev);
+	return status;
+}
+
 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
+
 	if (ndev->mtu == 1500 && new_mtu == 9000) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
-		queue_delayed_work(qdev->workqueue,
-				&qdev->mpi_port_cfg_work, 0);
 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
 	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3608,8 +3722,23 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 		return 0;
 	} else
 		return -EINVAL;
+
+	queue_delayed_work(qdev->workqueue,
+			&qdev->mpi_port_cfg_work, 3*HZ);
+
+	if (!netif_running(qdev->ndev)) {
 		ndev->mtu = new_mtu;
 		return 0;
+	}
+
+	ndev->mtu = new_mtu;
+	status = ql_change_rx_buffers(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Changing MTU failed.\n");
+	}
+
+	return status;
 }

 static struct net_device_stats *qlge_get_stats(struct net_device
...