Commit c2fed996 authored by Jeff Kirsher, committed by David S. Miller

e1000e: use GFP_KERNEL allocations at init time

In process context, where sleeping is allowed, favor GFP_KERNEL allocations
over GFP_ATOMIC ones.

-v2: fixed checkpatch.pl warnings

CC: Eric Dumazet <eric.dumazet@gmail.com>
CC: Ben Greear <greearb@candelatech.com>
CC: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3d72d5d
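
The mechanical change is the same throughout the diff below: each RX buffer
allocator gains a gfp_t parameter, the NAPI/softirq callers pass GFP_ATOMIC,
and the one init-time caller passes GFP_KERNEL. A minimal sketch of the
pattern, using a hypothetical "foo" driver rather than the actual e1000e
code:

/*
 * Sketch only (hypothetical "foo" driver): the refill helper takes a
 * gfp_t from its caller instead of hard-coding GFP_ATOMIC, so a
 * sleepable init path may pass GFP_KERNEL while the softirq-context
 * NAPI path keeps GFP_ATOMIC.
 */
#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_alloc_rx_buffers(struct net_device *netdev,
				 int cleaned_count, gfp_t gfp)
{
	while (cleaned_count--) {
		/* may sleep only if the caller passed a sleepable mask */
		struct sk_buff *skb =
			__netdev_alloc_skb_ip_align(netdev, 1522, gfp);

		if (!skb)
			break;	/* better luck on the next refill */
		/* ... map the buffer and post it to the hardware ring ... */
	}
}

/* init path: process context, sleeping allowed */
static int foo_open(struct net_device *netdev)
{
	foo_alloc_rx_buffers(netdev, 256, GFP_KERNEL);
	return 0;
}

/* RX cleanup path: NAPI softirq context, must not sleep */
static void foo_clean_rx_irq(struct net_device *netdev)
{
	foo_alloc_rx_buffers(netdev, 64, GFP_ATOMIC);
}
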
@@ -334,7 +334,7 @@ struct e1000_adapter {
 			  int *work_done, int work_to_do)
 						____cacheline_aligned_in_smp;
 	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-			      int cleaned_count);
+			      int cleaned_count, gfp_t gfp);
 	struct e1000_ring *rx_ring;
 
 	u32 rx_int_delay;
@@ -523,7 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
  * @adapter: address of board private structure
  **/
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-				   int cleaned_count)
+				   int cleaned_count, gfp_t gfp)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -544,7 +544,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			goto map_skb;
 		}
 
-		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
 		if (!skb) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -589,7 +589,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
  * @adapter: address of board private structure
  **/
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      int cleaned_count)
+				      int cleaned_count, gfp_t gfp)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -615,7 +615,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 				continue;
 			}
 			if (!ps_page->page) {
-				ps_page->page = alloc_page(GFP_ATOMIC);
+				ps_page->page = alloc_page(gfp);
 				if (!ps_page->page) {
 					adapter->alloc_rx_buff_failed++;
 					goto no_buffers;
@@ -641,8 +641,9 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 						cpu_to_le64(ps_page->dma);
 		}
 
-		skb = netdev_alloc_skb_ip_align(netdev,
-						adapter->rx_ps_bsize0);
+		skb = __netdev_alloc_skb_ip_align(netdev,
+						  adapter->rx_ps_bsize0,
+						  gfp);
 
 		if (!skb) {
 			adapter->alloc_rx_buff_failed++;
@@ -692,7 +693,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
  **/
 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
-					 int cleaned_count)
+					 int cleaned_count, gfp_t gfp)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -713,7 +714,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			goto check_page;
 		}
 
-		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -724,7 +725,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 check_page:
 		/* allocate a new page if necessary */
 		if (!buffer_info->page) {
-			buffer_info->page = alloc_page(GFP_ATOMIC);
+			buffer_info->page = alloc_page(gfp);
 			if (unlikely(!buffer_info->page)) {
 				adapter->alloc_rx_buff_failed++;
 				break;
@@ -888,7 +889,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-			adapter->alloc_rx_buf(adapter, cleaned_count);
+			adapter->alloc_rx_buf(adapter, cleaned_count,
+					      GFP_ATOMIC);
 			cleaned_count = 0;
 		}
@@ -900,7 +902,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	cleaned_count = e1000_desc_unused(rx_ring);
 	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, cleaned_count);
+		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
@@ -1230,7 +1232,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-			adapter->alloc_rx_buf(adapter, cleaned_count);
+			adapter->alloc_rx_buf(adapter, cleaned_count,
+					      GFP_ATOMIC);
 			cleaned_count = 0;
 		}
@@ -1244,7 +1247,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 	cleaned_count = e1000_desc_unused(rx_ring);
 	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, cleaned_count);
+		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
@@ -1411,7 +1414,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 		/* return some buffers to hardware, one at a time is too slow */
 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
-			adapter->alloc_rx_buf(adapter, cleaned_count);
+			adapter->alloc_rx_buf(adapter, cleaned_count,
+					      GFP_ATOMIC);
 			cleaned_count = 0;
 		}
@@ -1423,7 +1427,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 	cleaned_count = e1000_desc_unused(rx_ring);
 	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, cleaned_count);
+		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
@@ -3105,7 +3109,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
+	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+			      GFP_KERNEL);
 }
 
 /**
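The split in the last hunk is the point of the patch: e1000_configure() runs
in process context (it is reached from paths like e1000_open(), where
sleeping is allowed), so the initial ring fill can use GFP_KERNEL, which may
reclaim memory rather than dip into atomic reserves. The clean_*_irq()
refill paths above run from NAPI softirq context and therefore keep passing
GFP_ATOMIC.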