Commit cce28794 authored by Jon Cooper, committed by Ben Hutchings

sfc: Make initial fill of RX descriptors synchronous

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent 92a04168
...@@ -1907,7 +1907,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, ...@@ -1907,7 +1907,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
* events, so efx_process_channel() won't refill the * events, so efx_process_channel() won't refill the
* queue. Refill it here * queue. Refill it here
*/ */
efx_fast_push_rx_descriptors(&channel->rx_queue); efx_fast_push_rx_descriptors(&channel->rx_queue, true);
break; break;
default: default:
netif_err(efx, hw, efx->net_dev, netif_err(efx, hw, efx->net_dev,
......
...@@ -253,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget) ...@@ -253,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel); efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel); efx_rx_flush_packet(channel);
efx_fast_push_rx_descriptors(rx_queue); efx_fast_push_rx_descriptors(rx_queue, true);
} }
return spent; return spent;
...@@ -646,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -646,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) { efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue); efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues); atomic_inc(&efx->active_queues);
efx_nic_generate_fill_event(rx_queue); efx_stop_eventq(channel);
efx_fast_push_rx_descriptors(rx_queue, false);
efx_start_eventq(channel);
} }
WARN_ON(channel->rx_pkt_n_frags); WARN_ON(channel->rx_pkt_n_frags);
......
...@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); ...@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue); void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context); void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel); void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
......
...@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel, ...@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx /* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the * events, so efx_process_channel() won't refill the
* queue. Refill it here */ * queue. Refill it here */
efx_fast_push_rx_descriptors(rx_queue); efx_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel); efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
......
...@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) ...@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
* 0 on success. If a single page can be used for multiple buffers, * 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all. * then the page will either be inserted fully, or not at all.
*/ */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{ {
struct efx_nic *efx = rx_queue->efx; struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf; struct efx_rx_buffer *rx_buf;
...@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) ...@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do { do {
page = efx_reuse_page(rx_queue); page = efx_reuse_page(rx_queue);
if (page == NULL) { if (page == NULL) {
page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, page = alloc_pages(__GFP_COLD | __GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order); efx->rx_buffer_order);
if (unlikely(page == NULL)) if (unlikely(page == NULL))
return -ENOMEM; return -ENOMEM;
...@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel, ...@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
* this means this function must run from the NAPI handler, or be called * this means this function must run from the NAPI handler, or be called
* when NAPI is disabled. * when NAPI is disabled.
*/ */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{ {
struct efx_nic *efx = rx_queue->efx; struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size; unsigned int fill_level, batch_size;
...@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) ...@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
do { do {
rc = efx_init_rx_buffers(rx_queue); rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) { if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */ /* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count) if (rx_queue->added_count == rx_queue->removed_count)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment