Commit baf9573e authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
Miscellaneous changes for 3.14:

1. Add more information to some WARN messages.
2. Refactor pushing of RSS configuration, from Andrew Rybchenko.
3. Refactor handling of automatic (device address list) vs manual (RX
NFC) MAC filters.
4. Implement clearing of manual RX filters on EF10 when ntuple offload
is disabled.
5. Remove definitions that are unused since the RX buffer allocation
changes, from Andrew Rybchenko.
6. Improve naming of some statistics, from Shradha Shah.
7. Add statistics for PTP support code.
8. Fix insertion of RX drop filters on EF10.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f66fd2dd a0bc3487
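
Items 3 and 4 of the summary hinge on the new EFX_FILTER_PRI_AUTO priority and on clearing RX filters by a priority mask rather than by a single priority. A minimal sketch of that mask logic, distilled from the efx_ef10_filter_clear_rx() change in the diff below (the helper name clear_rx_priority_mask is illustrative only and not part of the commit):

/* Clearing at priority P removes every RX filter at or below P,
 * but never the automatic (device address list) filters.
 * The enum mirrors filter.h after this series.
 */
enum efx_filter_priority {
	EFX_FILTER_PRI_HINT = 0,
	EFX_FILTER_PRI_AUTO,
	EFX_FILTER_PRI_MANUAL,
	EFX_FILTER_PRI_REQUIRED,
};

static unsigned int clear_rx_priority_mask(enum efx_filter_priority priority)
{
	return (((1U << (priority + 1)) - 1) &	/* priorities 0..priority */
		~(1U << EFX_FILTER_PRI_AUTO));	/* but keep automatic filters */
}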
@@ -53,31 +53,31 @@ struct efx_ef10_filter_table {
 	struct {
 		unsigned long spec;	/* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress.  STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
  */
 #define EFX_EF10_FILTER_FLAG_BUSY	1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
 #define EFX_EF10_FILTER_FLAGS		3UL
 		u64 handle;		/* firmware handle */
 	} *entry;
 	wait_queue_head_t waitq;
 /* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX	32
-#define EFX_EF10_FILTER_STACK_MC_MAX	256
+#define EFX_EF10_FILTER_DEV_UC_MAX	32
+#define EFX_EF10_FILTER_DEV_MC_MAX	256
 	struct {
 		u8 addr[ETH_ALEN];
 		u16 id;
-	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
-	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
-	int stack_uc_count;		/* negative for PROMISC */
-	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
+	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+	int dev_uc_count;		/* negative for PROMISC */
+	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
 };
 
 /* An arbitrary search limit for the software hash table */
 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
 
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -679,7 +679,7 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
 		nic_data->must_restore_piobufs = false;
 	}
 
-	efx_ef10_rx_push_indir_table(efx);
+	efx_ef10_rx_push_rss_config(efx);
 	return 0;
 }
 
@@ -774,8 +774,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
 	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
 	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
-	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
-	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+	EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+	EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -844,8 +844,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 			       (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
 			       (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
 			       (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
-			       (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
-			       (1ULL << EF10_STAT_rx_dp_emerg_wait))
+			       (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
+			       (1ULL << EF10_STAT_rx_dp_hlb_wait))
 
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
@@ -1252,7 +1252,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 	return;
 
 fail:
-	WARN_ON(true);
+	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+		    tx_queue->queue);
 }
 
 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1419,12 +1420,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 }
 
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	int rc;
 
-	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+	netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
 
 	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
 		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
@@ -1492,9 +1493,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
 			  outbuf, sizeof(outbuf), &outlen);
-	WARN_ON(rc);
-
-	return;
+	if (rc)
+		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+			    efx_rx_queue_index(rx_queue));
 }
 
 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1718,8 +1719,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
 {
 	unsigned int rx_desc_ptr;
 
-	WARN_ON(rx_queue->scatter_n == 0);
-
 	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
 		  "scattered RX aborted (dropping %u buffers)\n",
 		  rx_queue->scatter_n);
@@ -1755,7 +1754,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
 
-	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+			    EFX_QWORD_FMT "\n",
+			    EFX_QWORD_VAL(*event));
 
 	rx_queue = efx_channel_get_rx_queue(channel);
 
@@ -1770,7 +1772,12 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 		/* detect rx abort */
 		if (unlikely(n_descs == rx_queue->scatter_n)) {
-			WARN_ON(rx_bytes != 0);
+			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+				netdev_WARN(efx->net_dev,
+					    "invalid RX abort: scatter_n=%u event="
+					    EFX_QWORD_FMT "\n",
+					    rx_queue->scatter_n,
+					    EFX_QWORD_VAL(*event));
 			efx_ef10_handle_rx_abort(rx_queue);
 			return 0;
 		}
@@ -2238,7 +2245,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
-	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+		       0 : spec->dmaq_id);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
 		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
@@ -2334,10 +2343,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 			    EFX_EF10_FILTER_FLAG_BUSY)
 				break;
 			if (spec->priority < saved_spec->priority &&
-			    !(saved_spec->priority ==
-			      EFX_FILTER_PRI_REQUIRED &&
-			      saved_spec->flags &
-			      EFX_FILTER_FLAG_RX_STACK)) {
+			    spec->priority != EFX_FILTER_PRI_AUTO) {
 				rc = -EPERM;
 				goto out_unlock;
 			}
@@ -2391,11 +2397,13 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 		 */
 		saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
 		if (saved_spec) {
-			if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+			if (spec->priority == EFX_FILTER_PRI_AUTO &&
+			    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
 				/* Just make sure it won't be removed */
-				saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+				if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+					saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
 				table->entry[ins_index].spec &=
-					~EFX_EF10_FILTER_FLAG_STACK_OLD;
+					~EFX_EF10_FILTER_FLAG_AUTO_OLD;
 				rc = ins_index;
 				goto out_unlock;
 			}
@@ -2435,8 +2443,11 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 	if (rc == 0) {
 		if (replacing) {
 			/* Update the fields that may differ */
+			if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+				saved_spec->flags |=
+					EFX_FILTER_FLAG_RX_OVER_AUTO;
 			saved_spec->priority = spec->priority;
-			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
 			saved_spec->flags |= spec->flags;
 			saved_spec->rss_context = spec->rss_context;
 			saved_spec->dmaq_id = spec->dmaq_id;
@@ -2505,13 +2516,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 }
 
 /* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
  * Filter ID may come from userland and must be range-checked.
  */
 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
-					   enum efx_filter_priority priority,
-					   u32 filter_id, bool stack_requested)
+					   unsigned int priority_mask,
+					   u32 filter_id, bool by_index)
 {
 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2535,26 +2546,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 		spin_unlock_bh(&efx->filter_lock);
 		schedule();
 	}
+
 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
-	if (!spec || spec->priority > priority ||
-	    (!stack_requested &&
+	if (!spec ||
+	    (!by_index &&
 	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
 	     filter_id / HUNT_FILTER_TBL_ROWS)) {
 		rc = -ENOENT;
 		goto out_unlock;
 	}
+
+	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+		/* Just remove flags */
+		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+		rc = 0;
+		goto out_unlock;
+	}
+
+	if (!(priority_mask & (1U << spec->priority))) {
+		rc = -ENOENT;
+		goto out_unlock;
+	}
+
 	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
 	spin_unlock_bh(&efx->filter_lock);
 
-	if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
-		/* Reset steering of a stack-owned filter */
+	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+		/* Reset to an automatic filter */
 		struct efx_filter_spec new_spec = *spec;
 
-		new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+		new_spec.priority = EFX_FILTER_PRI_AUTO;
 		new_spec.flags = (EFX_FILTER_FLAG_RX |
-				  EFX_FILTER_FLAG_RX_RSS |
-				  EFX_FILTER_FLAG_RX_STACK);
+				  EFX_FILTER_FLAG_RX_RSS);
 		new_spec.dmaq_id = 0;
 		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
 		rc = efx_ef10_filter_push(efx, &new_spec,
@@ -2582,6 +2608,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
 		}
 	}
+
 	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
 	wake_up_all(&table->waitq);
 out_unlock:
@@ -2594,7 +2621,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
 				       enum efx_filter_priority priority,
 				       u32 filter_id)
 {
-	return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+	return efx_ef10_filter_remove_internal(efx, 1U << priority,
+					       filter_id, false);
 }
 
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -2620,10 +2648,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
 	return rc;
 }
 
-static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
-				     enum efx_filter_priority priority)
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
+				    enum efx_filter_priority priority)
 {
-	/* TODO */
+	unsigned int priority_mask;
+	unsigned int i;
+	int rc;
+
+	priority_mask = (((1U << (priority + 1)) - 1) &
+			 ~(1U << EFX_FILTER_PRI_AUTO));
+
+	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+						     i, true);
+		if (rc && rc != -ENOENT)
+			return rc;
+	}
+
+	return 0;
 }
 
 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
@@ -2724,8 +2766,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
 			rc = -EBUSY;
 			goto fail_unlock;
 		}
-		EFX_WARN_ON_PARANOID(saved_spec->flags &
-				     EFX_FILTER_FLAG_RX_STACK);
 		if (spec->priority < saved_spec->priority) {
 			rc = -EPERM;
 			goto fail_unlock;
@@ -3035,8 +3075,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
 			       table->entry[filter_idx].handle);
 		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
 				  NULL, 0, NULL);
-
-		WARN_ON(rc != 0);
+		if (rc)
+			netdev_WARN(efx->net_dev,
+				    "filter_idx=%#x handle=%#llx\n",
+				    filter_idx,
+				    table->entry[filter_idx].handle);
 		kfree(spec);
 	}
 
@@ -3060,15 +3103,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	/* Mark old filters that may need to be removed */
 	spin_lock_bh(&efx->filter_lock);
-	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
 	for (i = 0; i < n; i++) {
-		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+		filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
 	}
-	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+	n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
 	for (i = 0; i < n; i++) {
-		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+		filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
 	}
 	spin_unlock_bh(&efx->filter_lock);
@@ -3077,28 +3120,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	 */
 	netif_addr_lock_bh(net_dev);
 	if (net_dev->flags & IFF_PROMISC ||
-	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
-		table->stack_uc_count = -1;
+	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+		table->dev_uc_count = -1;
 	} else {
-		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
-		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+		table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+		memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
 		       ETH_ALEN);
 		i = 1;
 		netdev_for_each_uc_addr(uc, net_dev) {
-			memcpy(table->stack_uc_list[i].addr,
+			memcpy(table->dev_uc_list[i].addr,
 			       uc->addr, ETH_ALEN);
 			i++;
 		}
 	}
 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
-		table->stack_mc_count = -1;
+	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+		table->dev_mc_count = -1;
 	} else {
-		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
-		eth_broadcast_addr(table->stack_mc_list[0].addr);
+		table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+		eth_broadcast_addr(table->dev_mc_list[0].addr);
 		i = 1;
 		netdev_for_each_mc_addr(mc, net_dev) {
-			memcpy(table->stack_mc_list[i].addr,
+			memcpy(table->dev_mc_list[i].addr,
 			       mc->addr, ETH_ALEN);
 			i++;
 		}
@@ -3106,90 +3149,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	netif_addr_unlock_bh(net_dev);
 
 	/* Insert/renew unicast filters */
-	if (table->stack_uc_count >= 0) {
-		for (i = 0; i < table->stack_uc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-					   EFX_FILTER_FLAG_RX_RSS |
-					   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_uc_count >= 0) {
+		for (i = 0; i < table->dev_uc_count; i++) {
+			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+					   EFX_FILTER_FLAG_RX_RSS,
 					   0);
 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->stack_uc_list[i].addr);
+						 table->dev_uc_list[i].addr);
 			rc = efx_ef10_filter_insert(efx, &spec, true);
 			if (rc < 0) {
 				/* Fall back to unicast-promisc */
 				while (i--)
 					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_REQUIRED,
-						table->stack_uc_list[i].id);
-				table->stack_uc_count = -1;
+						efx, EFX_FILTER_PRI_AUTO,
+						table->dev_uc_list[i].id);
+				table->dev_uc_count = -1;
 				break;
 			}
-			table->stack_uc_list[i].id = rc;
+			table->dev_uc_list[i].id = rc;
 		}
 	}
-	if (table->stack_uc_count < 0) {
-		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-				   EFX_FILTER_FLAG_RX_RSS |
-				   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_uc_count < 0) {
+		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
 		efx_filter_set_uc_def(&spec);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
 			WARN_ON(1);
-			table->stack_uc_count = 0;
+			table->dev_uc_count = 0;
 		} else {
-			table->stack_uc_list[0].id = rc;
+			table->dev_uc_list[0].id = rc;
 		}
 	}
 
 	/* Insert/renew multicast filters */
-	if (table->stack_mc_count >= 0) {
-		for (i = 0; i < table->stack_mc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-					   EFX_FILTER_FLAG_RX_RSS |
-					   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_mc_count >= 0) {
+		for (i = 0; i < table->dev_mc_count; i++) {
+			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+					   EFX_FILTER_FLAG_RX_RSS,
 					   0);
 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->stack_mc_list[i].addr);
+						 table->dev_mc_list[i].addr);
 			rc = efx_ef10_filter_insert(efx, &spec, true);
 			if (rc < 0) {
 				/* Fall back to multicast-promisc */
 				while (i--)
 					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_REQUIRED,
-						table->stack_mc_list[i].id);
-				table->stack_mc_count = -1;
+						efx, EFX_FILTER_PRI_AUTO,
+						table->dev_mc_list[i].id);
+				table->dev_mc_count = -1;
 				break;
 			}
-			table->stack_mc_list[i].id = rc;
+			table->dev_mc_list[i].id = rc;
 		}
 	}
-	if (table->stack_mc_count < 0) {
-		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-				   EFX_FILTER_FLAG_RX_RSS |
-				   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_mc_count < 0) {
+		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
 		efx_filter_set_mc_def(&spec);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
 			WARN_ON(1);
-			table->stack_mc_count = 0;
+			table->dev_mc_count = 0;
 		} else {
-			table->stack_mc_list[0].id = rc;
+			table->dev_mc_list[0].id = rc;
 		}
 	}
 
 	/* Remove filters that weren't renewed.  Since nothing else
-	 * changes the STACK_OLD flag or removes these filters, we
+	 * changes the AUTO_OLD flag or removes these filters, we
 	 * don't need to hold the filter_lock while scanning for
 	 * these filters.
 	 */
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		if (ACCESS_ONCE(table->entry[i].spec) &
-		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
-			if (efx_ef10_filter_remove_internal(efx,
-					EFX_FILTER_PRI_REQUIRED,
+		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+			if (efx_ef10_filter_remove_internal(
+				    efx, 1U << EFX_FILTER_PRI_AUTO,
 				    i, true) < 0)
 				remove_failed = true;
 		}
 	}
@@ -3564,7 +3603,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.tx_init = efx_ef10_tx_init,
 	.tx_remove = efx_ef10_tx_remove,
 	.tx_write = efx_ef10_tx_write,
-	.rx_push_indir_table = efx_ef10_rx_push_indir_table,
+	.rx_push_rss_config = efx_ef10_rx_push_rss_config,
 	.rx_probe = efx_ef10_rx_probe,
 	.rx_init = efx_ef10_rx_init,
 	.rx_remove = efx_ef10_rx_remove,
......
@@ -2151,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
 
 	/* If disabling RX n-tuple filtering, clear existing filters */
 	if (net_dev->features & ~data & NETIF_F_NTUPLE)
-		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+		return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
 
 	return 0;
 }
......
@@ -134,17 +134,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx,
 	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
 }
 
-/**
- * efx_farch_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-static inline void efx_filter_clear_rx(struct efx_nic *efx,
-				       enum efx_filter_priority priority)
-{
-	return efx->type->filter_clear_rx(efx, priority);
-}
-
 static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
 					   enum efx_filter_priority priority)
 {
......
@@ -359,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
 	switch (string_set) {
 	case ETH_SS_STATS:
 		return efx->type->describe_stats(efx, NULL) +
-		       EFX_ETHTOOL_SW_STAT_COUNT;
+		       EFX_ETHTOOL_SW_STAT_COUNT +
+		       efx_ptp_describe_stats(efx, NULL);
 	case ETH_SS_TEST:
 		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
 	default:
@@ -380,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
 		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
 			strlcpy(strings + i * ETH_GSTRING_LEN,
 				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+		efx_ptp_describe_stats(efx, strings);
 		break;
 	case ETH_SS_TEST:
 		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -429,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 			break;
 		}
 	}
+	data += EFX_ETHTOOL_SW_STAT_COUNT;
 
 	spin_unlock_bh(&efx->stats_lock);
 
+	efx_ptp_update_stats(efx, data);
 }
 
 static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -1034,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
 	struct efx_nic *efx = netdev_priv(net_dev);
 
 	memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
-	efx_nic_push_rx_indir_table(efx);
+	efx->type->rx_push_rss_config(efx);
 	return 0;
 }
......
@@ -467,6 +467,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 		efx_schedule_channel_irq(efx_get_channel(efx, 1));
 	return IRQ_HANDLED;
 }
+
+/**************************************************************************
+ *
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+
+	/* Set hash key for IPv4 */
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+	efx_farch_rx_push_indir_table(efx);
+}
+
 /**************************************************************************
  *
  * EEPROM/flash
@@ -2484,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx)
 	falcon_init_rx_cfg(efx);
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-		/* Set hash key for IPv4 */
-		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
-		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+		falcon_b0_rx_push_rss_config(efx);
 
 		/* Set destination of both TX and RX Flush events */
 		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2703,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_push_rss_config = efx_port_dummy_op_void,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
@@ -2798,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
......
@@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
 	size_t i = 0;
 	efx_dword_t dword;
 
-	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
-		return;
+	BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
 
 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx)
 	EFX_INVERT_OWORD(temp);
 	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
 
-	efx_farch_rx_push_indir_table(efx);
-
 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
 	 */
@@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
 }
 
 static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
-				   struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
			      struct efx_farch_filter_spec *spec)
 {
 	/* If there's only one channel then disable RSS for non VF
 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
 	 */
-	spec->priority = EFX_FILTER_PRI_REQUIRED;
-	spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+	spec->priority = EFX_FILTER_PRI_AUTO;
+	spec->flags = (EFX_FILTER_FLAG_RX |
		       (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
 	spec->dmaq_id = 0;
@@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
 			rc = -EEXIST;
 			goto out;
 		}
-		if (spec.priority < saved_spec->priority &&
-		    !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
-		      saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+		if (spec.priority < saved_spec->priority) {
 			rc = -EPERM;
 			goto out;
 		}
-		if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
-			/* Just make sure it won't be removed */
-			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
-			rc = 0;
-			goto out;
-		}
-		/* Retain the RX_STACK flag */
-		spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
 	}
 
 	/* Insert the filter */
@@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
 	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
 
 	if (!test_bit(filter_idx, table->used_bitmap) ||
-	    spec->priority > priority)
+	    spec->priority != priority)
 		return -ENOENT;
 
-	if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
-		efx_farch_filter_init_rx_for_stack(efx, spec);
+	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+		efx_farch_filter_init_rx_auto(efx, spec);
 		efx_farch_filter_push_rx_config(efx);
 	} else {
 		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
 	unsigned int filter_idx;
 
 	spin_lock_bh(&efx->filter_lock);
-	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
-		efx_farch_filter_remove(efx, table, filter_idx, priority);
+	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+			efx_farch_filter_remove(efx, table,
+						filter_idx, priority);
+	}
 	spin_unlock_bh(&efx->filter_lock);
 }
 
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
-			       enum efx_filter_priority priority)
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority)
 {
 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
@@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx,
 				     priority);
 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
 				     priority);
+	return 0;
 }
 
 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
@@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
 		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
 			spec = &table->spec[i];
 			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
-			efx_farch_filter_init_rx_for_stack(efx, spec);
+			efx_farch_filter_init_rx_auto(efx, spec);
 			__set_bit(i, table->used_bitmap);
 		}
 	}
......
@@ -59,12 +59,16 @@ enum efx_filter_match_flags {
 /**
  * enum efx_filter_priority - priority of a hardware filter specification
  * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ *	or hardware requirements.  This may only be used by the filter
+ *	implementation for each NIC type.
  * @EFX_FILTER_PRI_MANUAL: Manually configured filter
  * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
  *	networking and SR-IOV)
  */
 enum efx_filter_priority {
 	EFX_FILTER_PRI_HINT = 0,
+	EFX_FILTER_PRI_AUTO,
 	EFX_FILTER_PRI_MANUAL,
 	EFX_FILTER_PRI_REQUIRED,
 };
@@ -78,19 +82,18 @@ enum efx_filter_priority {
  *	according to the indirection table.
  * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
  *	queue.
- * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
- *	network stack.  The filter must have a priority of
- *	%EFX_FILTER_PRI_REQUIRED.  It can be steered by a replacement
- *	request with priority %EFX_FILTER_PRI_MANUAL, and a removal
- *	request with priority %EFX_FILTER_PRI_MANUAL will reset the
- *	steering (but not remove the filter).
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ *	overriding an automatic filter (priority
+ *	%EFX_FILTER_PRI_AUTO).  This may only be set by the filter
+ *	implementation for each type.  A removal request will restore
+ *	the automatic filter in its place.
  * @EFX_FILTER_FLAG_RX: Filter is for RX
 * @EFX_FILTER_FLAG_TX: Filter is for TX
 */
 enum efx_filter_flags {
 	EFX_FILTER_FLAG_RX_RSS = 0x01,
 	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
-	EFX_FILTER_FLAG_RX_STACK = 0x04,
+	EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
 	EFX_FILTER_FLAG_RX = 0x08,
 	EFX_FILTER_FLAG_TX = 0x10,
 };
......
@@ -288,12 +288,9 @@ struct efx_rx_buffer {
  * Used to facilitate sharing dma mappings between recycled rx buffers
  * and those passed up to the kernel.
  *
- * @refcnt: Number of struct efx_rx_buffer's referencing this page.
- *	When refcnt falls to zero, the page is unmapped for dma
  * @dma_addr: The dma address of this page.
  */
 struct efx_rx_page_state {
-	unsigned refcnt;
 	dma_addr_t dma_addr;
 
 	unsigned int __pad[0] ____cacheline_aligned;
@@ -363,12 +360,6 @@ struct efx_rx_queue {
 	unsigned int slow_fill_count;
 };
 
-enum efx_rx_alloc_method {
-	RX_ALLOC_METHOD_AUTO = 0,
-	RX_ALLOC_METHOD_SKB = 1,
-	RX_ALLOC_METHOD_PAGE = 2,
-};
-
 enum efx_sync_events_state {
 	SYNC_EVENTS_DISABLED = 0,
 	SYNC_EVENTS_QUIESCENT,
@@ -1024,7 +1015,7 @@ struct efx_mtd_partition {
 * @tx_init: Initialise TX queue on the NIC
 * @tx_remove: Free resources for TX queue
 * @tx_write: Write TX descriptors and doorbell
- * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
 * @rx_probe: Allocate resources for RX queue
 * @rx_init: Initialise RX queue on the NIC
 * @rx_remove: Free resources for RX queue
@@ -1044,7 +1035,8 @@ struct efx_mtd_partition {
 * @filter_insert: add or replace a filter
 * @filter_remove_safe: remove a filter by ID, carefully
 * @filter_get_safe: retrieve a filter by ID, carefully
- * @filter_clear_rx: remove RX filters by priority
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ *	equal to the given priority and is not %EFX_FILTER_PRI_AUTO
 * @filter_count_rx_used: Get the number of filters in use at a given priority
 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
 * @filter_get_rx_ids: Get list of RX filters at a given priority
@@ -1141,7 +1133,7 @@ struct efx_nic_type {
 	void (*tx_init)(struct efx_tx_queue *tx_queue);
 	void (*tx_remove)(struct efx_tx_queue *tx_queue);
 	void (*tx_write)(struct efx_tx_queue *tx_queue);
-	void (*rx_push_indir_table)(struct efx_nic *efx);
+	void (*rx_push_rss_config)(struct efx_nic *efx);
 	int (*rx_probe)(struct efx_rx_queue *rx_queue);
 	void (*rx_init)(struct efx_rx_queue *rx_queue);
 	void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1166,8 +1158,8 @@ struct efx_nic_type {
 	int (*filter_get_safe)(struct efx_nic *efx,
 			       enum efx_filter_priority priority,
 			       u32 filter_id, struct efx_filter_spec *);
-	void (*filter_clear_rx)(struct efx_nic *efx,
-				enum efx_filter_priority priority);
+	int (*filter_clear_rx)(struct efx_nic *efx,
			       enum efx_filter_priority priority);
 	u32 (*filter_count_rx_used)(struct efx_nic *efx,
 				    enum efx_filter_priority priority);
 	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
......
@@ -412,8 +412,8 @@ enum {
 	EF10_STAT_rx_dp_q_disabled_packets,
 	EF10_STAT_rx_dp_di_dropped_packets,
 	EF10_STAT_rx_dp_streaming_packets,
-	EF10_STAT_rx_dp_emerg_fetch,
-	EF10_STAT_rx_dp_emerg_wait,
+	EF10_STAT_rx_dp_hlb_fetch,
+	EF10_STAT_rx_dp_hlb_wait,
 	EF10_STAT_COUNT
 };
 
@@ -566,6 +566,8 @@ int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 			unsigned int new_mode);
 int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
 void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
 void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
 				   struct sk_buff *skb);
@@ -693,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
 int efx_farch_filter_get_safe(struct efx_nic *efx,
 			      enum efx_filter_priority priority, u32 filter_id,
 			      struct efx_filter_spec *);
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
-			       enum efx_filter_priority priority);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority);
 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
 				   enum efx_filter_priority priority);
 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
@@ -762,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx);
 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
 void efx_farch_init_common(struct efx_nic *efx);
 void efx_ef10_handle_drain_event(struct efx_nic *efx);
-static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
-	efx->type->rx_push_indir_table(efx);
-}
 void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
......
@@ -255,6 +255,15 @@ struct efx_ptp_timeset {
 * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
 * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
 *	allocations in main data path).
+ * @good_syncs: Number of successful synchronisations.
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @bad_syncs: Number of failed synchronisations.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
 * @timeset: Last set of synchronisation statistics.
 */
 struct efx_ptp_data {
@@ -300,6 +309,16 @@ struct efx_ptp_data {
 	struct workqueue_struct *pps_workwq;
 	bool nic_ts_enabled;
 	MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+	unsigned int good_syncs;
+	unsigned int fast_syncs;
+	unsigned int bad_syncs;
+	unsigned int sync_timeouts;
+	unsigned int no_time_syncs;
+	unsigned int invalid_sync_windows;
+	unsigned int undersize_sync_windows;
+	unsigned int oversize_sync_windows;
+	unsigned int rx_no_timestamp;
+
 	struct efx_ptp_timeset
 		timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
 };
@@ -312,6 +331,78 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
 static int efx_phc_enable(struct ptp_clock_info *ptp,
 			  struct ptp_clock_request *request, int on);
 
+#define PTP_SW_STAT(ext_name, field_name)				\
+	{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name)				\
+	{ #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+	PTP_SW_STAT(ptp_good_syncs, good_syncs),
+	PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+	PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+	PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+	PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+	PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+	PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+	PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+	PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+	PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+	PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+	PTP_MC_STAT(ptp_timestamp_packets, TS),
+	PTP_MC_STAT(ptp_filter_matches, FM),
+	PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+	[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+	if (!efx->ptp_data)
+		return 0;
+
+	return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+				      efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+	size_t i;
+	int rc;
+
+	if (!efx->ptp_data)
+		return 0;
+
+	/* Copy software statistics */
+	for (i = 0; i < PTP_STAT_COUNT; i++) {
+		if (efx_ptp_stat_desc[i].dma_width)
+			continue;
+		stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+					     efx_ptp_stat_desc[i].offset);
+	}
+
+	/* Fetch MC statistics.  We *must* fill in all statistics or
+	 * risk leaking kernel memory to userland, so if the MCDI
+	 * request fails we pretend we got zeroes.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+		memset(outbuf, 0, sizeof(outbuf));
+	}
+
+	efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+			     efx_ptp_stat_mask,
+			     stats, _MCDI_PTR(outbuf, 0), false);
+
+	return PTP_STAT_COUNT;
+}
+
 /* For Siena platforms NIC time is s and ns */
 static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
@@ -633,7 +724,8 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
 	/* Read the set of results and find the last good host-MC
 	 * synchronization result.  The MC times when it finishes reading the
 	 * host time so the corrected window time should be fairly constant
-	 * for a given platform.
+	 * for a given platform.  Increment stats for any results that appear
+	 * to be erroneous.
 	 */
 	for (i = 0; i < number_readings; i++) {
 		s32 window, corrected;
@@ -658,9 +750,13 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
 		 * interrupt or other delay occurred between reading the system
 		 * time and writing it to MC memory.
 		 */
-		if (window >= SYNCHRONISATION_GRANULARITY_NS &&
-		    corrected < MAX_SYNCHRONISATION_NS &&
-		    corrected >= ptp->min_synchronisation_ns) {
+		if (window < SYNCHRONISATION_GRANULARITY_NS) {
+			++ptp->invalid_sync_windows;
+		} else if (corrected >= MAX_SYNCHRONISATION_NS) {
+			++ptp->undersize_sync_windows;
+		} else if (corrected < ptp->min_synchronisation_ns) {
+			++ptp->oversize_sync_windows;
+		} else {
 			ngood++;
 			last_good = i;
 		}
@@ -741,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 		loops++;
 	}
 
+	if (loops <= 1)
+		++ptp->fast_syncs;
+	if (!time_before(jiffies, timeout))
+		++ptp->sync_timeouts;
+
 	if (ACCESS_ONCE(*start))
 		efx_ptp_send_times(efx, &last_time);
 
@@ -749,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 				 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
 				 synch_buf, sizeof(synch_buf),
 				 &response_length);
-	if (rc == 0)
+	if (rc == 0) {
 		rc = efx_ptp_process_times(efx, synch_buf, response_length,
 					   &last_time);
+		if (rc == 0)
+			++ptp->good_syncs;
+		else
+			++ptp->no_time_syncs;
+	}
+
+	/* Increment the bad syncs counter if the synchronize fails, whatever
+	 * the reason.
+	 */
+	if (rc != 0)
+		++ptp->bad_syncs;
 
 	return rc;
 }
@@ -907,9 +1019,7 @@ static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
 			__skb_queue_tail(q, skb);
 		} else if (time_after(jiffies, match->expiry)) {
 			match->state = PTP_PACKET_STATE_TIMED_OUT;
-			if (net_ratelimit())
-				netif_warn(efx, rx_err, efx->net_dev,
-					   "PTP packet - no timestamp seen\n");
+			++ptp->rx_no_timestamp;
 			__skb_queue_tail(q, skb);
 		} else {
 			/* Replace unprocessed entry and stop */
......
@@ -321,6 +321,31 @@ static int siena_probe_nic(struct efx_nic *efx)
 	return rc;
 }
 
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+
+	/* Set hash key for IPv4 */
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+	/* Enable IPv6 RSS */
+	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+	efx_farch_rx_push_indir_table(efx);
+}
+
 /* This call performs hardware-specific global initialisation, such as
  * defining the descriptor cache sizes and number of RSS channels.
  * It does not set up any buffers, descriptor rings or event queues.
@@ -361,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx)
 			    EFX_RX_USR_BUF_SIZE >> 5);
 	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
-	/* Set hash key for IPv4 */
-	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
-	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
-	/* Enable IPv6 RSS */
-	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
-		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
-		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
-	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
-	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
-	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
-	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
-	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
-			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
-	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
-	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
-	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+	siena_rx_push_rss_config(efx);
 
 	/* Enable event logging */
 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -940,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_push_rss_config = siena_rx_push_rss_config,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
......