Commit 7112f8b0 authored by David S. Miller

Merge branch 'sfc-even-more-code-refactoring'

Alex Maftei says:

====================
sfc: even more code refactoring

Splitting even more of the driver code into different files, which
will later be used in another driver for a new product.

This is a continuation of my previous patch series, and the one
before it.
A stand-alone patch will follow after this series, after which the
refactoring will be concluded, for now.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9f120e76 f7226e0f
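The recurring shape of the refactor is easiest to see in the
efx_get_pf_index() hunks below; as a minimal sketch (the prototypes and
the call site are taken from this diff, the comments are editorial):

/* Before: static to ef10.c, writing straight into EF10-private state. */
static int efx_ef10_get_pf_index(struct efx_nic *efx);

/* After: a generic MCDI helper in mcdi_functions.c; the result comes
 * back through an out-parameter, so the helper no longer needs to know
 * the EF10 nic_data layout and a future driver can reuse it.
 */
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);

/* The hardware-specific code keeps only a thin call: */
rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
	goto fail5;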
# SPDX-License-Identifier: GPL-2.0
sfc-y += efx.o efx_common.o efx_channels.o nic.o \
farch.o siena.o ef10.o \
-tx.o tx_common.o rx.o rx_common.o \
-selftest.o ethtool.o ptp.o tx_tso.o \
+tx.o tx_common.o tx_tso.o rx.o rx_common.o \
+selftest.o ethtool.o ethtool_common.o ptp.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
mcdi_functions.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
......
@@ -189,24 +189,6 @@ static bool efx_ef10_is_vf(struct efx_nic *efx)
return efx->type->is_vf;
}
static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
return 0;
}
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
@@ -277,24 +259,9 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
u8 vi_window_mode = MCDI_BYTE(outbuf,
GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
-switch (vi_window_mode) {
-case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
-efx->vi_stride = 8192;
-break;
-case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
-efx->vi_stride = 16384;
-break;
-case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
-efx->vi_stride = 65536;
-break;
-default:
-netif_err(efx, probe, efx->net_dev,
-"Unrecognised VI window mode %d\n",
-vi_window_mode);
-return -EIO;
-}
-netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
-efx->vi_stride);
+rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+if (rc)
+return rc;
} else {
/* keep default VI stride */
netif_dbg(efx, probe, efx->net_dev,
@@ -693,7 +660,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
}
nic_data->warm_boot_count = rc;
-efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
@@ -729,7 +696,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail4;
-rc = efx_ef10_get_pf_index(efx);
+rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
goto fail5;
@@ -1473,7 +1440,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
-efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
@@ -2631,7 +2598,7 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
if (!exclusive && rss_spread == 1) {
-ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
if (context_size)
*context_size = 1;
return 0;
@@ -2718,11 +2685,11 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
int rc;
-if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
+if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
WARN_ON(rc != 0);
}
-efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
}
static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
@@ -2748,7 +2715,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
-if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
+if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
!nic_data->rx_rss_context_exclusive) {
rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
NULL);
@@ -2764,7 +2731,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
goto fail2;
if (efx->rss_context.context_id != old_rx_rss_context &&
-old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
nic_data->rx_rss_context_exclusive = true;
if (rx_indir_table != efx->rss_context.rx_indir_table)
@@ -2795,7 +2762,7 @@ static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
WARN_ON(!mutex_is_locked(&efx->rss_lock));
-if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
+if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
if (rc)
return rc;
@@ -2830,7 +2797,7 @@ static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
-if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
+if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
return -ENOENT;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
@@ -2891,7 +2858,7 @@ static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
list_for_each_entry(ctx, &efx->rss_context.list, list) {
/* previous NIC RSS context is gone */
-ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* so try to allocate a new one */
rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
ctx->rx_indir_table,
@@ -2962,7 +2929,7 @@ static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
{
if (user)
return -EOPNOTSUPP;
-if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
+if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
return 0;
return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
@@ -3883,7 +3850,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
*/
if (WARN_ON_ONCE(!ctx))
flags &= ~EFX_FILTER_FLAG_RX_RSS;
-else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
+else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID))
flags &= ~EFX_FILTER_FLAG_RX_RSS;
}
@@ -4062,7 +4029,7 @@ static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
rc = -ENOENT;
goto out_unlock;
}
-if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
+if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
rc = -EOPNOTSUPP;
goto out_unlock;
}
@@ -4803,7 +4770,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
invalid_filters++;
goto not_restored;
}
-if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
+if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
netif_warn(efx, drv, efx->net_dev,
"Warning: unable to restore a filter with RSS context %u as it was not created.\n",
spec->rss_context);
......
@@ -27,6 +27,7 @@
#include "efx_channels.h"
#include "rx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
@@ -311,16 +312,6 @@ static void efx_dissociate(struct efx_nic *efx)
}
}
void efx_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
ctx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
}
static int efx_probe_nic(struct efx_nic *efx)
{
int rc;
@@ -393,70 +384,6 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
static int efx_probe_filters(struct efx_nic *efx)
{
int rc;
init_rwsem(&efx->filter_sem);
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
goto out_unlock;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
struct efx_channel *channel;
int i, success = 1;
efx_for_each_channel(channel, efx) {
channel->rps_flow_id =
kcalloc(efx->type->max_rx_ip_filters,
sizeof(*channel->rps_flow_id),
GFP_KERNEL);
if (!channel->rps_flow_id)
success = 0;
else
for (i = 0;
i < efx->type->max_rx_ip_filters;
++i)
channel->rps_flow_id[i] =
RPS_FLOW_ID_INVALID;
channel->rfs_expire_index = 0;
channel->rfs_filter_count = 0;
}
if (!success) {
efx_for_each_channel(channel, efx)
kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
}
#endif
out_unlock:
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
struct efx_channel *channel;
efx_for_each_channel(channel, efx) {
cancel_delayed_work_sync(&channel->filter_work);
kfree(channel->rps_flow_id);
}
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
up_write(&efx->filter_sem);
}
/**************************************************************************
*
* NIC startup/shutdown
@@ -692,17 +619,6 @@ int efx_net_stop(struct net_device *net_dev)
return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static void efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
@@ -715,51 +631,6 @@ static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
{
/* The maximum MTU that we can fit in a single page, allowing for
* framing, overhead and XDP headroom.
*/
int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
efx->rx_prefix_size + efx->type->rx_buffer_padding +
efx->rx_ip_align + XDP_PACKET_HEADROOM;
return PAGE_SIZE - overhead;
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
rc = efx_check_disabled(efx);
if (rc)
return rc;
if (rtnl_dereference(efx->xdp_prog) &&
new_mtu > efx_xdp_max_mtu(efx)) {
netif_err(efx, drv, efx->net_dev,
"Requested MTU of %d too big for XDP (max: %d)\n",
new_mtu, efx_xdp_max_mtu(efx));
return -EINVAL;
}
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
efx_device_attach_if_not_resetting(efx);
return 0;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1174,197 +1045,6 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
if ((left->match_flags ^ right->match_flags) |
((left->flags ^ right->flags) &
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;
return memcmp(&left->outer_vid, &right->outer_vid,
sizeof(struct efx_filter_spec) -
offsetof(struct efx_filter_spec, outer_vid)) == 0;
}
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
return jhash2((const u32 *)&spec->outer_vid,
(sizeof(struct efx_filter_spec) -
offsetof(struct efx_filter_spec, outer_vid)) / 4,
0);
}
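Both helpers rely on one layout invariant: every match field of struct
efx_filter_spec sits in a single contiguous run from outer_vid to the
end of the struct, and the memcmp() and jhash2() above cover exactly
that byte range, so the hash is consistent with the equality test
(provided specs are zero-initialized, so that padding bytes compare
equal). The same technique on a small hypothetical struct, as a sketch:

#include <linux/build_bug.h>
#include <linux/jhash.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_spec {
	u32 priority;	/* bookkeeping; not part of the match identity */
	u16 vid;	/* first match field; all other match fields follow */
	u16 proto;
	u32 host;
};

static u32 demo_spec_hash(const struct demo_spec *s)
{
	/* jhash2() consumes u32 words, so the hashed tail must start on
	 * a 4-byte boundary and span a whole number of words - the same
	 * property the BUILD_BUG_ON above asserts for outer_vid.
	 */
	BUILD_BUG_ON(offsetof(struct demo_spec, vid) & 3);
	return jhash2((const u32 *)&s->vid,
		      (sizeof(struct demo_spec) -
		       offsetof(struct demo_spec, vid)) / 4, 0);
}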
#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
bool *force)
{
if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
/* ARFS is currently updating this entry, leave it */
return false;
}
if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
/* ARFS tried and failed to update this, so it's probably out
* of date. Remove the filter and the ARFS rule entry.
*/
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
*force = true;
return true;
} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
/* ARFS has moved on, so old filter is not needed. Since we did
* not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
* not be removed by efx_rps_hash_del() subsequently.
*/
*force = true;
return true;
}
/* Remove it iff ARFS wants to. */
return true;
}
static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
u32 hash = efx_filter_spec_hash(spec);
lockdep_assert_held(&efx->rps_hash_lock);
if (!efx->rps_hash_table)
return NULL;
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
struct hlist_node *node;
head = efx_rps_hash_bucket(efx, spec);
if (!head)
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
if (efx_filter_spec_equal(spec, &rule->spec))
return rule;
}
return NULL;
}
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
const struct efx_filter_spec *spec,
bool *new)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
struct hlist_node *node;
head = efx_rps_hash_bucket(efx, spec);
if (!head)
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
if (efx_filter_spec_equal(spec, &rule->spec)) {
*new = false;
return rule;
}
}
rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
*new = true;
if (rule) {
memcpy(&rule->spec, spec, sizeof(rule->spec));
hlist_add_head(&rule->node, head);
}
return rule;
}
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
struct hlist_node *node;
head = efx_rps_hash_bucket(efx, spec);
if (WARN_ON(!head))
return;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
if (efx_filter_spec_equal(spec, &rule->spec)) {
/* Someone already reused the entry. We know that if
* this check doesn't fire (i.e. filter_id == REMOVING)
* then the REMOVING mark was put there by our caller,
* because caller is holding a lock on filter table and
* only holders of that lock set REMOVING.
*/
if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
return;
hlist_del(node);
kfree(rule);
return;
}
}
/* We didn't find it. */
WARN_ON(1);
}
#endif
/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx, *new;
u32 id = 1; /* Don't use zero, that refers to the master RSS context */
WARN_ON(!mutex_is_locked(&efx->rss_lock));
/* Search for first gap in the numbering */
list_for_each_entry(ctx, head, list) {
if (ctx->user_id != id)
break;
id++;
/* Check for wrap. If this happens, we have nearly 2^32
* allocated RSS contexts, which seems unlikely.
*/
if (WARN_ON_ONCE(!id))
return NULL;
}
/* Create the new entry */
new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
if (!new)
return NULL;
new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
new->rx_hash_udp_4tuple = false;
/* Insert the new entry into the gap */
new->user_id = id;
list_add_tail(&new->list, &ctx->list);
return new;
}
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx;
WARN_ON(!mutex_is_locked(&efx->rss_lock));
list_for_each_entry(ctx, head, list)
if (ctx->user_id == id)
return ctx;
return NULL;
}
void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
list_del(&ctx->list);
kfree(ctx);
}
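The WARN_ON(!mutex_is_locked(&efx->rss_lock)) checks above make the
calling convention explicit: all three entry points are only valid with
efx->rss_lock held. A sketch of a conforming caller (demo_* is a
placeholder name, not code from this series):

static int demo_new_rss_context(struct efx_nic *efx, u32 *user_id_out)
{
	struct efx_rss_context *ctx;
	int rc = 0;

	mutex_lock(&efx->rss_lock);
	ctx = efx_alloc_rss_context_entry(efx);
	if (!ctx) {
		rc = -ENOMEM;
	} else {
		/* ctx->context_id still holds the INVALID marker here;
		 * the NIC-side context only exists once a configuration
		 * is pushed, e.g. by efx_ef10_rx_push_rss_context_config().
		 */
		*user_id_out = ctx->user_id;
	}
	mutex_unlock(&efx->rss_lock);
	return rc;
}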
/**************************************************************************
*
* PCI interface
......
@@ -26,8 +26,6 @@ extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
@@ -37,9 +35,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
__efx_rx_packet(channel);
}
void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue);
struct page *efx_reuse_page(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL
@@ -174,36 +169,11 @@ static inline void efx_filter_rfs_expire(struct work_struct *data)
static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#define efx_filter_rfs_enabled() 0
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
bool *force);
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec);
/* @new is written to indicate if entry was newly added (true) or if an old
* entry was found and returned (false).
*/
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
const struct efx_filter_spec *spec,
bool *new);
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
#endif
/* RSS contexts */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
void efx_free_rss_context_entry(struct efx_rss_context *ctx);
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
-return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
+return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
/* Ethtool support */
......
@@ -197,6 +197,51 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n");
}
unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
{
/* The maximum MTU that we can fit in a single page, allowing for
* framing, overhead and XDP headroom.
*/
int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
efx->rx_prefix_size + efx->type->rx_buffer_padding +
efx->rx_ip_align + XDP_PACKET_HEADROOM;
return PAGE_SIZE - overhead;
}
/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
rc = efx_check_disabled(efx);
if (rc)
return rc;
if (rtnl_dereference(efx->xdp_prog) &&
new_mtu > efx_xdp_max_mtu(efx)) {
netif_err(efx, drv, efx->net_dev,
"Requested MTU of %d too big for XDP (max: %d)\n",
new_mtu, efx_xdp_max_mtu(efx));
return -EINVAL;
}
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
efx_device_attach_if_not_resetting(efx);
return 0;
}
/**************************************************************************
*
* Hardware monitor
@@ -454,6 +499,16 @@ void efx_stop_all(struct efx_nic *efx)
efx_stop_datapath(efx);
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
}
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -840,7 +895,7 @@ int efx_init_struct(struct efx_nic *efx,
#endif
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
-INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+efx_selftest_async_init(efx);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
......
@@ -21,6 +21,8 @@ void efx_fini_struct(struct efx_nic *efx);
void efx_start_all(struct efx_nic *efx);
void efx_stop_all(struct efx_nic *efx);
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);
int efx_create_reset_workqueue(void);
void efx_queue_reset_work(struct efx_nic *efx);
void efx_flush_reset_workqueue(struct efx_nic *efx);
@@ -65,5 +67,7 @@ static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
void efx_mac_reconfigure(struct efx_nic *efx);
void efx_link_status_changed(struct efx_nic *efx);
unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
int efx_change_mtu(struct net_device *net_dev, int new_mtu);
#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_ETHTOOL_COMMON_H
#define EFX_ETHTOOL_COMMON_H
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info);
u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause);
int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
u8 *strings, u64 *data);
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __attribute__ ((unused)),
u64 *data);
#endif
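These helpers keep the exact prototypes of the matching struct
ethtool_ops callbacks (efx_ethtool_fill_self_tests() is an internal
helper rather than an ops method), so a driver built on the common
layer can plug them straight into its ops table. A sketch, not code
from this series:

#include <linux/ethtool.h>

static const struct ethtool_ops demo_ethtool_ops = {
	.get_drvinfo		= efx_ethtool_get_drvinfo,
	.get_msglevel		= efx_ethtool_get_msglevel,
	.set_msglevel		= efx_ethtool_set_msglevel,
	.get_pauseparam		= efx_ethtool_get_pauseparam,
	.get_sset_count		= efx_ethtool_get_sset_count,
	.get_strings		= efx_ethtool_get_strings,
	.get_ethtool_stats	= efx_ethtool_get_stats,
};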
@@ -348,8 +348,6 @@ void efx_mcdi_port_remove(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
......
@@ -347,3 +347,43 @@ void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
outbuf, outlen, rc);
}
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
switch (vi_window_mode) {
case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
efx->vi_stride = 8192;
break;
case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
efx->vi_stride = 16384;
break;
case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
efx->vi_stride = 65536;
break;
default:
netif_err(efx, probe, efx->net_dev,
"Unrecognised VI window mode %d\n",
vi_window_mode);
return -EIO;
}
netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
efx->vi_stride);
return 0;
}
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
size_t outlen;
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
*pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
return 0;
}
@@ -26,5 +26,7 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);
#endif
@@ -675,84 +675,6 @@ u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
return phy_data->supported_cap;
}
static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
[MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
[MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
[MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
u32 flags, fcntl, speed, lpa;
speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
speed = efx_mcdi_event_link_speed[speed];
flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
* which is only run after flushing the event queues. Therefore, it
* is safe to modify the link state outside of the mac_lock here.
*/
efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
efx_mcdi_phy_check_fcntl(efx, lpa);
efx_link_status_changed(efx);
}
int efx_mcdi_set_mac(struct efx_nic *efx)
{
u32 fcntl;
MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
/* This has no effect on EF10 */
ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
efx->net_dev->dev_addr);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
/* Set simple MAC filter for Siena */
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
SET_MAC_IN_FLAG_INCLUDE_FCS,
!!(efx->net_dev->features & NETIF_F_RXFCS));
switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX:
fcntl = MC_CMD_FCNTL_BIDIR;
break;
case EFX_FC_RX:
fcntl = MC_CMD_FCNTL_RESPOND;
break;
default:
fcntl = MC_CMD_FCNTL_OFF;
break;
}
if (efx->wanted_fc & EFX_FC_AUTO)
fcntl = MC_CMD_FCNTL_AUTO;
if (efx->fc_disable)
fcntl = MC_CMD_FCNTL_OFF;
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
NULL, 0, NULL);
}
bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
......
@@ -9,6 +9,7 @@
*/
#include "mcdi_port_common.h"
#include "efx_common.h"
int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
@@ -474,6 +475,51 @@ int efx_mcdi_phy_test_alive(struct efx_nic *efx)
return 0;
}
int efx_mcdi_set_mac(struct efx_nic *efx)
{
u32 fcntl;
MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
/* This has no effect on EF10 */
ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
efx->net_dev->dev_addr);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
/* Set simple MAC filter for Siena */
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
SET_MAC_IN_FLAG_INCLUDE_FCS,
!!(efx->net_dev->features & NETIF_F_RXFCS));
switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX:
fcntl = MC_CMD_FCNTL_BIDIR;
break;
case EFX_FC_RX:
fcntl = MC_CMD_FCNTL_RESPOND;
break;
default:
fcntl = MC_CMD_FCNTL_OFF;
break;
}
if (efx->wanted_fc & EFX_FC_AUTO)
fcntl = MC_CMD_FCNTL_AUTO;
if (efx->fc_disable)
fcntl = MC_CMD_FCNTL_OFF;
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
NULL, 0, NULL);
}
/* Get physical port number (EF10 only; on Siena it is same as PF number) */
int efx_mcdi_port_get_number(struct efx_nic *efx)
{
@@ -487,3 +533,36 @@ int efx_mcdi_port_get_number(struct efx_nic *efx)
return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
}
static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
[MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
[MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
[MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
u32 flags, fcntl, speed, lpa;
speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
speed = efx_mcdi_event_link_speed[speed];
flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
* which is only run after flushing the event queues. Therefore, it
* is safe to modify the link state outside of the mac_lock here.
*/
efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
efx_mcdi_phy_check_fcntl(efx, lpa);
efx_link_status_changed(efx);
}
@@ -28,6 +28,8 @@ struct efx_mcdi_phy_data {
u32 forced_cap;
};
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
void efx_link_set_advertising(struct efx_nic *efx,
const unsigned long *advertising);
@@ -48,6 +50,8 @@ bool efx_mcdi_phy_poll(struct efx_nic *efx);
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
struct ethtool_fecparam *fec);
int efx_mcdi_phy_test_alive(struct efx_nic *efx);
int efx_mcdi_set_mac(struct efx_nic *efx);
int efx_mcdi_port_get_number(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
#endif
@@ -744,13 +744,13 @@ union efx_multicast_hash {
struct vfdi_status;
/* The reserved RSS context value */
-#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
* struct efx_rss_context - A user-defined RSS context for filtering
* @list: node of linked list on which this struct is stored
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
-* %EFX_EF10_RSS_CONTEXT_INVALID if this context is not present on the NIC.
-* For Siena, 0 if RSS is active, else %EFX_EF10_RSS_CONTEXT_INVALID.
+* %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+* For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
......
@@ -33,13 +33,6 @@
/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
*/
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u
@@ -47,24 +40,6 @@
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
(u32)data[3] << 24;
#endif
}
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
unsigned int len)
@@ -73,100 +48,6 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
DMA_FROM_DEVICE);
}
/* Check the RX page recycle ring for a page that can be reused. */
struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct page *page;
struct efx_rx_page_state *state;
unsigned index;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
return NULL;
rx_queue->page_ring[index] = NULL;
/* page_remove cannot exceed page_add. */
if (rx_queue->page_remove != rx_queue->page_add)
++rx_queue->page_remove;
/* If page_count is 1 then we hold the only reference to this page. */
if (page_count(page) == 1) {
++rx_queue->page_recycle_count;
return page;
} else {
state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
put_page(page);
++rx_queue->page_recycle_failed;
}
return NULL;
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
* only be added if this is the final RX buffer, to prevent pages being used in
* the descriptor ring and appearing in the recycle ring simultaneously.
*/
static void efx_recycle_rx_page(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_nic *efx = rx_queue->efx;
unsigned index;
/* Only recycle the page after processing the final buffer. */
if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
return;
index = rx_queue->page_add & rx_queue->page_ptr_mask;
if (rx_queue->page_ring[index] == NULL) {
unsigned read_index = rx_queue->page_remove &
rx_queue->page_ptr_mask;
/* The next slot in the recycle ring is available, but
* increment page_remove if the read pointer currently
* points here.
*/
if (read_index == index)
++rx_queue->page_remove;
rx_queue->page_ring[index] = page;
++rx_queue->page_add;
return;
}
++rx_queue->page_recycle_full;
efx_unmap_rx_buffer(efx, rx_buf);
put_page(rx_buf->page);
}
/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
do {
efx_recycle_rx_page(channel, rx_buf);
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
} while (--n_frags);
}
static void efx_discard_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
efx_recycle_rx_pages(channel, rx_buf, n_frags);
efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len)
@@ -190,53 +71,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO. GRO can handle pages
* regardless of checksum state and skbs with a good checksum.
*/
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
skb = napi_get_frags(napi);
if (unlikely(!skb)) {
struct efx_rx_queue *rx_queue;
rx_queue = efx_channel_get_rx_queue(channel);
efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
if (efx->net_dev->features & NETIF_F_RXHASH)
skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset,
rx_buf->len);
rx_buf->page = NULL;
skb->len += rx_buf->len;
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
}
skb->data_len = skb->len;
skb->truesize += n_frags * efx->rx_buffer_truesize;
skb_record_rx_queue(skb, channel->rx_queue.core_index);
napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
@@ -583,28 +417,6 @@ void __efx_rx_packet(struct efx_channel *channel)
channel->rx_pkt_n_frags = 0;
}
void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
struct efx_nic *efx = rx_queue->efx;
/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
if (iommu_present(&pci_bus_type))
bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
else
bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
rx_queue->page_ptr_mask = page_ring_size - 1;
}
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_work(struct work_struct *data)
@@ -838,37 +650,3 @@ bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
}
#endif /* CONFIG_RFS_ACCEL */
/**
* efx_filter_is_mc_recipient - test whether spec is a multicast recipient
* @spec: Specification to test
*
* Return: %true if the specification is a non-drop RX filter that
* matches a local MAC address I/G bit value of 1 or matches a local
* IPv4 or IPv6 address value in the respective multicast address
* range. Otherwise %false.
*/
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
return false;
if (spec->match_flags &
(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
is_multicast_ether_addr(spec->loc_mac))
return true;
if ((spec->match_flags &
(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
if (spec->ether_type == htons(ETH_P_IP) &&
ipv4_is_multicast(spec->loc_host[0]))
return true;
if (spec->ether_type == htons(ETH_P_IPV6) &&
((const u8 *)spec->loc_host)[0] == 0xff)
return true;
}
return false;
}
@@ -18,8 +18,34 @@
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
(u32)data[3] << 24;
#endif
}
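The #else branch above is an open-coded unaligned little-endian 32-bit
load; an equivalent formulation (a sketch, not what the driver uses)
would be:

#include <asm/unaligned.h>

static inline u32 demo_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
	/* Same result as either branch above: the RX prefix stores the
	 * hash little-endian at rx_packet_hash_offset.
	 */
	return get_unaligned_le32(eh + efx->rx_packet_hash_offset);
}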
void efx_rx_slow_fill(struct timer_list *t);
void efx_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags);
void efx_discard_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
@@ -39,4 +65,33 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
void efx_rx_config_page_split(struct efx_nic *efx);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh);
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
void efx_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
bool *force);
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec);
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
const struct efx_filter_spec *spec,
bool *new);
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
#endif
int efx_probe_filters(struct efx_nic *efx);
void efx_remove_filters(struct efx_nic *efx);
#endif
@@ -785,7 +785,7 @@ void efx_selftest_async_cancel(struct efx_nic *efx)
cancel_delayed_work_sync(&efx->selftest_work);
}
-void efx_selftest_async_work(struct work_struct *data)
+static void efx_selftest_async_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
selftest_work.work);
@@ -804,3 +804,8 @@ void efx_selftest_async_work(struct work_struct *data)
channel->channel, cpu);
}
}
void efx_selftest_async_init(struct efx_nic *efx)
{
INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
}
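With efx_selftest_async_work() now static, the async selftest machinery
is encapsulated behind three entry points. The intended sequence, as a
sketch (demo_* is a placeholder; the real init call site is the
efx_init_struct() hunk earlier in this diff):

static void demo_selftest_lifecycle(struct efx_nic *efx)
{
	efx_selftest_async_init(efx);	/* once, while setting up the nic struct */
	efx_selftest_async_start(efx);	/* schedule the delayed selftest work */
	efx_selftest_async_cancel(efx);	/* flush it before teardown */
}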
@@ -45,8 +45,8 @@ void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
int pkt_len);
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags);
void efx_selftest_async_init(struct efx_nic *efx);
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);
void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
@@ -21,6 +21,7 @@
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
......
@@ -268,24 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */
/* Remove buffers put into a tx_queue for the current packet.
* None of the buffers must have an skb attached.
*/
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
}
/*
* Fallback to software TSO.
*
@@ -518,41 +500,6 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
return i;
}
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
stop_index = (index + 1) & tx_queue->ptr_mask;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels). On Falcon, the TX
* completion events will be directed back to the CPU that transmitted
@@ -665,45 +612,3 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
net_dev->num_tc = num_tc;
return 0;
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
struct efx_tx_queue *txq2;
unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
txq2 = efx_tx_queue_partner(tx_queue);
fill_level = max(tx_queue->insert_count - tx_queue->read_count,
txq2->insert_count - txq2->read_count);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}
}
}
@@ -191,6 +191,100 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
buffer->flags = 0;
}
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
stop_index = (index + 1) & tx_queue->ptr_mask;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
struct efx_nic *efx = tx_queue->efx;
struct efx_tx_queue *txq2;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
txq2 = efx_tx_queue_partner(tx_queue);
fill_level = max(tx_queue->insert_count - tx_queue->read_count,
txq2->insert_count - txq2->read_count);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}
}
}
/* Remove buffers put into a tx_queue for the current packet.
* None of the buffers must have an skb attached.
*/
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
}
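Exporting efx_enqueue_unwind() preserves the usual map-or-roll-back
shape for enqueue paths outside tx_common.c. A sketch of that caller
pattern (demo_enqueue() is a placeholder, and the third argument of
efx_tx_map_data() is assumed here to be the TSO segment count, 0 for a
plain packet; compare the error path of efx_enqueue_skb()):

static int demo_enqueue(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	/* Remember where this packet started so a failure can be undone. */
	unsigned int old_insert_count = tx_queue->insert_count;

	if (efx_tx_map_data(tx_queue, skb, 0)) {
		/* Drop every buffer added for this packet, newest first. */
		efx_enqueue_unwind(tx_queue, old_insert_count);
		return -ENOMEM;
	}
	return 0;
}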
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, size_t len)
{
......
@@ -21,6 +21,11 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
unsigned int *pkts_compl,
unsigned int *bytes_compl);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count);
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, size_t len);
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
......