Commit 1bb1c5bc authored by David S. Miller's avatar David S. Miller

Merge branch 'FFungible-ethernet-driver'

Dimitris Michailidis says:

====================
new Fungible Ethernet driver

This patch series contains a new network driver for the Ethernet
functionality of Fungible cards.

It contains two modules. The first one, in patch 2, is a library module
that implements some of the device setup, queue management, and support
for operating an admin queue. These are placed in a separate module
because the cards provide a number of PCI functions handled by different
types of drivers, and all of them use the same common means to interact
with the device. Each of those drivers will rely on this library module
for these services.

The remaining patches provide the Ethernet driver for the cards.

v2:
- Fix set_pauseparam, remove get_wol, remove module param (Andrew Lunn)
- Fix a register poll loop (Andrew)
- Replace constants defined with 'static const'
- make W=1 C=1 is clean
- Remove devlink FW update (Jakub)
- Remove duplicate ethtool stats covered by structured API (Jakub)

v3:
- Make TLS stats unconditional (Andrew)
- Remove inline from .c (Andrew)
- Replace some ifdef with IS_ENABLED (Andrew)
- Fix build failure on 32b arches (build robot)
- Fix build issue with make O= (Jakub)

v4:
- Fix for newer bpf_warn_invalid_xdp_action() (Jakub)
- Remove 32b dma_set_mask_and_coherent()

v5:
- Make XDP enter/exit non-disruptive to active traffic
- Remove dormant port state
- Style fixes, unused stuff removal (Jakub)

v6:
- When changing queue depth or numbers allocate the new queues
  before shutting down the existing ones (Jakub)

v7:
- Convert IRQ bookkeeping to use XArray.
- Changes to the numbers of Tx/Rx queues are now incremental and
  do not disrupt ongoing traffic.
- Implement .ndo_eth_ioctl instead of .ndo_do_ioctl.
- Replace deprecated irq_set_affinity_hint.
- Remove TLS 1.3 support (Jakub)
- Remove hwtstamp_config.flags check (Jakub)
- Add locking in SR-IOV enable/disable. (Jakub)

v8:
- Remove dropping of <33B packets and the associated counter (Jakub)
- Report CQE size.
- Show last MAC stats when the netdev isn't running (Andrew)
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 4aaa4895 749efb1e
......@@ -7931,6 +7931,12 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/fujitsu-tablet.c
FUNGIBLE ETHERNET DRIVERS
M: Dimitris Michailidis <dmichail@fungible.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/fungible/
FUSE: FILESYSTEM IN USERSPACE
M: Miklos Szeredi <miklos@szeredi.hu>
L: linux-fsdevel@vger.kernel.org
......
......@@ -78,6 +78,7 @@ source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
......
......@@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
......
# SPDX-License-Identifier: GPL-2.0-only
#
# Fungible network driver configuration
#
config NET_VENDOR_FUNGIBLE
bool "Fungible devices"
default y
help
If you have a Fungible network device, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Fungible cards. If you say Y, you will be asked
for your specific card in the following questions.
if NET_VENDOR_FUNGIBLE
# Hidden symbol (no prompt): the common device-setup/admin-queue library,
# selected by the individual Fungible function drivers (e.g. FUN_ETH).
config FUN_CORE
tristate
help
A service module offering basic common services to Fungible
device drivers.
source "drivers/net/ethernet/fungible/funeth/Kconfig"
endif # NET_VENDOR_FUNGIBLE
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
#
# Makefile for the Fungible network device drivers.
#
obj-$(CONFIG_FUN_CORE) += funcore/
obj-$(CONFIG_FUN_ETH) += funeth/
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
obj-$(CONFIG_FUN_CORE) += funcore.o
funcore-y := fun_dev.o fun_queue.o
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef _FUNDEV_H
#define _FUNDEV_H
#include <linux/sbitmap.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>
#include "fun_hci.h"
struct pci_dev;
struct fun_dev;
struct fun_queue;
struct fun_cmd_ctx;
struct fun_queue_alloc_req;
/* Doorbell register layout. *_S names are bit shifts, *_M are masks to
 * apply after shifting, *_F are ready-made single-bit flag values.
 */
enum {
FUN_DB_QIDX_S = 0, /* queue index/position being posted */
FUN_DB_INTCOAL_ENTRIES_S = 16, /* interrupt coalescing: entry count */
FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
FUN_DB_INTCOAL_USEC_S = 23, /* interrupt coalescing: time in usec */
FUN_DB_INTCOAL_USEC_M = 0x7f,
FUN_DB_IRQ_S = 30, /* IRQ enable bit */
FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
FUN_DB_IRQ_ARM_S = 31, /* IRQ arm bit */
FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
};
/* Callback for asynchronous admin commands.
 * Invoked on reception of command response.
 */
typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
void *cb_data);
/* Callback for events/notifications received by an admin queue. */
typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);
/* Callback for pending work handled by the service task. */
typedef void (*fun_serv_cb)(struct fun_dev *fd);
/* service task flags (bit numbers in fun_dev::service_flags) */
enum {
FUN_SERV_DISABLED, /* service task is disabled */
FUN_SERV_FIRST_AVAIL /* first bit available for users of the library */
};
/* Driver state associated with a PCI function. */
struct fun_dev {
struct device *dev; /* the function's device, used for DMA/logging */
void __iomem *bar; /* start of BAR0 mapping */
u32 __iomem *dbs; /* start of doorbells in BAR0 mapping */
/* admin queue */
struct fun_queue *admin_q;
struct sbitmap_queue admin_sbq; /* allocator of admin command slots */
struct fun_cmd_ctx *cmd_ctx; /* per-slot state of in-flight commands */
fun_admin_event_cb adminq_cb; /* handler for admin event/notification CQEs */
bool suppress_cmds; /* if set don't write commands to SQ */
/* address increment between consecutive doorbells, in 4B units */
unsigned int db_stride;
/* SW versions of device registers */
u32 cc_reg; /* CC register */
u64 cap_reg; /* CAPability register */
unsigned int q_depth; /* max queue depth supported by device */
unsigned int max_qid; /* = #queues - 1, separately for SQs and CQs */
unsigned int kern_end_qid; /* last qid in the kernel range + 1 */
unsigned int fw_handle; /* NOTE(review): presumably a FW-assigned handle — confirm */
/* IRQ manager */
unsigned int num_irqs; /* total IRQs under management */
unsigned int irqs_avail; /* IRQs not currently reserved */
spinlock_t irqmgr_lock; /* protects the IRQ bookkeeping fields above/below */
unsigned long *irq_map; /* bitmap of allocated IRQs */
/* The service task handles work that needs a process context */
struct work_struct service_task;
unsigned long service_flags; /* FUN_SERV_* bits */
fun_serv_cb serv_cb; /* work to run from the service task */
};
/* Parameters supplied by a requesting driver to fun_dev_enable(). */
struct fun_dev_params {
u8 cqe_size_log2; /* admin q CQE size */
u8 sqe_size_log2; /* admin q SQE size */
/* admin q depths */
u16 cq_depth;
u16 sq_depth;
u16 rq_depth;
u16 min_msix; /* min vectors needed by requesting driver */
fun_admin_event_cb event_cb; /* invoked for admin events/notifications */
fun_serv_cb serv_cb; /* invoked from the service task */
};
/* Translate a doorbell index into its address within the BAR0 mapping.
 * Consecutive doorbells are db_stride 32-bit words apart.
 */
static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
				       unsigned int db_index)
{
	return fdev->dbs + db_index * fdev->db_stride;
}
/* SQ and CQ doorbells are interleaved in the BAR; SQs occupy the even
 * slots. Return the doorbell address for the given SQ id.
 */
static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
					  unsigned int sqid)
{
	unsigned int db_index = 2 * sqid;

	return fun_db_addr(fdev, db_index);
}
/* Return the doorbell address of a CQ; CQs occupy the odd doorbell slots. */
static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
					  unsigned int cqid)
{
	unsigned int db_index = 2 * cqid + 1;

	return fun_db_addr(fdev, db_index);
}
/* Query the device for the number of available resources of type @res. */
int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
/* Destroy resource @id of type @res. */
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
unsigned int flags, u32 id);
/* Bind object (@type0, @id0) to object (@type1, @id1). */
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
unsigned int id0, enum fun_admin_bind_type type1,
unsigned int id1);
/* Submit an admin command asynchronously; @cb is invoked with @cb_data
 * when the response arrives. NOTE(review): @wait_ok presumably controls
 * waiting for a free command slot — confirm against fun_dev.c.
 */
int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
fun_admin_callback_t cb, void *cb_data, bool wait_ok);
/* Submit an admin command and wait (up to @timeout) for its response,
 * copying up to @rspsize bytes of it into @rsp.
 */
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
struct fun_admin_req_common *cmd, void *rsp,
size_t rspsize, unsigned int timeout);
/* Enable the device and set up its admin queue per @areq. */
int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
const struct fun_dev_params *areq, const char *name);
void fun_dev_disable(struct fun_dev *fdev);
/* Reserve/release @nirqs device IRQs; indices are exchanged via @irq_indices. */
int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
u16 *irq_indices);
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
u16 *irq_indices);
/* Stop/restart/schedule the process-context service task. */
void fun_serv_stop(struct fun_dev *fd);
void fun_serv_restart(struct fun_dev *fd);
void fun_serv_sched(struct fun_dev *fd);
#endif /* _FUNDEV_H */
This diff is collapsed.
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* NOTE(review): guard macro is misspelled ("QEUEUE"); harmless since it is
 * used consistently, but worth renaming to _FUN_QUEUE_H in a follow-up.
 */
#ifndef _FUN_QEUEUE_H
#define _FUN_QEUEUE_H
#include <linux/interrupt.h>
#include <linux/io.h>
struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;
/* Handler invoked for each CQE consumed from a queue's CQ. */
typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
const struct fun_cqe_info *info);
/* Software state for one RQ buffer: its page and mapped DMA address. */
struct fun_rq_info {
dma_addr_t dma;
struct page *page;
};
/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
struct fun_dev *fdev; /* owning device */
spinlock_t sq_lock; /* serializes SQ submissions */
/* DMA addresses of the three rings */
dma_addr_t cq_dma_addr;
dma_addr_t sq_dma_addr;
dma_addr_t rq_dma_addr;
/* doorbell addresses of the three rings */
u32 __iomem *cq_db;
u32 __iomem *sq_db;
u32 __iomem *rq_db;
/* CPU addresses of the ring entries */
void *cqes;
void *sq_cmds;
struct fun_eprq_rqbuf *rqes;
struct fun_rq_info *rq_info; /* SW per-RQ-buffer state */
/* device ids of the three rings */
u32 cqid;
u32 sqid;
u32 rqid;
/* ring depths, in entries */
u32 cq_depth;
u32 sq_depth;
u32 rq_depth;
/* current ring positions */
u16 cq_head;
u16 sq_tail;
u16 rq_tail;
u8 cqe_size_log2; /* log2 of CQE size in bytes */
u8 sqe_size_log2; /* log2 of SQE size in bytes */
u16 cqe_info_offset; /* offset of fun_cqe_info within a CQE */
u16 rq_buf_idx;
int rq_buf_offset;
u16 num_rqe_to_fill;
/* interrupt coalescing settings, per ring */
u8 cq_intcoal_usec;
u8 cq_intcoal_nentries;
u8 sq_intcoal_usec;
u8 sq_intcoal_nentries;
/* creation flags, per ring */
u16 cq_flags;
u16 sq_flags;
u16 rq_flags;
/* SQ head writeback */
u16 sq_comp;
volatile __be64 *sq_head; /* device-written SQ head; volatile: DMA target */
cq_callback_t cq_cb; /* per-CQE handler */
void *cb_data;
irq_handler_t irq_handler;
void *irq_data;
s16 cq_vector;
u8 cq_phase; /* current CQ phase bit for ownership detection */
/* I/O q index */
u16 qid;
char irqname[24];
};
/* Return the address of the SQE at position @pos in a queue's SQ. */
static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
{
	unsigned int offset = pos << funq->sqe_size_log2;

	return funq->sq_cmds + offset;
}
/* Advance the SQ tail past the entry at @tail, wrapping at the ring end,
 * and notify the device through the SQ doorbell.
 */
static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
{
	u16 next = tail + 1;

	if (next == funq->sq_depth)
		next = 0;
	funq->sq_tail = next;
	writel(next, funq->sq_db);
}
/* Locate the fun_cqe_info portion within the CQE at @cqe. */
static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
						 void *cqe)
{
	void *info = cqe + funq->cqe_info_offset;

	return info;
}
/* Publish the current RQ tail to the device via the RQ doorbell. */
static inline void funq_rq_post(struct fun_queue *funq)
{
writel(funq->rq_tail, funq->rq_db);
}
/* Parameters for fun_alloc_queue(): sizes, depths, creation flags, and
 * interrupt coalescing settings for the SQ/CQ/RQ of a queue group.
 */
struct fun_queue_alloc_req {
u8 cqe_size_log2; /* log2 of CQE size in bytes */
u8 sqe_size_log2; /* log2 of SQE size in bytes */
/* ring creation flags */
u16 cq_flags;
u16 sq_flags;
u16 rq_flags;
/* ring depths, in entries */
u32 cq_depth;
u32 sq_depth;
u32 rq_depth;
/* interrupt coalescing settings */
u8 cq_intcoal_usec;
u8 cq_intcoal_nentries;
u8 sq_intcoal_usec;
u8 sq_intcoal_nentries;
};
/* Create an SQ on the device with the given parameters; the resulting SQ
 * id and doorbell address are returned via @sqidp and @dbp.
 */
int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
u8 coal_nentries, u8 coal_usec, u32 irq_num,
u32 scan_start_id, u32 scan_end_id,
u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
/* Create a CQ on the device; the resulting CQ id and doorbell address are
 * returned via @cqidp and @dbp.
 */
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
u32 irq_num, u32 scan_start_id, u32 scan_end_id,
u32 *cqidp, u32 __iomem **dbp);
/* Allocate DMA ring memory (plus optional SW descriptors and head
 * writeback area); freed with fun_free_ring_mem().
 */
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
size_t hw_desc_sz, size_t sw_desc_size, bool wb,
int numa_node, dma_addr_t *dma_addr, void **sw_va,
volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);
/* Convenience wrappers around fun_res_destroy() for SQs and CQs. */
#define fun_destroy_sq(fdev, sqid) \
fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))
/* Allocate/free host-side state and ring memory for a queue group. */
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);
/* Install the CQE handler @cb and its context @cb_data for a queue's CQ. */
static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
				       void *cb_data)
{
	funq->cb_data = cb_data;
	funq->cq_cb = cb;
}
/* Create on the device the RQ, or all rings, of an allocated queue group. */
int fun_create_rq(struct fun_queue *funq);
int fun_create_queue(struct fun_queue *funq);
/* Request/free the IRQ backing a queue group's CQ. */
void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
irq_handler_t handler, void *data);
/* Consume up to @max CQEs; the __ variant presumably skips the doorbell
 * update — NOTE(review): confirm against fun_queue.c.
 */
unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
#endif /* _FUN_QEUEUE_H */
# SPDX-License-Identifier: GPL-2.0-only
#
# Fungible Ethernet driver configuration
#
config FUN_ETH
tristate "Fungible Ethernet device driver"
depends on PCI && PCI_MSI
# Allow building only when kernel TLS device offload is fully enabled
# and reachable, or completely disabled.
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
select FUN_CORE
help
This driver supports the Ethernet functionality of Fungible adapters.
It works with both physical and virtual functions.
To compile this driver as a module, choose M here. The module
will be called funeth.
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
obj-$(CONFIG_FUN_ETH) += funeth.o
funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \
funeth_ethtool.o
funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef _FUN_PORT_H
#define _FUN_PORT_H
/* Indices of the port MAC RX statistics. Names follow the standard
 * IEEE 802.3 / RMON counter names. NOTE(review): presumably these index
 * the per-port stats area DMA-ed by the device — confirm against the
 * stats readers in funeth.
 */
enum port_mac_rx_stats {
PORT_MAC_RX_etherStatsOctets = 0x0,
PORT_MAC_RX_OctetsReceivedOK = 0x1,
PORT_MAC_RX_aAlignmentErrors = 0x2,
PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3,
PORT_MAC_RX_aFrameTooLongErrors = 0x4,
PORT_MAC_RX_aInRangeLengthErrors = 0x5,
PORT_MAC_RX_aFramesReceivedOK = 0x6,
PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7,
PORT_MAC_RX_VLANReceivedOK = 0x8,
PORT_MAC_RX_ifInErrors = 0x9,
PORT_MAC_RX_ifInUcastPkts = 0xa,
PORT_MAC_RX_ifInMulticastPkts = 0xb,
PORT_MAC_RX_ifInBroadcastPkts = 0xc,
PORT_MAC_RX_etherStatsDropEvents = 0xd,
PORT_MAC_RX_etherStatsPkts = 0xe,
PORT_MAC_RX_etherStatsUndersizePkts = 0xf,
PORT_MAC_RX_etherStatsPkts64Octets = 0x10,
PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11,
PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12,
PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13,
PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14,
PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15,
PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16,
PORT_MAC_RX_etherStatsOversizePkts = 0x17,
PORT_MAC_RX_etherStatsJabbers = 0x18,
PORT_MAC_RX_etherStatsFragments = 0x19,
/* per-priority (0-15) class-based flow control PAUSE frames */
PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a,
PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b,
PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c,
PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d,
PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e,
PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f,
PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20,
PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21,
PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22,
PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23,
PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24,
PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25,
PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26,
PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27,
PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28,
PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29,
PORT_MAC_RX_MACControlFramesReceived = 0x2a,
PORT_MAC_RX_STATS_MAX = 0x2b, /* number of RX stats */
};
/* Indices of the port MAC TX statistics; same naming scheme as RX. */
enum port_mac_tx_stats {
PORT_MAC_TX_etherStatsOctets = 0x0,
PORT_MAC_TX_OctetsTransmittedOK = 0x1,
PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2,
PORT_MAC_TX_aFramesTransmittedOK = 0x3,
PORT_MAC_TX_VLANTransmittedOK = 0x4,
PORT_MAC_TX_ifOutErrors = 0x5,
PORT_MAC_TX_ifOutUcastPkts = 0x6,
PORT_MAC_TX_ifOutMulticastPkts = 0x7,
PORT_MAC_TX_ifOutBroadcastPkts = 0x8,
PORT_MAC_TX_etherStatsPkts64Octets = 0x9,
PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa,
PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb,
PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc,
PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd,
PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe,
PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf,
/* per-priority (0-15) class-based flow control PAUSE frames */
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e,
PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f,
PORT_MAC_TX_MACControlFramesTransmitted = 0x20,
PORT_MAC_TX_etherStatsPkts = 0x21,
PORT_MAC_TX_STATS_MAX = 0x22, /* number of TX stats */
};
/* Indices of the FEC statistics. */
enum port_mac_fec_stats {
PORT_MAC_FEC_Correctable = 0x0,
PORT_MAC_FEC_Uncorrectable = 0x1,
PORT_MAC_FEC_STATS_MAX = 0x2, /* number of FEC stats */
};
#endif /* _FUN_PORT_H */
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef _FUNETH_H
#define _FUNETH_H
#include <uapi/linux/if_ether.h>
#include <uapi/linux/net_tstamp.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include "fun_dev.h"
/* admin queue entry sizes and max response payload */
#define ADMIN_SQE_SIZE SZ_128
#define ADMIN_CQE_SIZE SZ_64
#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info))
#define FUN_MAX_MTU 9024
/* default I/O queue depths; RQ depth scales down with larger pages */
#define SQ_DEPTH 512U
#define CQ_DEPTH 1024U
#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))
/* default interrupt coalescing settings */
#define CQ_INTCOAL_USEC 10
#define CQ_INTCOAL_NPKT 16
#define SQ_INTCOAL_USEC 10
#define SQ_INTCOAL_NPKT 16
#define INVALID_LPORT 0xffff
#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE)
/* Configuration of one virtual port; the fields mirror the standard
 * ndo_set_vf_* attributes (MAC, VLAN, QoS, spoof checking, trust, rate).
 */
struct fun_vport_info {
u8 mac[ETH_ALEN];
u16 vlan;
__be16 vlan_proto;
u8 qos;
u8 spoofchk:1;
u8 trusted:1;
unsigned int max_rate;
};
/* "subclass" of fun_dev for Ethernet functions */
struct fun_ethdev {
struct fun_dev fdev; /* must be first: to_fun_ethdev() relies on it */
/* the function's network ports */
struct net_device **netdevs;
unsigned int num_ports;
/* configuration for the function's virtual ports */
unsigned int num_vports;
struct fun_vport_info *vport_info;
struct mutex state_mutex; /* nests inside RTNL if both taken */
unsigned int nsqs_per_port;
};
/* Recover the fun_ethdev from its embedded fun_dev. */
static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p)
{
return container_of(p, struct fun_ethdev, fdev);
}
/* A set of Rx/Tx/XDP queues (with their counts, starting indices, and
 * depths) manipulated as a unit, e.g. when resizing or renumbering queues.
 */
struct fun_qset {
struct funeth_rxq **rxqs;
struct funeth_txq **txqs;
struct funeth_txq **xdpqs;
unsigned int nrxqs;
unsigned int ntxqs;
unsigned int nxdpqs;
/* first queue index of each type covered by this set */
unsigned int rxq_start;
unsigned int txq_start;
unsigned int xdpq_start;
/* ring depths to use for the queues */
unsigned int cq_depth;
unsigned int rq_depth;
unsigned int sq_depth;
int state; /* target queue state */
};
/* Per netdevice driver state, i.e., netdev_priv. */
struct funeth_priv {
struct fun_dev *fdev; /* owning function's device state */
struct pci_dev *pdev;
struct net_device *netdev;
/* queue arrays; RCU-managed ones may be swapped at runtime */
struct funeth_rxq * __rcu *rxqs;
struct funeth_txq **txqs;
struct funeth_txq * __rcu *xdpqs;
struct xarray irqs; /* IRQ bookkeeping for the port's queues */
unsigned int num_tx_irqs;
unsigned int num_rx_irqs;
unsigned int rx_irq_ofst;
unsigned int lane_attrs;
u16 lport; /* device id of this port */
/* link settings */
u64 port_caps;
u64 advertising;
u64 lp_advertising;
unsigned int link_speed;
u8 xcvr_type;
u8 active_fc;
u8 active_fec;
u8 link_down_reason;
seqcount_t link_seq; /* makes link settings readable consistently */
u32 msg_enable;
unsigned int num_xdpqs;
/* ethtool, etc. config parameters */
unsigned int sq_depth;
unsigned int rq_depth;
unsigned int cq_depth;
unsigned int cq_irq_db;
u8 tx_coal_usec;
u8 tx_coal_count;
u8 rx_coal_usec;
u8 rx_coal_count;
struct hwtstamp_config hwtstamp_cfg;
/* cumulative queue stats from earlier queue instances */
u64 tx_packets;
u64 tx_bytes;
u64 tx_dropped;
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
/* RSS */
unsigned int rss_hw_id;
enum fun_eth_hash_alg hash_algo;
u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE];
unsigned int indir_table_nentries;
u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT];
dma_addr_t rss_dma_addr; /* DMA address of rss_cfg */
void *rss_cfg;
/* DMA area for port stats */
dma_addr_t stats_dma_addr;
__be64 *stats;
struct bpf_prog *xdp_prog; /* attached XDP program, if any */
struct devlink_port dl_port;
/* kTLS state */
unsigned int ktls_id; /* device kTLS object id, FUN_HCI_ID_INVALID if none */
atomic64_t tx_tls_add;
atomic64_t tx_tls_del;
atomic64_t tx_tls_resync;
};
void fun_set_ethtool_ops(struct net_device *netdev);
/* Write/read a single port attribute identified by @key. */
int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data);
int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data);
int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid);
/* Swap in the queues described by @newqs for the current ones. */
int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
struct netlink_ext_ack *extack);
/* Change (incrementally) the numbers of Tx/Rx queues of a running device. */
int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
unsigned int nrx);
void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
unsigned int nrx);
/* Configure RSS hash algorithm, key, and indirection table. */
int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
const u32 *qtable, u8 op);
#endif /* _FUNETH_H */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
#include "funeth.h"
#include "funeth_devlink.h"
/* devlink .info_get handler: report the driver name.
 * The only attribute reported is the driver name, so any error from the
 * put is simply forwarded (the original err/return-0 dance was redundant).
 */
static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			   struct netlink_ext_ack *extack)
{
	return devlink_info_driver_name_put(req, KBUILD_MODNAME);
}
/* devlink operations supported by this driver. */
static const struct devlink_ops fun_dl_ops = {
.info_get = fun_dl_info_get,
};
/* Allocate a devlink instance whose private area is a struct fun_ethdev. */
struct devlink *fun_devlink_alloc(struct device *dev)
{
return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
}
void fun_devlink_free(struct devlink *devlink)
{
devlink_free(devlink);
}
void fun_devlink_register(struct devlink *devlink)
{
devlink_register(devlink);
}
void fun_devlink_unregister(struct devlink *devlink)
{
devlink_unregister(devlink);
}
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef __FUNETH_DEVLINK_H
#define __FUNETH_DEVLINK_H
#include <net/devlink.h>
struct devlink *fun_devlink_alloc(struct device *dev);
void fun_devlink_free(struct devlink *devlink);
void fun_devlink_register(struct devlink *devlink);
void fun_devlink_unregister(struct devlink *devlink);
#endif /* __FUNETH_DEVLINK_H */
This diff is collapsed.
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
#include "funeth.h"
#include "funeth_ktls.h"
/* Issue a synchronous admin command creating the device kTLS object @id. */
static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
{
struct fun_admin_ktls_create_req req = {
.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
sizeof(req)),
.subop = FUN_ADMIN_SUBOP_CREATE,
.id = cpu_to_be32(id),
};
return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
}
/* tlsdev_ops .tls_dev_add: program a TLS TX session into the device.
 * Only TX offload of TLS 1.2 with AES-GCM-128 is supported; everything
 * else returns -EOPNOTSUPP. On success the device session id and the
 * next expected TCP sequence number are stored in the TLS driver context.
 */
static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn)
{
struct funeth_priv *fp = netdev_priv(netdev);
struct fun_admin_ktls_modify_req req = {
.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
sizeof(req)),
.subop = FUN_ADMIN_SUBOP_MODIFY,
.id = cpu_to_be32(fp->ktls_id),
.tcp_seq = cpu_to_be32(start_offload_tcp_sn),
};
struct fun_admin_ktls_modify_rsp rsp;
struct fun_ktls_tx_ctx *tx_ctx;
int rc;
if (direction != TLS_OFFLOAD_CTX_DIR_TX)
return -EOPNOTSUPP;
if (crypto_info->version == TLS_1_2_VERSION)
req.version = FUN_KTLS_TLSV2;
else
return -EOPNOTSUPP;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;
req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
/* copy key material into the request */
memcpy(req.key, c->key, sizeof(c->key));
memcpy(req.iv, c->iv, sizeof(c->iv));
memcpy(req.salt, c->salt, sizeof(c->salt));
memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
break;
}
default:
return -EOPNOTSUPP;
}
rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
sizeof(rsp), 0);
/* wipe the key material from the stack regardless of the outcome */
memzero_explicit(&req, sizeof(req));
if (rc)
return rc;
tx_ctx = tls_driver_ctx(sk, direction);
tx_ctx->tlsid = rsp.tlsid;
tx_ctx->next_seq = start_offload_tcp_sn;
atomic64_inc(&fp->tx_tls_add);
return 0;
}
/* tlsdev_ops .tls_dev_del: remove a TX TLS session from the device.
 * The request length is truncated at the tcp_seq member, so the fields
 * past it (including key material) are never read or sent even though
 * req is not fully initialized.
 */
static void fun_ktls_del(struct net_device *netdev,
struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
struct funeth_priv *fp = netdev_priv(netdev);
struct fun_admin_ktls_modify_req req;
struct fun_ktls_tx_ctx *tx_ctx;
if (direction != TLS_OFFLOAD_CTX_DIR_TX)
return;
tx_ctx = __tls_driver_ctx(tls_ctx, direction);
req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
req.subop = FUN_ADMIN_SUBOP_MODIFY;
req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
req.id = cpu_to_be32(fp->ktls_id);
req.tlsid = tx_ctx->tlsid;
/* best effort: the return value of the removal command is ignored */
fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
atomic64_inc(&fp->tx_tls_del);
}
/* tlsdev_ops .tls_dev_resync: resynchronize a TX session after the driver
 * fell behind, supplying the TCP sequence and record sequence number.
 * The request length is truncated at the key member so no key material
 * is resent.
 */
static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
struct funeth_priv *fp = netdev_priv(netdev);
struct fun_admin_ktls_modify_req req;
struct fun_ktls_tx_ctx *tx_ctx;
int rc;
if (direction != TLS_OFFLOAD_CTX_DIR_TX)
return -EOPNOTSUPP;
tx_ctx = tls_driver_ctx(sk, direction);
req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
offsetof(struct fun_admin_ktls_modify_req, key));
req.subop = FUN_ADMIN_SUBOP_MODIFY;
req.flags = 0;
req.id = cpu_to_be32(fp->ktls_id);
req.tlsid = tx_ctx->tlsid;
req.tcp_seq = cpu_to_be32(seq);
req.version = 0;
req.cipher = 0;
memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));
atomic64_inc(&fp->tx_tls_resync);
rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
if (!rc)
tx_ctx->next_seq = seq;
return rc;
}
/* TLS device offload operations registered on the netdev. */
static const struct tlsdev_ops fun_ktls_ops = {
.tls_dev_add = fun_ktls_add,
.tls_dev_del = fun_ktls_del,
.tls_dev_resync = fun_ktls_resync,
};
/* Create the port's kTLS object on the device and, on success, register
 * the TLS offload ops and advertise TX TLS offload on the netdev.
 * Returns 0 or a negative error.
 */
int fun_ktls_init(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	int err;

	err = fun_admin_ktls_create(fp, netdev->dev_port);
	if (err)
		return err;

	netdev->tlsdev_ops = &fun_ktls_ops;
	netdev->hw_features |= NETIF_F_HW_TLS_TX;
	netdev->features |= NETIF_F_HW_TLS_TX;
	fp->ktls_id = netdev->dev_port;
	return 0;
}
/* Destroy the port's device kTLS object, if one exists, and mark it gone. */
void fun_ktls_cleanup(struct funeth_priv *fp)
{
	if (fp->ktls_id != FUN_HCI_ID_INVALID) {
		fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
		fp->ktls_id = FUN_HCI_ID_INVALID;
	}
}
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef _FUN_KTLS_H
#define _FUN_KTLS_H

struct net_device;
struct funeth_priv;

#ifdef CONFIG_TLS_DEVICE
#include <net/tls.h>

/* Per-socket TX offload state kept in the TLS driver context. */
struct fun_ktls_tx_ctx {
	__be64 tlsid;   /* device TLS session id */
	u32 next_seq;   /* next expected TCP sequence number */
};

int fun_ktls_init(struct net_device *netdev);
void fun_ktls_cleanup(struct funeth_priv *fp);

#else

/* Stubs for kernels without TLS device offload. The init stub returns int
 * to match the CONFIG_TLS_DEVICE prototype above, so callers that check
 * its result compile in both configurations.
 */
static inline int fun_ktls_init(struct net_device *netdev)
{
	return 0;
}

static inline void fun_ktls_cleanup(struct funeth_priv *fp)
{
}
#endif

#endif /* _FUN_KTLS_H */
This diff is collapsed.
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM funeth
#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FUNETH_H
#include <linux/tracepoint.h>
#include "funeth_txrx.h"
/* Trace one transmit: device name, Tx queue index, SQE index, packet
 * length, and number of gather list entries (ngle).
 */
TRACE_EVENT(funeth_tx,
TP_PROTO(const struct funeth_txq *txq,
u32 len,
u32 sqe_idx,
u32 ngle),
TP_ARGS(txq, len, sqe_idx, ngle),
TP_STRUCT__entry(
__field(u32, qidx)
__field(u32, len)
__field(u32, sqe_idx)
__field(u32, ngle)
__string(devname, txq->netdev->name)
),
TP_fast_assign(
__entry->qidx = txq->qidx;
__entry->len = len;
__entry->sqe_idx = sqe_idx;
__entry->ngle = ngle;
__assign_str(devname, txq->netdev->name);
),
TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
__get_str(devname), __entry->qidx, __entry->sqe_idx,
__entry->len, __entry->ngle)
);
/* Trace reclaim of completed Tx descriptors: starting SQE index, how many
 * SQEs were freed, and the hardware head at the time.
 */
TRACE_EVENT(funeth_tx_free,
TP_PROTO(const struct funeth_txq *txq,
u32 sqe_idx,
u32 num_sqes,
u32 hw_head),
TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
TP_STRUCT__entry(
__field(u32, qidx)
__field(u32, sqe_idx)
__field(u32, num_sqes)
__field(u32, hw_head)
__string(devname, txq->netdev->name)
),
TP_fast_assign(
__entry->qidx = txq->qidx;
__entry->sqe_idx = sqe_idx;
__entry->num_sqes = num_sqes;
__entry->hw_head = hw_head;
__assign_str(devname, txq->netdev->name);
),
TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
__get_str(devname), __entry->qidx, __entry->sqe_idx,
__entry->num_sqes, __entry->hw_head)
);
/* Trace one receive: Rx queue index, CQ head, number of RQ buffers
 * consumed, packet length, RSS hash, and classification vector.
 */
TRACE_EVENT(funeth_rx,
TP_PROTO(const struct funeth_rxq *rxq,
u32 num_rqes,
u32 pkt_len,
u32 hash,
u32 cls_vec),
TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
TP_STRUCT__entry(
__field(u32, qidx)
__field(u32, cq_head)
__field(u32, num_rqes)
__field(u32, len)
__field(u32, hash)
__field(u32, cls_vec)
__string(devname, rxq->netdev->name)
),
TP_fast_assign(
__entry->qidx = rxq->qidx;
__entry->cq_head = rxq->cq_head;
__entry->num_rqes = num_rqes;
__entry->len = pkt_len;
__entry->hash = hash;
__entry->cls_vec = cls_vec;
__assign_str(devname, rxq->netdev->name);
),
TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
__get_str(devname), __entry->qidx, __entry->cq_head,
__entry->num_rqes, __entry->len, __entry->hash,
__entry->cls_vec)
);
#endif /* _TRACE_FUNETH_H */
/* Below must be outside protection. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE funeth_trace
#include <trace/define_trace.h>
This diff is collapsed.
This diff is collapsed.
......@@ -2561,6 +2561,8 @@
#define PCI_VENDOR_ID_HYGON 0x1d94
#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
#define PCI_VENDOR_ID_HXT 0x1dbf
#define PCI_VENDOR_ID_TEKRAM 0x1de1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment