Commit dcbe4ea1 authored by Jakub Kicinski

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-05-18 (igc, igb, e1000e)

This series contains updates to igc, igb, and e1000e drivers.

Kurt Kanzenbach adds calls to txq_trans_cond_update() for XDP transmit
on igc.

Tom Rix makes definition of igb_pm_ops conditional on CONFIG_PM for igb.

Baozhu Ni adds a missing kdoc description on e1000e.

* '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  e1000e: Add @adapter description to kdoc
  igb: Define igb_pm_ops conditionally on CONFIG_PM
  igc: Avoid transmit queue timeout for XDP
====================

Link: https://lore.kernel.org/r/20230518170942.418109-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 20d5e0ef c4dc8dc3
@@ -4198,7 +4198,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
 /**
  * e1000e_trigger_lsc - trigger an LSC interrupt
- * @adapter:
+ * @adapter: board private structure
  *
  * Fire a link status change interrupt to start the watchdog.
  **/
@@ -183,11 +183,13 @@ static int igb_resume(struct device *);
 static int igb_runtime_suspend(struct device *dev);
 static int igb_runtime_resume(struct device *dev);
 static int igb_runtime_idle(struct device *dev);
+#ifdef CONFIG_PM
 static const struct dev_pm_ops igb_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
         SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
                            igb_runtime_idle)
 };
+#endif
 static void igb_shutdown(struct pci_dev *);
 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
 #ifdef CONFIG_IGB_DCA
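
For context on the igb change: the only reference to igb_pm_ops (the .driver.pm field of the PCI driver) was already wrapped in #ifdef CONFIG_PM, so with CONFIG_PM disabled the structure was defined but never used and W=1 builds warned about an unused const variable. A minimal sketch of the resulting pattern follows; the driver name and callbacks are hypothetical stand-ins, not code taken from igb:

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused my_suspend(struct device *dev) { return 0; }
static int __maybe_unused my_resume(struct device *dev) { return 0; }

#ifdef CONFIG_PM
/* Defined only when the reference below is also compiled in. */
static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};
#endif

static struct pci_driver my_pci_driver = {
        .name = "my_dev",
#ifdef CONFIG_PM
        .driver.pm = &my_pm_ops,
#endif
};

Keeping the definition and its single use under the same guard is what silences the unused-variable warning without changing behaviour when CONFIG_PM is enabled.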
@@ -2411,6 +2411,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
         nq = txring_txq(ring);
 
         __netif_tx_lock(nq, cpu);
+        /* Avoid transmit queue timeout since we share it with the slow path */
+        txq_trans_cond_update(nq);
         res = igc_xdp_init_tx_descriptor(ring, xdpf);
         __netif_tx_unlock(nq);
         return res;
@@ -2829,6 +2831,9 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
 
         __netif_tx_lock(nq, cpu);
 
+        /* Avoid transmit queue timeout since we share it with the slow path */
+        txq_trans_cond_update(nq);
+
         budget = igc_desc_unused(ring);
 
         while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
@@ -6354,6 +6359,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 
         __netif_tx_lock(nq, cpu);
 
+        /* Avoid transmit queue timeout since we share it with the slow path */
+        txq_trans_cond_update(nq);
+
         drops = 0;
         for (i = 0; i < num_frames; i++) {
                 int err;
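
For context on the igc hunks: all three XDP transmit paths reuse a netdev TX queue that is shared with the regular stack, but they bypass the core transmit code that normally refreshes the queue's trans_start timestamp, so the transmit watchdog could falsely report the queue as hung. txq_trans_cond_update() refreshes that timestamp only when it is stale. A condensed sketch of the pattern is below; it is not igc code, and my_xmit_xdp_frame() is a hypothetical stand-in for the driver's descriptor setup (igc_xdp_init_tx_descriptor() in the real driver):

#include <linux/netdevice.h>
#include <linux/smp.h>
#include <net/xdp.h>

/* Hypothetical per-frame helper standing in for driver-specific
 * descriptor setup; it would queue one frame on the hardware ring.
 */
static int my_xmit_xdp_frame(struct net_device *dev, struct xdp_frame *xdpf)
{
        return 0;
}

static int my_ndo_xdp_xmit(struct net_device *dev, int num_frames,
                           struct xdp_frame **frames, u32 flags)
{
        int cpu = smp_processor_id();
        struct netdev_queue *nq;
        int i, nxmit = 0;

        /* Pick a TX queue; real drivers map the current CPU to a ring. */
        nq = netdev_get_tx_queue(dev, cpu % dev->real_num_tx_queues);

        __netif_tx_lock(nq, cpu);

        /* The queue is shared with the slow (stack) path, which normally
         * refreshes trans_start; XDP bypasses that path, so refresh it here
         * to keep the transmit watchdog from declaring the queue hung.
         */
        txq_trans_cond_update(nq);

        for (i = 0; i < num_frames; i++) {
                if (my_xmit_xdp_frame(dev, frames[i]))
                        break;
                nxmit++;
        }

        __netif_tx_unlock(nq);
        return nxmit;
}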