Commit 82192cb4 authored by Jakub Kicinski

Merge branch 'ena-capabilities-field-and-cosmetic-changes'

Arthur Kiyanovski says:

====================
ENA: capabilities field and cosmetic changes

Add a new capabilities bitmask field to get indication of
capabilities supported by the device. Use the capabilities
field to query the device for ENI stats support.

Other patches are cosmetic changes, such as fixing README
mistakes, removing unused variables, etc.
====================

Link: https://lore.kernel.org/r/20220107202346.3522-1-akiyano@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents bf44077c 9fe890cc
...@@ -135,7 +135,7 @@ The ENA driver supports two Queue Operation modes for Tx SQs: ...@@ -135,7 +135,7 @@ The ENA driver supports two Queue Operation modes for Tx SQs:
- **Low Latency Queue (LLQ) mode or "push-mode":** - **Low Latency Queue (LLQ) mode or "push-mode":**
In this mode the driver pushes the transmit descriptors and the In this mode the driver pushes the transmit descriptors and the
first 128 bytes of the packet directly to the ENA device memory first 96 bytes of the packet directly to the ENA device memory
space. The rest of the packet payload is fetched by the space. The rest of the packet payload is fetched by the
device. For this operation mode, the driver uses a dedicated PCI device. For this operation mode, the driver uses a dedicated PCI
device memory BAR, which is mapped with write-combine capability. device memory BAR, which is mapped with write-combine capability.
......
...@@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id { ...@@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_FEATURES_OPCODE_NUM = 32, ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
}; };
/* device capabilities */
enum ena_admin_aq_caps_id {
ENA_ADMIN_ENI_STATS = 0,
};
enum ena_admin_placement_policy_type { enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */ /* descriptors and headers are in host memory */
ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
...@@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc { ...@@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc {
*/ */
u32 supported_features; u32 supported_features;
u32 reserved3; /* bitmap of ena_admin_aq_caps_id, which represents device
* capabilities.
*/
u32 capabilities;
/* Indicates how many bits are used physical address access. */ /* Indicates how many bits are used physical address access. */
u32 phys_addr_width; u32 phys_addr_width;
......
...@@ -1971,6 +1971,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, ...@@ -1971,6 +1971,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.dev_attr)); sizeof(get_resp.u.dev_attr));
ena_dev->supported_features = get_resp.u.dev_attr.supported_features; ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
rc = ena_com_get_feature(ena_dev, &get_resp, rc = ena_com_get_feature(ena_dev, &get_resp,
...@@ -2223,6 +2224,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, ...@@ -2223,6 +2224,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_com_stats_ctx ctx; struct ena_com_stats_ctx ctx;
int ret; int ret;
if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
netdev_err(ena_dev->net_device,
"Capability %d isn't supported\n",
ENA_ADMIN_ENI_STATS);
return -EOPNOTSUPP;
}
memset(&ctx, 0x0, sizeof(ctx)); memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
if (likely(ret == 0)) if (likely(ret == 0))
......
...@@ -314,6 +314,7 @@ struct ena_com_dev { ...@@ -314,6 +314,7 @@ struct ena_com_dev {
struct ena_rss rss; struct ena_rss rss;
u32 supported_features; u32 supported_features;
u32 capabilities;
u32 dma_addr_bits; u32 dma_addr_bits;
struct ena_host_attribute host_attr; struct ena_host_attribute host_attr;
...@@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d ...@@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
ena_dev->adaptive_coalescing = false; ena_dev->adaptive_coalescing = false;
} }
/* ena_com_get_cap - query whether device supports a capability.
* @ena_dev: ENA communication layer struct
* @cap_id: enum value representing the capability
*
* @return - true if capability is supported or false otherwise
*/
static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
enum ena_admin_aq_caps_id cap_id)
{
return !!(ena_dev->capabilities & BIT(cap_id));
}
/* ena_com_update_intr_reg - Prepare interrupt register /* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update. * @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs * @rx_delay_interval: Rx interval in usecs
......
...@@ -82,7 +82,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ...@@ -82,7 +82,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(csum_good), ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial), ENA_STAT_RX_ENTRY(refil_partial),
ENA_STAT_RX_ENTRY(bad_csum), ENA_STAT_RX_ENTRY(csum_bad),
ENA_STAT_RX_ENTRY(page_alloc_fail), ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail), ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err), ENA_STAT_RX_ENTRY(dma_mapping_err),
...@@ -110,8 +110,7 @@ static const struct ena_stats ena_stats_ena_com_strings[] = { ...@@ -110,8 +110,7 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
#define ENA_STATS_ARRAY_ENI(adapter) \ #define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
(ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
static void ena_safe_update_stat(u64 *src, u64 *dst, static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp) struct u64_stats_sync *syncp)
...@@ -213,8 +212,9 @@ static void ena_get_ethtool_stats(struct net_device *netdev, ...@@ -213,8 +212,9 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data) u64 *data)
{ {
struct ena_adapter *adapter = netdev_priv(netdev); struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *dev = adapter->ena_dev;
ena_get_stats(adapter, data, adapter->eni_stats_supported); ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
} }
static int ena_get_sw_stats_count(struct ena_adapter *adapter) static int ena_get_sw_stats_count(struct ena_adapter *adapter)
...@@ -226,7 +226,9 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter) ...@@ -226,7 +226,9 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter) static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{ {
return ENA_STATS_ARRAY_ENI(adapter); bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
return ENA_STATS_ARRAY_ENI(adapter) * supported;
} }
int ena_get_sset_count(struct net_device *netdev, int sset) int ena_get_sset_count(struct net_device *netdev, int sset)
...@@ -316,10 +318,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev, ...@@ -316,10 +318,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data) u8 *data)
{ {
struct ena_adapter *adapter = netdev_priv(netdev); struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) { switch (sset) {
case ETH_SS_STATS: case ETH_SS_STATS:
ena_get_strings(adapter, data, adapter->eni_stats_supported); ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
break; break;
} }
} }
......
...@@ -103,7 +103,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) ...@@ -103,7 +103,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return; return;
adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD; ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n"); netif_err(adapter, tx_err, dev, "Transmit time out\n");
...@@ -166,11 +166,9 @@ static int ena_xmit_common(struct net_device *dev, ...@@ -166,11 +166,9 @@ static int ena_xmit_common(struct net_device *dev,
"Failed to prepare tx bufs\n"); "Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp); &ring->syncp);
if (rc != -ENOMEM) { if (rc != -ENOMEM)
adapter->reset_reason = ena_reset_device(adapter,
ENA_REGS_RESET_DRIVER_INVALID_STATE; ENA_REGS_RESET_DRIVER_INVALID_STATE);
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
return rc; return rc;
} }
...@@ -1269,20 +1267,18 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, ...@@ -1269,20 +1267,18 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
netif_err(ring->adapter, netif_err(ring->adapter,
tx_done, tx_done,
ring->netdev, ring->netdev,
"tx_info doesn't have valid %s", "tx_info doesn't have valid %s. qid %u req_id %u",
is_xdp ? "xdp frame" : "skb"); is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
else else
netif_err(ring->adapter, netif_err(ring->adapter,
tx_done, tx_done,
ring->netdev, ring->netdev,
"Invalid req_id: %hu\n", "Invalid req_id %u in qid %u\n",
req_id); req_id, ring->qid);
ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
/* Trigger device reset */
ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
return -EFAULT; return -EFAULT;
} }
...@@ -1445,10 +1441,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, ...@@ -1445,10 +1441,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
netif_err(adapter, rx_err, rx_ring->netdev, netif_err(adapter, rx_err, rx_ring->netdev,
"Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
/* Make sure reset reason is set before triggering the reset */
smp_mb__before_atomic();
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return NULL; return NULL;
} }
...@@ -1558,7 +1551,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, ...@@ -1558,7 +1551,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) { (ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */ /* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp); &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n"); "RX IPv4 header checksum error\n");
...@@ -1570,7 +1563,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, ...@@ -1570,7 +1563,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) { if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */ /* TCP/UDP checksum error */
ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp); &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n"); "RX L4 checksum error\n");
...@@ -1781,15 +1774,12 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ...@@ -1781,15 +1774,12 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (rc == -ENOSPC) { if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
&rx_ring->syncp); &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else { } else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp); &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
} }
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return 0; return 0;
} }
...@@ -3253,11 +3243,11 @@ static void ena_config_debug_area(struct ena_adapter *adapter) ...@@ -3253,11 +3243,11 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
int ena_update_hw_stats(struct ena_adapter *adapter) int ena_update_hw_stats(struct ena_adapter *adapter)
{ {
int rc = 0; int rc;
rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
if (rc) { if (rc) {
dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n"); netdev_err(adapter->netdev, "Failed to get ENI stats\n");
return rc; return rc;
} }
...@@ -3654,8 +3644,6 @@ static int ena_restore_device(struct ena_adapter *adapter) ...@@ -3654,8 +3644,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
adapter->last_keep_alive_jiffies = jiffies; adapter->last_keep_alive_jiffies = jiffies;
dev_err(&pdev->dev, "Device reset completed successfully\n");
return rc; return rc;
err_disable_msix: err_disable_msix:
ena_free_mgmnt_irq(adapter); ena_free_mgmnt_irq(adapter);
...@@ -3685,6 +3673,8 @@ static void ena_fw_reset_device(struct work_struct *work) ...@@ -3685,6 +3673,8 @@ static void ena_fw_reset_device(struct work_struct *work)
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
ena_destroy_device(adapter, false); ena_destroy_device(adapter, false);
ena_restore_device(adapter); ena_restore_device(adapter);
dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
} }
rtnl_unlock(); rtnl_unlock();
...@@ -3707,9 +3697,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, ...@@ -3707,9 +3697,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
netif_err(adapter, rx_err, adapter->netdev, netif_err(adapter, rx_err, adapter->netdev,
"Potential MSIX issue on Rx side Queue = %d. Reset the device\n", "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
rx_ring->qid); rx_ring->qid);
adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
smp_mb__before_atomic(); ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return -EIO; return -EIO;
} }
...@@ -3746,9 +3735,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, ...@@ -3746,9 +3735,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
netif_err(adapter, tx_err, adapter->netdev, netif_err(adapter, tx_err, adapter->netdev,
"Potential MSIX issue on Tx side Queue = %d. Reset the device\n", "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
tx_ring->qid); tx_ring->qid);
adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
smp_mb__before_atomic();
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return -EIO; return -EIO;
} }
...@@ -3774,9 +3761,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, ...@@ -3774,9 +3761,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
missed_tx, missed_tx,
adapter->missing_tx_completion_threshold); adapter->missing_tx_completion_threshold);
adapter->reset_reason = ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
ENA_REGS_RESET_MISS_TX_CMPL;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
rc = -EIO; rc = -EIO;
} }
...@@ -3897,8 +3882,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) ...@@ -3897,8 +3882,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
"Keep alive watchdog timeout.\n"); "Keep alive watchdog timeout.\n");
ena_increase_stat(&adapter->dev_stats.wd_expired, 1, ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
&adapter->syncp); &adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
} }
} }
...@@ -3909,8 +3893,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) ...@@ -3909,8 +3893,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
"ENA admin queue is not in running state!\n"); "ENA admin queue is not in running state!\n");
ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
&adapter->syncp); &adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
} }
} }
...@@ -4110,7 +4093,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter) ...@@ -4110,7 +4093,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i, rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val)); ENA_IO_RXQ_IDX(val));
if (unlikely(rc && (rc != -EOPNOTSUPP))) { if (unlikely(rc)) {
dev_err(dev, "Cannot fill indirect table\n"); dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir; goto err_fill_indir;
} }
...@@ -4146,10 +4129,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) ...@@ -4146,10 +4129,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
} }
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) static void ena_calc_io_queue_size(struct ena_adapter *adapter,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{ {
struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
struct ena_com_dev *ena_dev = ctx->ena_dev; struct ena_com_dev *ena_dev = adapter->ena_dev;
u32 tx_queue_size = ENA_DEFAULT_RING_SIZE; u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 rx_queue_size = ENA_DEFAULT_RING_SIZE; u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 max_tx_queue_size; u32 max_tx_queue_size;
...@@ -4157,7 +4141,7 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) ...@@ -4157,7 +4141,7 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext = struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&ctx->get_feat_ctx->max_queue_ext.max_queue_ext; &get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
max_queue_ext->max_rx_sq_depth); max_queue_ext->max_rx_sq_depth);
max_tx_queue_size = max_queue_ext->max_tx_cq_depth; max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
...@@ -4169,13 +4153,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) ...@@ -4169,13 +4153,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size, max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queue_ext->max_tx_sq_depth); max_queue_ext->max_tx_sq_depth);
ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
max_queue_ext->max_per_packet_tx_descs); max_queue_ext->max_per_packet_tx_descs);
ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
max_queue_ext->max_per_packet_rx_descs); max_queue_ext->max_per_packet_rx_descs);
} else { } else {
struct ena_admin_queue_feature_desc *max_queues = struct ena_admin_queue_feature_desc *max_queues =
&ctx->get_feat_ctx->max_queues; &get_feat_ctx->max_queues;
max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
max_queues->max_sq_depth); max_queues->max_sq_depth);
max_tx_queue_size = max_queues->max_cq_depth; max_tx_queue_size = max_queues->max_cq_depth;
...@@ -4187,10 +4171,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) ...@@ -4187,10 +4171,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size, max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queues->max_sq_depth); max_queues->max_sq_depth);
ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
max_queues->max_packet_tx_descs); max_queues->max_packet_tx_descs);
ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
max_queues->max_packet_rx_descs); max_queues->max_packet_rx_descs);
} }
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
...@@ -4204,12 +4188,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) ...@@ -4204,12 +4188,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
tx_queue_size = rounddown_pow_of_two(tx_queue_size); tx_queue_size = rounddown_pow_of_two(tx_queue_size);
rx_queue_size = rounddown_pow_of_two(rx_queue_size); rx_queue_size = rounddown_pow_of_two(rx_queue_size);
ctx->max_tx_queue_size = max_tx_queue_size; adapter->max_tx_ring_size = max_tx_queue_size;
ctx->max_rx_queue_size = max_rx_queue_size; adapter->max_rx_ring_size = max_rx_queue_size;
ctx->tx_queue_size = tx_queue_size; adapter->requested_tx_ring_size = tx_queue_size;
ctx->rx_queue_size = rx_queue_size; adapter->requested_rx_ring_size = rx_queue_size;
return 0;
} }
/* ena_probe - Device Initialization Routine /* ena_probe - Device Initialization Routine
...@@ -4224,7 +4206,6 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) ...@@ -4224,7 +4206,6 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/ */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL; struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter; struct ena_adapter *adapter;
...@@ -4309,10 +4290,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -4309,10 +4290,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy; goto err_device_destroy;
} }
calc_queue_ctx.ena_dev = ena_dev;
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
calc_queue_ctx.pdev = pdev;
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity. /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity * Updated during device initialization with the real granularity
*/ */
...@@ -4320,8 +4297,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -4320,8 +4297,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
rc = ena_calc_io_queue_size(&calc_queue_ctx); ena_calc_io_queue_size(adapter, &get_feat_ctx);
if (rc || !max_num_io_queues) { if (unlikely(!max_num_io_queues)) {
rc = -EFAULT; rc = -EFAULT;
goto err_device_destroy; goto err_device_destroy;
} }
...@@ -4330,13 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -4330,13 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL; adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
adapter->num_io_queues = max_num_io_queues; adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues; adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0; adapter->last_monitored_tx_qid = 0;
...@@ -4387,11 +4357,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -4387,11 +4357,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter); ena_config_debug_area(adapter);
if (!ena_update_hw_stats(adapter))
adapter->eni_stats_supported = true;
else
adapter->eni_stats_supported = false;
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev); netif_carrier_off(netdev);
......
...@@ -140,18 +140,6 @@ struct ena_napi { ...@@ -140,18 +140,6 @@ struct ena_napi {
struct dim dim; struct dim dim;
}; };
struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
struct pci_dev *pdev;
u32 tx_queue_size;
u32 rx_queue_size;
u32 max_tx_queue_size;
u32 max_rx_queue_size;
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
};
struct ena_tx_buffer { struct ena_tx_buffer {
struct sk_buff *skb; struct sk_buff *skb;
/* num of ena desc for this specific skb /* num of ena desc for this specific skb
...@@ -216,7 +204,7 @@ struct ena_stats_rx { ...@@ -216,7 +204,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt; u64 rx_copybreak_pkt;
u64 csum_good; u64 csum_good;
u64 refil_partial; u64 refil_partial;
u64 bad_csum; u64 csum_bad;
u64 page_alloc_fail; u64 page_alloc_fail;
u64 skb_alloc_fail; u64 skb_alloc_fail;
u64 dma_mapping_err; u64 dma_mapping_err;
...@@ -379,7 +367,6 @@ struct ena_adapter { ...@@ -379,7 +367,6 @@ struct ena_adapter {
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats; struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats; struct ena_admin_eni_stats eni_stats;
bool eni_stats_supported;
/* last queue index that was checked for uncompleted tx packets */ /* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid; u32 last_monitored_tx_qid;
...@@ -407,6 +394,15 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count); ...@@ -407,6 +394,15 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset); int ena_get_sset_count(struct net_device *netdev, int sset);
static inline void ena_reset_device(struct ena_adapter *adapter,
enum ena_regs_reset_reason_types reset_reason)
{
adapter->reset_reason = reset_reason;
/* Make sure reset reason is set before triggering the reset */
smp_mb__before_atomic();
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
enum ena_xdp_errors_t { enum ena_xdp_errors_t {
ENA_XDP_ALLOWED = 0, ENA_XDP_ALLOWED = 0,
ENA_XDP_CURRENT_MTU_TOO_LARGE, ENA_XDP_CURRENT_MTU_TOO_LARGE,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment