Commit 10a435ab authored by David S. Miller

Merge branch 'aquantia-next'

Igor Russkikh says:

====================
Aquantia atlantic driver update 2018/01

This is a set of cleanups and bugfixes in preparation for new
Aquantia hardware support.

The standard ARRAY_SIZE macro is now used throughout the code,
some unused abstraction structures have been removed and cleaned up,
and duplicate declarations have been removed.

Also two large declaration styling fixes:
- Hardware register set defines are aligned with kernel style.
- Hardware access functions were previously unprefixed; they now use the
  already defined hw_atl prefix (a condensed before/after sketch of both
  patterns follows the commit header).

Patch v2 changes:
- The patchset was reorganized because of its large size. New HW support
  will be submitted as a separate patchset.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3b039b42 9ec03bf6
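For reference, the condensed fragment below illustrates the two cleanup patterns the message describes: replacing the driver-local AQ_DIMOF wrapper with the standard ARRAY_SIZE macro, and adding the hw_atl_ prefix to the hardware register access helpers. It is an illustrative sketch assembled from identifiers that appear in the diff, not a verbatim hunk from the patch.

/* Before: driver-local wrapper around ARRAY_SIZE and unprefixed
 * register helpers (illustrative, condensed from the diff below).
 */
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)

for (i = AQ_DIMOF(bitary); i--;) {
	rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
	rpf_rss_redir_tbl_addr_set(self, i);
	rpf_rss_redir_wr_en_set(self, 1U);
}

/* After: ARRAY_SIZE is used directly and the helpers carry the
 * already defined hw_atl prefix.
 */
for (i = ARRAY_SIZE(bitary); i--;) {
	hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
	hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
	hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
}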
......@@ -16,7 +16,6 @@
#include <linux/pci.h>
#include "ver.h"
#include "aq_nic.h"
#include "aq_cfg.h"
#include "aq_utils.h"
......
......@@ -7,7 +7,7 @@
* version 2, as published by the Free Software Foundation.
*/
/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
* functions.
*/
......@@ -15,6 +15,8 @@
#define AQ_HW_H
#include "aq_common.h"
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
/* NIC H/W capabilities */
struct aq_hw_caps_s {
......@@ -86,13 +88,33 @@ struct aq_stats_s {
#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
AQ_NIC_LINK_DOWN)
struct aq_hw_s {
struct aq_obj_s header;
atomic_t flags;
struct aq_nic_cfg_s *aq_nic_cfg;
struct aq_pci_func_s *aq_pci_func;
void __iomem *mmio;
unsigned int not_ff_addr;
struct aq_hw_link_status_s aq_link_status;
struct hw_aq_atl_utils_mbox mbox;
struct hw_atl_stats_s last_stats;
struct aq_stats_s curr_stats;
u64 speed;
u32 itr_tx;
u32 itr_rx;
unsigned int chip_features;
u32 fw_ver_actual;
atomic_t dpc;
u32 mbox_addr;
u32 rpc_addr;
u32 rpc_tid;
struct hw_aq_atl_utils_fw_rpc rpc;
};
struct aq_ring_s;
......@@ -102,7 +124,7 @@ struct sk_buff;
struct aq_hw_ops {
struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
unsigned int port, struct aq_hw_ops *ops);
unsigned int port);
void (*destroy)(struct aq_hw_s *self);
......@@ -124,7 +146,6 @@ struct aq_hw_ops {
struct aq_ring_s *aq_ring);
int (*hw_get_mac_permanent)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
......@@ -135,8 +156,7 @@ struct aq_hw_ops {
int (*hw_reset)(struct aq_hw_s *self);
int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
u8 *mac_addr);
int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
......@@ -184,7 +204,8 @@ struct aq_hw_ops {
struct aq_rss_parameters *rss_params);
int (*hw_get_regs)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int (*hw_update_stats)(struct aq_hw_s *self);
......
......@@ -40,7 +40,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
u32 value = readl(hw->mmio + reg);
if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
......@@ -54,11 +54,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
err = -EIO;
goto err_exit;
}
......
......@@ -13,37 +13,32 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include <linux/netdevice.h>
#include <linux/module.h>
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
{}
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
static const struct net_device_ops aq_ndev_ops;
struct net_device *aq_ndev_alloc(void)
{
struct aq_hw_ops *ops = NULL;
struct net_device *ndev = NULL;
struct aq_nic_s *aq_nic = NULL;
ops = hw_atl_a0_get_ops_by_id(pdev);
if (!ops)
ops = hw_atl_b0_get_ops_by_id(pdev);
ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
if (!ndev)
return NULL;
return ops;
aq_nic = netdev_priv(ndev);
aq_nic->ndev = ndev;
ndev->netdev_ops = &aq_ndev_ops;
ndev->ethtool_ops = &aq_ethtool_ops;
return ndev;
}
static int aq_ndev_open(struct net_device *ndev)
......@@ -170,66 +165,3 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
};
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct aq_hw_ops *aq_hw_ops = NULL;
struct aq_pci_func_s *aq_pci_func = NULL;
int err = 0;
err = pci_enable_device(pdev);
if (err < 0)
goto err_exit;
aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
&aq_ndev_ops, &aq_ethtool_ops);
if (!aq_pci_func) {
err = -ENOMEM;
goto err_exit;
}
err = aq_pci_func_init(aq_pci_func);
if (err < 0)
goto err_exit;
err_exit:
if (err < 0) {
if (aq_pci_func)
aq_pci_func_free(aq_pci_func);
}
return err;
}
static void aq_pci_remove(struct pci_dev *pdev)
{
struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
aq_pci_func_deinit(aq_pci_func);
aq_pci_func_free(aq_pci_func);
}
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
}
static int aq_pci_resume(struct pci_dev *pdev)
{
struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
pm_message_t pm_msg = PMSG_RESTORE;
return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
}
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
.suspend = aq_pci_suspend,
.resume = aq_pci_resume,
};
module_pci_driver(aq_pci_ops);
......@@ -14,4 +14,6 @@
#include "aq_common.h"
struct net_device *aq_ndev_alloc(void);
#endif /* AQ_MAIN_H */
......@@ -14,7 +14,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_nic_internal.h"
#include "aq_main.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
......@@ -150,9 +150,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
aq_utils_obj_set(&self->header.flags,
aq_utils_obj_set(&self->flags,
AQ_NIC_FLAG_STARTED);
aq_utils_obj_clear(&self->header.flags,
aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
netif_tx_wake_all_queues(self->ndev);
......@@ -160,7 +160,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
......@@ -171,7 +171,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = aq_nic_update_link_status(self);
......@@ -205,14 +205,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
static struct net_device *aq_nic_ndev_alloc(void)
{
return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
}
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
struct pci_dev *pdev,
struct aq_nic_s *aq_nic_alloc_cold(struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops)
......@@ -221,7 +214,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
struct aq_nic_s *self = NULL;
int err = 0;
ndev = aq_nic_ndev_alloc();
ndev = aq_ndev_alloc();
if (!ndev) {
err = -ENOMEM;
goto err_exit;
......@@ -229,9 +222,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
self = netdev_priv(ndev);
ndev->netdev_ops = ndev_ops;
ndev->ethtool_ops = et_ops;
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->if_port = port;
......@@ -242,8 +232,9 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
self->aq_hw_ops = *aq_hw_ops;
self->port = (u8)port;
self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
&self->aq_hw_ops);
self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port);
self->aq_hw->aq_nic_cfg = &self->aq_nic_cfg;
err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
pdev->device, pdev->subsystem_device);
if (err < 0)
......@@ -268,7 +259,6 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
}
err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
self->aq_nic_cfg.aq_hw_caps,
self->ndev->dev_addr);
if (err < 0)
goto err_exit;
......@@ -295,7 +285,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
int aq_nic_ndev_init(struct aq_nic_s *self)
{
struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
self->ndev->hw_features |= aq_hw_caps->hw_features;
......@@ -366,11 +356,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
self->aq_ring_tx[idx] = ring;
}
struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
return self->ndev->dev.parent;
}
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
return self->ndev;
......@@ -387,7 +372,7 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
err = self->aq_hw_ops.hw_init(self->aq_hw,
aq_nic_get_ndev(self)->dev_addr);
if (err < 0)
goto err_exit;
......@@ -992,7 +977,7 @@ void aq_nic_free_hot_resources(struct aq_nic_s *self)
if (!self)
goto err_exit;
for (i = AQ_DIMOF(self->aq_vec); i--;) {
for (i = ARRAY_SIZE(self->aq_vec); i--;) {
if (self->aq_vec[i]) {
aq_vec_free(self->aq_vec[i]);
self->aq_vec[i] = NULL;
......
......@@ -14,10 +14,13 @@
#include "aq_common.h"
#include "aq_rss.h"
#include "aq_hw.h"
struct aq_ring_s;
struct aq_pci_func_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
#define AQ_NIC_FC_OFF 0U
#define AQ_NIC_FC_TX 1U
......@@ -33,7 +36,7 @@ struct aq_hw_ops;
#define AQ_NIC_RATE_100M BIT(5)
struct aq_nic_cfg_s {
struct aq_hw_caps_s *aq_hw_caps;
const struct aq_hw_caps_s *aq_hw_caps;
u64 hw_features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
......@@ -44,7 +47,6 @@ struct aq_nic_cfg_s {
u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
u32 ucp_0x364;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
......@@ -69,9 +71,38 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
struct pci_dev *pdev,
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_hw_s *aq_hw;
struct net_device *ndev;
struct aq_pci_func_s *aq_pci_func;
unsigned int aq_vecs;
unsigned int packet_filter;
unsigned int power_state;
u8 port;
struct aq_hw_ops aq_hw_ops;
struct aq_hw_caps_s aq_hw_caps;
struct aq_nic_cfg_s aq_nic_cfg;
struct timer_list service_timer;
struct timer_list polling_timer;
struct aq_hw_link_status_s link_status;
struct {
u32 count;
u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
struct pci_dev *pdev;
unsigned int msix_entry_mask;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
return self->ndev->dev.parent;
}
struct aq_nic_s *aq_nic_alloc_cold(struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops);
......
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_nic_internal.h: Definition of private object structure. */
#ifndef AQ_NIC_INTERNAL_H
#define AQ_NIC_INTERNAL_H
struct aq_nic_s {
struct aq_obj_s header;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_hw_s *aq_hw;
struct net_device *ndev;
struct aq_pci_func_s *aq_pci_func;
unsigned int aq_vecs;
unsigned int packet_filter;
unsigned int power_state;
u8 port;
struct aq_hw_ops aq_hw_ops;
struct aq_hw_caps_s aq_hw_caps;
struct aq_nic_cfg_s aq_nic_cfg;
struct timer_list service_timer;
struct timer_list polling_timer;
struct aq_hw_link_status_s link_status;
struct {
u32 count;
u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
};
#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
AQ_NIC_LINK_DOWN)
#endif /* AQ_NIC_INTERNAL_H */
......@@ -9,11 +9,15 @@
/* File aq_pci_func.c: Definition of PCI functions. */
#include <linux/interrupt.h>
#include <linux/module.h>
#include "aq_pci_func.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include <linux/interrupt.h>
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
struct aq_pci_func_s {
struct pci_dev *pdev;
......@@ -29,10 +33,30 @@ struct aq_pci_func_s {
struct aq_hw_caps_s aq_hw_caps;
};
struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
struct pci_dev *pdev,
const struct net_device_ops *ndev_ops,
const struct ethtool_ops *eth_ops)
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
{}
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
static const struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
{
const struct aq_hw_ops *ops = NULL;
ops = hw_atl_a0_get_ops_by_id(pdev);
if (!ops)
ops = hw_atl_b0_get_ops_by_id(pdev);
return ops;
}
struct aq_pci_func_s *aq_pci_func_alloc(const struct aq_hw_ops *aq_hw_ops,
struct pci_dev *pdev)
{
struct aq_pci_func_s *self = NULL;
int err = 0;
......@@ -59,8 +83,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
self->ports = self->aq_hw_caps.ports;
for (port = 0; port < self->ports; ++port) {
struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
pdev, self,
struct aq_nic_s *aq_nic = aq_nic_alloc_cold(pdev, self,
port, aq_hw_ops);
if (!aq_nic) {
......@@ -297,3 +320,65 @@ int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
err_exit:
return err;
}
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
const struct aq_hw_ops *aq_hw_ops = NULL;
struct aq_pci_func_s *aq_pci_func = NULL;
int err = 0;
err = pci_enable_device(pdev);
if (err < 0)
goto err_exit;
aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev);
if (!aq_pci_func) {
err = -ENOMEM;
goto err_exit;
}
err = aq_pci_func_init(aq_pci_func);
if (err < 0)
goto err_exit;
err_exit:
if (err < 0) {
if (aq_pci_func)
aq_pci_func_free(aq_pci_func);
}
return err;
}
static void aq_pci_remove(struct pci_dev *pdev)
{
struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
aq_pci_func_deinit(aq_pci_func);
aq_pci_func_free(aq_pci_func);
}
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
}
static int aq_pci_resume(struct pci_dev *pdev)
{
struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
pm_message_t pm_msg = PMSG_RESTORE;
return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
}
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
.suspend = aq_pci_suspend,
.resume = aq_pci_resume,
};
module_pci_driver(aq_pci_ops);
......@@ -13,11 +13,10 @@
#define AQ_PCI_FUNC_H
#include "aq_common.h"
#include "aq_nic.h"
struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
struct pci_dev *pdev,
const struct net_device_ops *ndev_ops,
const struct ethtool_ops *eth_ops);
struct aq_pci_func_s *aq_pci_func_alloc(const struct aq_hw_ops *hw_ops,
struct pci_dev *pdev);
int aq_pci_func_init(struct aq_pci_func_s *self);
int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
char *name, void *aq_vec,
......
......@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_record_rx_queue(skb, self->idx);
napi_gro_receive(napi, skb);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
napi_gro_receive(napi, skb);
}
err_exit:
......
......@@ -15,6 +15,7 @@
#include "aq_common.h"
struct page;
struct aq_nic_cfg_s;
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
......@@ -105,7 +106,6 @@ union aq_ring_stats_s {
};
struct aq_ring_s {
struct aq_obj_s header;
struct aq_ring_buff_s *buff_ring;
u8 *dx_ring; /* descriptors ring, dma shared mem */
struct aq_nic_s *aq_nic;
......
......@@ -14,12 +14,6 @@
#include "aq_common.h"
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
struct aq_obj_s {
atomic_t flags;
};
static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
{
unsigned long flags_old, flags_new;
......
......@@ -19,8 +19,7 @@
#include <linux/netdevice.h>
struct aq_vec_s {
struct aq_obj_s header;
struct aq_hw_ops *aq_hw_ops;
const struct aq_hw_ops *aq_hw_ops;
struct aq_hw_s *aq_hw;
struct aq_nic_s *aq_nic;
unsigned int tx_rings;
......@@ -166,7 +165,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
return self;
}
int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw)
{
struct aq_ring_s *ring = NULL;
......
......@@ -19,6 +19,8 @@
struct aq_hw_s;
struct aq_hw_ops;
struct aq_nic_s;
struct aq_nic_cfg_s;
struct aq_ring_stats_rx_s;
struct aq_ring_stats_tx_s;
......@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
......
......@@ -12,6 +12,7 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
......@@ -36,21 +37,20 @@ static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
}
static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
unsigned int port,
struct aq_hw_ops *ops)
unsigned int port)
{
struct hw_atl_s *self = NULL;
struct aq_hw_s *self = NULL;
self = kzalloc(sizeof(*self), GFP_KERNEL);
if (!self)
goto err_exit;
self->base.aq_pci_func = aq_pci_func;
self->aq_pci_func = aq_pci_func;
self->base.not_ff_addr = 0x10U;
self->not_ff_addr = 0x10U;
err_exit:
return (struct aq_hw_s *)self;
return self;
}
static void hw_atl_a0_destroy(struct aq_hw_s *self)
......@@ -62,24 +62,24 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
glb_glb_reg_res_dis_set(self, 1U);
pci_pci_reg_res_dis_set(self, 0U);
rx_rx_reg_res_dis_set(self, 0U);
tx_tx_reg_res_dis_set(self, 0U);
hw_atl_glb_glb_reg_res_dis_set(self, 1U);
hw_atl_pci_pci_reg_res_dis_set(self, 0U);
hw_atl_rx_rx_reg_res_dis_set(self, 0U);
hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
glb_soft_res_set(self, 1);
hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
itr_irq_reg_res_dis_set(self, 0U);
itr_res_irq_set(self, 1U);
hw_atl_itr_irq_reg_res_dis_set(self, 0U);
hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
......@@ -99,30 +99,32 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
tpb_tx_buff_hi_threshold_per_tc_set(self,
(buff_size * (1024 / 32U) * 66U) /
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024 / 32U) * 66U) /
100U, tc);
tpb_tx_buff_lo_threshold_per_tc_set(self,
(buff_size * (1024 / 32U) * 50U) /
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
/* QoS Rx buf size per TC */
......@@ -130,20 +132,20 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_A0_RXBUF_MAX;
rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
rpb_rx_buff_hi_threshold_per_tc_set(self,
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 66U) /
100U, tc);
rpb_rx_buff_lo_threshold_per_tc_set(self,
hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
......@@ -151,20 +153,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = NULL;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
cfg = self->aq_nic_cfg;
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
rpf_rss_key_wr_data_set(self, key_data);
rpf_rss_key_addr_set(self, addr);
rpf_rss_key_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
hw_atl_rpf_rss_key_wr_data_set(self, key_data);
hw_atl_rpf_rss_key_addr_set(self, addr);
hw_atl_rpf_rss_key_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
1000U, 10U);
if (err < 0)
goto err_exit;
}
......@@ -193,11 +194,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
for (i = AQ_DIMOF(bitary); i--;) {
rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
rpf_rss_redir_tbl_addr_set(self, i);
rpf_rss_redir_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
for (i = ARRAY_SIZE(bitary); i--;) {
hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
1000U, 10U);
if (err < 0)
goto err_exit;
}
......@@ -212,35 +214,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
{
/* TX checksums offloads*/
tpo_ipv4header_crc_offload_en_set(self, 1);
tpo_tcp_udp_crc_offload_en_set(self, 1);
hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
rpo_ipv4header_crc_offload_en_set(self, 1);
rpo_tcp_udp_crc_offload_en_set(self, 1);
hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
tdm_tx_dca_en_set(self, 0U);
tdm_tx_dca_mode_set(self, 0U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
tpb_tx_path_scp_ins_en_set(self, 1U);
hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
......@@ -251,38 +253,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
rpb_rpf_rx_traf_class_mode_set(self, 1U);
hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
rpb_rx_flow_ctl_mode_set(self, 1U);
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
reg_rx_flr_rss_control1set(self, cfg->is_rss ?
hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_A0_MAC_MAX; i--;) {
rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
rpfl2unicast_flr_act_set(self, 1U, i);
hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
rpf_vlan_outer_etht_set(self, 0x88A8U);
rpf_vlan_inner_etht_set(self, 0x8100U);
rpf_vlan_prom_mode_en_set(self, 1);
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
rpfl2broadcast_flr_act_set(self, 1U);
rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
rdm_rx_dca_en_set(self, 0U);
rdm_rx_dca_mode_set(self, 0U);
hw_atl_rdm_rx_dca_en_set(self, 0U);
hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
......@@ -301,10 +303,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
err = aq_hw_err_from_flags(self);
......@@ -312,9 +314,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
return err;
}
static int hw_atl_a0_hw_init(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg,
u8 *mac_addr)
static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
......@@ -325,10 +325,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
int err = 0;
self->aq_nic_cfg = aq_nic_cfg;
hw_atl_utils_hw_chip_features_init(self,
&PHAL_ATLANTIC_A0->chip_features);
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
......@@ -337,8 +334,8 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
hw_atl_a0_hw_qos_set(self);
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
......@@ -353,15 +350,14 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
goto err_exit;
/* Interrupts */
reg_irq_glb_ctl_set(self,
hw_atl_reg_irq_glb_ctl_set(self,
aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
[(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
reg_gen_irq_map_set(self,
hw_atl_reg_gen_irq_map_set(self,
((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
......@@ -376,28 +372,28 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
tdm_tx_desc_en_set(self, 1, ring->idx);
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
rdm_rx_desc_en_set(self, 1, ring->idx);
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
tpb_tx_buff_en_set(self, 1);
rpb_rx_buff_en_set(self, 1);
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
......@@ -483,36 +479,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
rdm_rx_desc_en_set(self, false, aq_ring->idx);
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
aq_ring->idx);
reg_rx_dma_desc_base_addressmswset(self,
dma_desc_addr_msw, aq_ring->idx);
hw_atl_reg_rx_dma_desc_base_addressmswset(self,
dma_desc_addr_msw,
aq_ring->idx);
rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
rdm_rx_desc_data_buff_size_set(self,
hw_atl_rdm_rx_desc_data_buff_size_set(self,
AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
itr_irq_map_en_rx_set(self, true, aq_ring->idx);
hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -524,25 +521,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
aq_ring->idx);
tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
itr_irq_map_en_tx_set(self, true, aq_ring->idx);
hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -563,7 +560,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -572,13 +569,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
ring->hw_head = hw_head_;
ring->hw_head = hw_head;
err = aq_hw_err_from_flags(self);
err_exit:
......@@ -602,15 +599,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
if ((1U << 4) &
reg_rx_dma_desc_status_get(self, ring->idx)) {
rdm_rx_desc_en_set(self, false, ring->idx);
rdm_rx_desc_res_set(self, true, ring->idx);
rdm_rx_desc_res_set(self, false, ring->idx);
rdm_rx_desc_en_set(self, true, ring->idx);
hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
}
if (ring->hw_head ||
(rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
(hw_atl_rdm_rx_desc_head_ptr_get(self,
ring->idx) < 2U)) {
break;
} else if (!(rxd_wb->status & 0x1U)) {
struct hw_atl_rxd_wb_s *rxd_wb1 =
......@@ -693,26 +691,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
itr_irq_msk_setlsw_set(self, LODWORD(mask) |
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
itr_irq_msk_clearlsw_set(self, LODWORD(mask));
itr_irq_status_clearlsw_set(self, LODWORD(mask));
if ((1U << 16) & reg_gen_irq_status_get(self))
hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
atomic_inc(&PHAL_ATLANTIC_A0->dpc);
if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = itr_irq_statuslsw_get(self);
*mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
......@@ -723,15 +720,17 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
hw_atl_rpfl2promiscuous_mode_en_set(self,
IS_FILTER_ENABLED(IFF_PROMISC));
hw_atl_rpfl2multicast_flr_en_set(self,
IS_FILTER_ENABLED(IFF_MULTICAST), 0);
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
rpfl2_uc_flr_en_set(self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
......@@ -761,15 +760,17 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
rpfl2unicast_dest_addresslsw_set(self,
l, HW_ATL_A0_MAC_MIN + i);
hw_atl_rpfl2unicast_dest_addresslsw_set(self,
l,
HW_ATL_A0_MAC_MIN + i);
rpfl2unicast_dest_addressmsw_set(self,
h, HW_ATL_A0_MAC_MIN + i);
hw_atl_rpfl2unicast_dest_addressmsw_set(self,
h,
HW_ATL_A0_MAC_MIN + i);
rpfl2_uc_flr_en_set(self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled),
HW_ATL_A0_MAC_MIN + i);
}
......@@ -823,7 +824,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
reg_irq_thr_set(self, itr_rx, i);
hw_atl_reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
......@@ -837,14 +838,14 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
tdm_tx_desc_en_set(self, 0U, ring->idx);
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
rdm_rx_desc_en_set(self, 0U, ring->idx);
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -860,7 +861,7 @@ static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
return err;
}
static struct aq_hw_ops hw_atl_ops_ = {
static const struct aq_hw_ops hw_atl_ops_ = {
.create = hw_atl_a0_create,
.destroy = hw_atl_a0_destroy,
.get_hw_caps = hw_atl_a0_get_hw_caps,
......@@ -903,7 +904,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
const struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
{
bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
......
......@@ -29,6 +29,6 @@
#endif
struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
const struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
#endif /* HW_ATL_A0_H */
......@@ -88,37 +88,6 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
/* Hardware tx descriptor */
struct __packed hw_atl_txd_s {
u64 buf_addr;
u32 ctl;
u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
};
/* Hardware tx context descriptor */
struct __packed hw_atl_txc_s {
u32 rsvd;
u32 len;
u32 ctl;
u32 len2;
};
/* Hardware rx descriptor */
struct __packed hw_atl_rxd_s {
u64 buf_addr;
u64 hdr_addr;
};
/* Hardware rx descriptor writeback */
struct __packed hw_atl_rxd_wb_s {
u32 type;
u32 rss_hash;
u16 status;
u16 pkt_len;
u16 next_desc_ptr;
u16 vlan;
};
/* HW layer capabilities */
static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
.ports = 1U,
......
......@@ -12,6 +12,7 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
......@@ -37,21 +38,20 @@ static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
}
static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
unsigned int port,
struct aq_hw_ops *ops)
unsigned int port)
{
struct hw_atl_s *self = NULL;
struct aq_hw_s *self = NULL;
self = kzalloc(sizeof(*self), GFP_KERNEL);
if (!self)
goto err_exit;
self->base.aq_pci_func = aq_pci_func;
self->aq_pci_func = aq_pci_func;
self->base.not_ff_addr = 0x10U;
self->not_ff_addr = 0x10U;
err_exit:
return (struct aq_hw_s *)self;
return self;
}
static void hw_atl_b0_destroy(struct aq_hw_s *self)
......@@ -63,24 +63,24 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
glb_glb_reg_res_dis_set(self, 1U);
pci_pci_reg_res_dis_set(self, 0U);
rx_rx_reg_res_dis_set(self, 0U);
tx_tx_reg_res_dis_set(self, 0U);
hw_atl_glb_glb_reg_res_dis_set(self, 1U);
hw_atl_pci_pci_reg_res_dis_set(self, 0U);
hw_atl_rx_rx_reg_res_dis_set(self, 0U);
hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
glb_soft_res_set(self, 1);
hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
itr_irq_reg_res_dis_set(self, 0U);
itr_res_irq_set(self, 1U);
hw_atl_itr_irq_reg_res_dis_set(self, 0U);
hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
......@@ -100,30 +100,32 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
tpb_tx_buff_hi_threshold_per_tc_set(self,
(buff_size * (1024 / 32U) * 66U) /
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024 / 32U) * 66U) /
100U, tc);
tpb_tx_buff_lo_threshold_per_tc_set(self,
(buff_size * (1024 / 32U) * 50U) /
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
/* QoS Rx buf size per TC */
......@@ -131,20 +133,20 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
rpb_rx_buff_hi_threshold_per_tc_set(self,
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 66U) /
100U, tc);
rpb_rx_buff_lo_threshold_per_tc_set(self,
hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
......@@ -152,20 +154,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = NULL;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
cfg = self->aq_nic_cfg;
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
rpf_rss_key_wr_data_set(self, key_data);
rpf_rss_key_addr_set(self, addr);
rpf_rss_key_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
hw_atl_rpf_rss_key_wr_data_set(self, key_data);
hw_atl_rpf_rss_key_addr_set(self, addr);
hw_atl_rpf_rss_key_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
1000U, 10U);
if (err < 0)
goto err_exit;
}
......@@ -194,11 +195,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
for (i = AQ_DIMOF(bitary); i--;) {
rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
rpf_rss_redir_tbl_addr_set(self, i);
rpf_rss_redir_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
for (i = ARRAY_SIZE(bitary); i--;) {
hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
1000U, 10U);
if (err < 0)
goto err_exit;
}
......@@ -215,15 +217,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
unsigned int i;
/* TX checksums offloads*/
tpo_ipv4header_crc_offload_en_set(self, 1);
tpo_tcp_udp_crc_offload_en_set(self, 1);
hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
rpo_ipv4header_crc_offload_en_set(self, 1);
rpo_tcp_udp_crc_offload_en_set(self, 1);
hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
/* LRO offloads */
{
......@@ -232,43 +234,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
rpo_lro_max_num_of_descriptors_set(self, val, i);
hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
rpo_lro_time_base_divider_set(self, 0x61AU);
rpo_lro_inactive_interval_set(self, 0);
rpo_lro_max_coalescing_interval_set(self, 2);
hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
hw_atl_rpo_lro_inactive_interval_set(self, 0);
hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
rpo_lro_qsessions_lim_set(self, 1U);
hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
rpo_lro_total_desc_lim_set(self, 2U);
hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
rpo_lro_patch_optimization_en_set(self, 0U);
hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
rpo_lro_min_pay_of_first_pkt_set(self, 10U);
hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
rpo_lro_pkt_lim_set(self, 1U);
hw_atl_rpo_lro_pkt_lim_set(self, 1U);
rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
hw_atl_rpo_lro_en_set(self,
aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
tdm_tx_dca_en_set(self, 0U);
tdm_tx_dca_mode_set(self, 0U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
tpb_tx_path_scp_ins_en_set(self, 1U);
hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
......@@ -279,55 +282,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
rpb_rpf_rx_traf_class_mode_set(self, 1U);
hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
rpb_rx_flow_ctl_mode_set(self, 1U);
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
reg_rx_flr_rss_control1set(self, cfg->is_rss ?
hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
rpfl2unicast_flr_act_set(self, 1U, i);
hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
rpf_vlan_outer_etht_set(self, 0x88A8U);
rpf_vlan_inner_etht_set(self, 0x8100U);
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
rpf_vlan_flr_act_set(self, 1U, 0U);
rpf_vlan_id_flr_set(self, 0U, 0U);
rpf_vlan_flr_en_set(self, 0U, 0U);
hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
rpf_vlan_accept_untagged_packets_set(self, 1U);
rpf_vlan_untagged_act_set(self, 1U);
hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
hw_atl_rpf_vlan_untagged_act_set(self, 1U);
rpf_vlan_flr_act_set(self, 1U, 1U);
rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
rpf_vlan_flr_en_set(self, 1U, 1U);
hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
rpf_vlan_prom_mode_en_set(self, 1);
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
}
/* Rx Interrupts */
rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
rpfl2broadcast_flr_act_set(self, 1U);
rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
rdm_rx_dca_en_set(self, 0U);
rdm_rx_dca_mode_set(self, 0U);
hw_atl_rdm_rx_dca_en_set(self, 0U);
hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
......@@ -346,10 +349,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
err = aq_hw_err_from_flags(self);
......@@ -357,9 +360,7 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
return err;
}
static int hw_atl_b0_hw_init(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg,
u8 *mac_addr)
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
......@@ -371,10 +372,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
int err = 0;
u32 val;
self->aq_nic_cfg = aq_nic_cfg;
hw_atl_utils_hw_chip_features_init(self,
&PHAL_ATLANTIC_B0->chip_features);
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
......@@ -388,14 +386,15 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
/* Force limit MRRS on RDM/TDM to 2K */
val = aq_hw_read_reg(self, pci_reg_control6_adr);
aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
(val & ~0x707) | 0x404);
/* TX DMA total request limit. B0 hardware is not capable to
* handle more than (8K-MRRS) incoming DMA data.
* Value 24 in 256byte units
*/
aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
/* Reset link status and read out initial hardware counters */
self->aq_link_status.mbps = 0;
......@@ -406,15 +405,15 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
goto err_exit;
/* Interrupts */
reg_irq_glb_ctl_set(self,
hw_atl_reg_irq_glb_ctl_set(self,
aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
reg_gen_irq_map_set(self,
hw_atl_reg_gen_irq_map_set(self,
((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
......@@ -427,28 +426,28 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
tdm_tx_desc_en_set(self, 1, ring->idx);
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
rdm_rx_desc_en_set(self, 1, ring->idx);
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
tpb_tx_buff_en_set(self, 1);
rpb_rx_buff_en_set(self, 1);
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
......@@ -534,36 +533,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
rdm_rx_desc_en_set(self, false, aq_ring->idx);
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
aq_ring->idx);
reg_rx_dma_desc_base_addressmswset(self,
hw_atl_reg_rx_dma_desc_base_addressmswset(self,
dma_desc_addr_msw, aq_ring->idx);
rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
rdm_rx_desc_data_buff_size_set(self,
hw_atl_rdm_rx_desc_data_buff_size_set(self,
AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
itr_irq_map_en_rx_set(self, true, aq_ring->idx);
hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
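Two conventions in the ring setup above are easy to miss: the 64-bit descriptor ring base is split into two 32-bit register writes, and the length register is programmed in blocks of eight descriptors (hence the size / 8U). A minimal sketch with made-up values:

	/* Sketch only; the ring address and size are illustrative. */
	u64 ring_pa = 0x123456000ULL;	/* stand-in for aq_ring->dx_ring_pa */
	u32 lsw = (u32)ring_pa;		/* 0x23456000 */
	u32 msw = (u32)(ring_pa >> 32);	/* 0x00000001 */

	unsigned int size = 1024U;	/* descriptors in the ring */
	u32 len_field = size / 8U;	/* 128, assuming the field counts 8-descriptor blocks */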
......@@ -575,25 +574,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
aq_ring->idx);
tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
itr_irq_map_en_tx_set(self, true, aq_ring->idx);
hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -614,7 +613,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -623,9 +622,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
......@@ -728,22 +727,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
itr_irq_msk_setlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
itr_irq_msk_clearlsw_set(self, LODWORD(mask));
itr_irq_status_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
atomic_inc(&PHAL_ATLANTIC_B0->dpc);
atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = itr_irq_statuslsw_get(self);
*mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
......@@ -754,19 +753,19 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
rpfl2multicast_flr_en_set(self,
hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
hw_atl_rpfl2multicast_flr_en_set(self,
IS_FILTER_ENABLED(IFF_MULTICAST), 0);
rpfl2_accept_all_mc_packets_set(self,
hw_atl_rpfl2_accept_all_mc_packets_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI));
rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
rpfl2_uc_flr_en_set(self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
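IS_FILTER_ENABLED() itself is outside this hunk; it presumably just tests the requested packet_filter flags and collapses the result to 0/1, along the lines of this sketch (the exact definition lives elsewhere in hw_atl_b0.c):

	/* Sketch of the assumed helper; packet_filter is the function's
	 * unsigned int argument carrying the IFF_* flags.
	 */
	#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)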
......@@ -796,15 +795,15 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
rpfl2unicast_dest_addresslsw_set(self,
hw_atl_rpfl2unicast_dest_addresslsw_set(self,
l, HW_ATL_B0_MAC_MIN + i);
rpfl2unicast_dest_addressmsw_set(self,
hw_atl_rpfl2unicast_dest_addressmsw_set(self,
h, HW_ATL_B0_MAC_MIN + i);
rpfl2_uc_flr_en_set(self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
......@@ -824,10 +823,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
switch (self->aq_nic_cfg->itr) {
case AQ_CFG_INTERRUPT_MODERATION_ON:
case AQ_CFG_INTERRUPT_MODERATION_AUTO:
tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
tdm_tdm_intr_moder_en_set(self, 1U);
rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
rdm_rdm_intr_moder_en_set(self, 1U);
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
/* HW timers are in 2us units */
......@@ -887,18 +886,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
break;
case AQ_CFG_INTERRUPT_MODERATION_OFF:
tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
tdm_tdm_intr_moder_en_set(self, 0U);
rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
rdm_rdm_intr_moder_en_set(self, 0U);
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
itr_tx = 0U;
itr_rx = 0U;
break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
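The per-queue values written by the loop above derive from the "HW timers are in 2us units" rule quoted earlier in the function; the exact register packing is elided from this hunk, so only the unit conversion is worth sketching (assumption: intervals are simply divided down to 2 us ticks before being packed):

	/* Sketch: convert a coalescing target in microseconds into the
	 * assumed 2 us hardware tick, e.g. 50 us -> 25.
	 */
	static inline u32 sketch_itr_usec_to_ticks(u32 usec)
	{
		return usec / 2U;
	}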
......@@ -913,14 +912,14 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
tdm_tx_desc_en_set(self, 0U, ring->idx);
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
rdm_rx_desc_en_set(self, 0U, ring->idx);
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
......@@ -936,7 +935,7 @@ static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
return err;
}
static struct aq_hw_ops hw_atl_ops_ = {
static const struct aq_hw_ops hw_atl_ops_ = {
.create = hw_atl_b0_create,
.destroy = hw_atl_b0_destroy,
.get_hw_caps = hw_atl_b0_get_hw_caps,
......@@ -979,7 +978,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
const struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
{
bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
......
......@@ -29,6 +29,6 @@
#endif
struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
const struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
#endif /* HW_ATL_B0_H */
......@@ -142,37 +142,6 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
/* Hardware tx descriptor */
struct __packed hw_atl_txd_s {
u64 buf_addr;
u32 ctl;
u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
};
/* Hardware tx context descriptor */
struct __packed hw_atl_txc_s {
u32 rsvd;
u32 len;
u32 ctl;
u32 len2;
};
/* Hardware rx descriptor */
struct __packed hw_atl_rxd_s {
u64 buf_addr;
u64 hdr_addr;
};
/* Hardware rx descriptor writeback */
struct __packed hw_atl_rxd_wb_s {
u32 type;
u32 rss_hash;
u16 status;
u16 pkt_len;
u16 next_desc_ptr;
u16 vlan;
};
/* HW layer capabilities */
static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
.ports = 1U,
......
......@@ -16,111 +16,115 @@
#include "../aq_hw_utils.h"
/* global */
void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore)
{
aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
}
u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
{
return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
}
void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
{
aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
glb_reg_res_dis_msk,
glb_reg_res_dis_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
HW_ATL_GLB_REG_RES_DIS_MSK,
HW_ATL_GLB_REG_RES_DIS_SHIFT,
glb_reg_res_dis);
}
void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
{
aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
glb_soft_res_shift, soft_res);
aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
HW_ATL_GLB_SOFT_RES_MSK,
HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
}
u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
glb_soft_res_msk,
glb_soft_res_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
HW_ATL_GLB_SOFT_RES_MSK,
HW_ATL_GLB_SOFT_RES_SHIFT);
}
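Every bitfield accessor in this file funnels through aq_hw_write_reg_bit()/aq_hw_read_reg_bit() with an address, mask and shift triple. Their behaviour is the usual masked read-modify-write; a minimal sketch of the assumed semantics (simplified, not the driver's exact implementation):

	/* Sketch of the assumed bitfield helpers, built on the plain
	 * aq_hw_read_reg()/aq_hw_write_reg() accessors.
	 */
	static void sketch_write_reg_bit(struct aq_hw_s *hw, u32 addr,
					 u32 msk, u32 shift, u32 val)
	{
		u32 reg = aq_hw_read_reg(hw, addr);

		reg = (reg & ~msk) | ((val << shift) & msk);
		aq_hw_write_reg(hw, addr, reg);
	}

	static u32 sketch_read_reg_bit(struct aq_hw_s *hw, u32 addr,
				       u32 msk, u32 shift)
	{
		return (aq_hw_read_reg(hw, addr) & msk) >> shift;
	}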
u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
}
u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
}
/* stats */
u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
}
u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
}
u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
}
u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
}
/* interrupt */
void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
u32 irq_auto_masklsw)
{
aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
}
void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
u32 rx)
{
/* register address for bitfield imr_rx{r}_en */
static u32 itr_imr_rxren_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}_en */
......@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
irq_map_en_rx);
}
void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
u32 tx)
{
/* register address for bitfield imr_tx{t}_en */
static u32 itr_imr_txten_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}_en */
......@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
irq_map_en_tx);
}
void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
{
/* register address for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_msk[32] = {
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
};
/* lower bit position of bitfield imr_rx{r}[4:0] */
......@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
irq_map_rx);
}
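The 32-entry tables above encode a simple "two rings per 32-bit register" layout: consecutive ring indices share a register (the address steps by 4 every second ring), even rings land in bits [12:8] and odd rings in bits [4:0]. The same mapping can be computed directly, as a sketch; the shift values are inferred from the masks, since the shift table itself is elided from this hunk:

	/* Sketch: equivalent of the imr_rx{r}[4:0] table lookups for a
	 * given ring index rx.
	 */
	u32 addr  = 0x00002100U + (rx / 2U) * 4U;
	u32 msk   = (rx & 1U) ? 0x0000001FU : 0x00001F00U;
	u32 shift = (rx & 1U) ? 0U : 8U;

	aq_hw_write_reg_bit(aq_hw, addr, msk, shift, irq_map_rx);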
void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
{
/* register address for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_msk[32] = {
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
};
/* lower bit position of bitfield imr_tx{t}[4:0] */
......@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
irq_map_tx);
}
void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
u32 irq_msk_clearlsw)
{
aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
}
void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
{
aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
}
void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
{
aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
itr_reg_res_dsbl_msk,
itr_reg_res_dsbl_shift, irq_reg_res_dis);
aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
HW_ATL_ITR_REG_RES_DSBL_MSK,
HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
}
void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
u32 irq_status_clearlsw)
{
aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
}
u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
}
u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
itr_res_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
HW_ATL_ITR_RES_SHIFT);
}
void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
{
aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
itr_res_shift, res_irq);
aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
HW_ATL_ITR_RES_SHIFT, res_irq);
}
/* rdm */
void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
rdm_dcadcpuid_msk,
rdm_dcadcpuid_shift, cpuid);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
HW_ATL_RDM_DCADCPUID_MSK,
HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
}
void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
{
aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
rdm_dca_en_shift, rx_dca_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
}
void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
{
aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
rdm_dca_mode_shift, rx_dca_mode);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
HW_ATL_RDM_DCA_MODE_MSK,
HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
}
void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_data_buff_size, u32 descriptor)
void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_data_buff_size,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
rdm_descddata_size_msk,
rdm_descddata_size_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
HW_ATL_RDM_DESCDDATA_SIZE_MSK,
HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
rx_desc_data_buff_size);
}
void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
u32 dca)
{
aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
rdm_dcaddesc_en_msk,
rdm_dcaddesc_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
HW_ATL_RDM_DCADDESC_EN_MSK,
HW_ATL_RDM_DCADDESC_EN_SHIFT,
rx_desc_dca_en);
}
void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
rdm_descden_msk,
rdm_descden_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
HW_ATL_RDM_DESCDEN_MSK,
HW_ATL_RDM_DESCDEN_SHIFT,
rx_desc_en);
}
void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_buff_size, u32 descriptor)
void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_buff_size,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
rdm_descdhdr_size_msk,
rdm_descdhdr_size_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
HW_ATL_RDM_DESCDHDR_SIZE_MSK,
HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
rx_desc_head_buff_size);
}
void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_splitting, u32 descriptor)
void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_splitting,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
rdm_descdhdr_split_msk,
rdm_descdhdr_split_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
rx_desc_head_splitting);
}
u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
rdm_descdhd_msk, rdm_descdhd_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
HW_ATL_RDM_DESCDHD_MSK,
HW_ATL_RDM_DESCDHD_SHIFT);
}
void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
rdm_descdlen_msk, rdm_descdlen_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
rx_desc_len);
}
void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
rdm_descdreset_msk, rdm_descdreset_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
HW_ATL_RDM_DESCDRESET_MSK,
HW_ATL_RDM_DESCDRESET_SHIFT,
rx_desc_res);
}
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
u32 rx_desc_wr_wb_irq_en)
{
aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
rdm_int_desc_wrb_en_msk,
rdm_int_desc_wrb_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
rx_desc_wr_wb_irq_en);
}
void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
u32 dca)
{
aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
rdm_dcadhdr_en_msk,
rdm_dcadhdr_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
HW_ATL_RDM_DCADHDR_EN_MSK,
HW_ATL_RDM_DCADHDR_EN_SHIFT,
rx_head_dca_en);
}
void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
u32 dca)
{
aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
HW_ATL_RDM_DCADPAY_EN_MSK,
HW_ATL_RDM_DCADPAY_EN_SHIFT,
rx_pld_dca_en);
}
void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
u32 rdm_intr_moder_en)
{
aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
rdm_int_rim_en_msk,
rdm_int_rim_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
HW_ATL_RDM_INT_RIM_EN_MSK,
HW_ATL_RDM_INT_RIM_EN_SHIFT,
rdm_intr_moder_en);
}
/* reg */
void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
u32 regidx)
{
aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
}
u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
}
void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
{
aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
}
void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
{
aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
}
void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_base_addrlsw,
u32 descriptor)
{
aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
rx_dma_desc_base_addrlsw);
}
void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_base_addrmsw,
u32 descriptor)
{
aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
rx_dma_desc_base_addrmsw);
}
u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
}
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_tail_ptr, u32 descriptor)
void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_tail_ptr,
u32 descriptor)
{
aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
rx_dma_desc_tail_ptr);
}
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
u32 rx_flr_mcst_flr_msk)
{
aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
rx_flr_mcst_flr_msk);
}
void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
u32 filter)
{
aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
rx_flr_mcst_flr);
}
void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
u32 rx_flr_rss_control1)
{
aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
rx_flr_rss_control1);
}
void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
u32 rx_filter_control2)
{
aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
}
void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 rx_intr_moderation_ctl,
u32 queue)
{
aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
rx_intr_moderation_ctl);
}
void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
u32 tx_dma_debug_ctl)
{
aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
}
void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_base_addrlsw,
u32 descriptor)
{
aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
tx_dma_desc_base_addrlsw);
}
void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_base_addrmsw,
u32 descriptor)
{
aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
tx_dma_desc_base_addrmsw);
}
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_tail_ptr, u32 descriptor)
void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_tail_ptr,
u32 descriptor)
{
aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
tx_dma_desc_tail_ptr);
}
void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 tx_intr_moderation_ctl,
u32 queue)
{
aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
tx_intr_moderation_ctl);
}
/* RPB: rx packet buffer */
void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
{
aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
rpb_dma_sys_lbk_msk,
rpb_dma_sys_lbk_shift, dma_sys_lbk);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
HW_ATL_RPB_DMA_SYS_LBK_MSK,
HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode)
{
aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
rpb_rpf_rx_tc_mode_msk,
rpb_rpf_rx_tc_mode_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
rx_traf_class_mode);
}
void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
rpb_rx_buf_en_shift, rx_buff_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
HW_ATL_RPB_RX_BUF_EN_MSK,
HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
}
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_buff_hi_threshold_per_tc,
u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
HW_ATL_RPB_RXBHI_THRESH_MSK,
HW_ATL_RPB_RXBHI_THRESH_SHIFT,
rx_buff_hi_threshold_per_tc);
}
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_buff_lo_threshold_per_tc,
u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
rpb_rxblo_thresh_msk,
rpb_rxblo_thresh_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
HW_ATL_RPB_RXBLO_THRESH_MSK,
HW_ATL_RPB_RXBLO_THRESH_SHIFT,
rx_buff_lo_threshold_per_tc);
}
void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
{
aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
rpb_rx_fc_mode_msk,
rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
HW_ATL_RPB_RX_FC_MODE_MSK,
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
HW_ATL_RPB_RXBBUF_SIZE_MSK,
HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
rx_pkt_buff_size_per_tc);
}
void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
HW_ATL_RPB_RXBXOFF_EN_MSK,
HW_ATL_RPB_RXBXOFF_EN_SHIFT,
rx_xoff_en_per_tc);
}
/* rpf */
void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_count_threshold)
{
aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
rpfl2bc_thresh_msk,
rpfl2bc_thresh_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
HW_ATL_RPFL2BC_THRESH_MSK,
HW_ATL_RPFL2BC_THRESH_SHIFT,
l2broadcast_count_threshold);
}
void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
{
aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
rpfl2bc_en_shift, l2broadcast_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
}
void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_flr_act)
{
aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
rpfl2bc_act_shift, l2broadcast_flr_act);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
HW_ATL_RPFL2BC_ACT_MSK,
HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
}
void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
u32 l2multicast_flr_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
rpfl2mc_enf_msk,
rpfl2mc_enf_shift, l2multicast_flr_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
HW_ATL_RPFL2MC_ENF_MSK,
HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en)
{
aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
rpfl2promis_mode_msk,
rpfl2promis_mode_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
HW_ATL_RPFL2PROMIS_MODE_MSK,
HW_ATL_RPFL2PROMIS_MODE_SHIFT,
l2promiscuous_mode_en);
}
void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
u32 l2unicast_flr_act,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
rpfl2uc_actf_msk, rpfl2uc_actf_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
l2unicast_flr_act);
}
void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
rpfl2uc_enf_msk,
rpfl2uc_enf_shift, l2unicast_flr_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
HW_ATL_RPFL2UC_ENF_MSK,
HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
}
void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addresslsw,
u32 filter)
{
aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
l2unicast_dest_addresslsw);
}
void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addressmsw,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
HW_ATL_RPFL2UC_DAFMSW_MSK,
HW_ATL_RPFL2UC_DAFMSW_SHIFT,
l2unicast_dest_addressmsw);
}
void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
u32 l2_accept_all_mc_packets)
{
aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
rpfl2mc_accept_all_msk,
rpfl2mc_accept_all_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
l2_accept_all_mc_packets);
}
void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
u32 user_priority_tc_map, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
};
/* bitmask for bitfield rx_tc_up{t}[2:0] */
......@@ -711,271 +750,288 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
user_priority_tc_map);
}
void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
{
aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
rpf_rss_key_addr_msk,
rpf_rss_key_addr_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
HW_ATL_RPF_RSS_KEY_ADDR_MSK,
HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
rss_key_addr);
}
void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
{
aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
rss_key_wr_data);
}
u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
rpf_rss_key_wr_eni_msk,
rpf_rss_key_wr_eni_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
}
void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
{
aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
rpf_rss_key_wr_eni_msk,
rpf_rss_key_wr_eni_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
rss_key_wr_en);
}
void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
u32 rss_redir_tbl_addr)
{
aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
rpf_rss_redir_addr_msk,
rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
rss_redir_tbl_addr);
}
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
u32 rss_redir_tbl_wr_data)
{
aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
rpf_rss_redir_wr_data_msk,
rpf_rss_redir_wr_data_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
rss_redir_tbl_wr_data);
}
u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
rpf_rss_redir_wr_eni_msk,
rpf_rss_redir_wr_eni_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
}
void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
{
aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
rpf_rss_redir_wr_eni_msk,
rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
}
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
u32 tpo_to_rpf_sys_lbk)
{
aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
rpf_tpo_rpf_sys_lbk_msk,
rpf_tpo_rpf_sys_lbk_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
tpo_to_rpf_sys_lbk);
}
void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
rpf_vl_inner_tpid_msk,
rpf_vl_inner_tpid_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
HW_ATL_RPF_VL_INNER_TPID_MSK,
HW_ATL_RPF_VL_INNER_TPID_SHIFT,
vlan_inner_etht);
}
void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
rpf_vl_outer_tpid_msk,
rpf_vl_outer_tpid_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
HW_ATL_RPF_VL_OUTER_TPID_MSK,
HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
vlan_outer_etht);
}
void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
u32 vlan_prom_mode_en)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
rpf_vl_promis_mode_msk,
rpf_vl_promis_mode_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
HW_ATL_RPF_VL_PROMIS_MODE_MSK,
HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
vlan_prom_mode_en);
}
void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_accept_untagged_packets)
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_acc_untagged_packets)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
rpf_vl_accept_untagged_mode_msk,
rpf_vl_accept_untagged_mode_shift,
vlan_accept_untagged_packets);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
vlan_acc_untagged_packets);
}
void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
u32 vlan_untagged_act)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
rpf_vl_untagged_act_msk,
rpf_vl_untagged_act_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
vlan_untagged_act);
}
void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
rpf_vl_en_f_msk,
rpf_vl_en_f_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
HW_ATL_RPF_VL_EN_F_MSK,
HW_ATL_RPF_VL_EN_F_SHIFT,
vlan_flr_en);
}
void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
rpf_vl_act_f_msk,
rpf_vl_act_f_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
HW_ATL_RPF_VL_ACT_F_MSK,
HW_ATL_RPF_VL_ACT_F_SHIFT,
vlan_flr_act);
}
void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
rpf_vl_id_f_msk,
rpf_vl_id_f_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
HW_ATL_RPF_VL_ID_F_MSK,
HW_ATL_RPF_VL_ID_F_SHIFT,
vlan_id_flr);
}
void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
rpf_et_enf_msk,
rpf_et_enf_shift, etht_flr_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
HW_ATL_RPF_ET_ENF_MSK,
HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
}
void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
u32 etht_user_priority_en, u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
rpf_et_upfen_msk, rpf_et_upfen_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
etht_user_priority_en);
}
void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
u32 etht_rx_queue_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
HW_ATL_RPF_ET_RXQFEN_MSK,
HW_ATL_RPF_ET_RXQFEN_SHIFT,
etht_rx_queue_en);
}
void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
u32 etht_user_priority,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
rpf_et_upf_msk,
rpf_et_upf_shift, etht_user_priority);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
HW_ATL_RPF_ET_UPF_MSK,
HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
}
void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
rpf_et_rxqf_msk,
rpf_et_rxqf_shift, etht_rx_queue);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
HW_ATL_RPF_ET_RXQF_MSK,
HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
}
void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
HW_ATL_RPF_ET_MNG_RXQF_MSK,
HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
etht_mgt_queue);
}
void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
rpf_et_actf_msk,
rpf_et_actf_shift, etht_flr_act);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
HW_ATL_RPF_ET_ACTF_MSK,
HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
}
void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
rpf_et_valf_msk,
rpf_et_valf_shift, etht_flr);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
HW_ATL_RPF_ET_VALF_MSK,
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
/* RPO: rx packet offload */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
{
aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
rpo_ipv4chk_en_msk,
rpo_ipv4chk_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
HW_ATL_RPO_IPV4CHK_EN_MSK,
HW_ATL_RPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
u32 rx_desc_vlan_stripping, u32 descriptor)
void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
u32 rx_desc_vlan_stripping,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
rpo_descdvl_strip_msk,
rpo_descdvl_strip_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
HW_ATL_RPO_DESCDVL_STRIP_MSK,
HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
rx_desc_vlan_stripping);
}
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en)
{
aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
rpol4chk_en_shift, tcp_udp_crc_offload_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
HW_ATL_RPOL4CHK_EN_MSK,
HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
}
void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
{
aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
}
void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
u32 lro_patch_optimization_en)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
rpo_lro_ptopt_en_msk,
rpo_lro_ptopt_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
HW_ATL_RPO_LRO_PTOPT_EN_MSK,
HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
lro_patch_optimization_en);
}
void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
u32 lro_qsessions_lim)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
rpo_lro_qses_lmt_msk,
rpo_lro_qses_lmt_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
HW_ATL_RPO_LRO_QSES_LMT_MSK,
HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
lro_qsessions_lim);
}
void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
u32 lro_total_desc_lim)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
rpo_lro_tot_dsc_lmt_msk,
rpo_lro_tot_dsc_lmt_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
lro_total_desc_lim);
}
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
u32 lro_min_pld_of_first_pkt)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
rpo_lro_pkt_min_msk,
rpo_lro_pkt_min_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
HW_ATL_RPO_LRO_PKT_MIN_MSK,
HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
lro_min_pld_of_first_pkt);
}
void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
{
aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
}
void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
u32 lro_max_number_of_descriptors,
u32 lro)
{
......@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
lro_max_number_of_descriptors);
}
void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
u32 lro_time_base_divider)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
rpo_lro_tb_div_msk,
rpo_lro_tb_div_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
HW_ATL_RPO_LRO_TB_DIV_MSK,
HW_ATL_RPO_LRO_TB_DIV_SHIFT,
lro_time_base_divider);
}
void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
u32 lro_inactive_interval)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
rpo_lro_ina_ival_msk,
rpo_lro_ina_ival_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
HW_ATL_RPO_LRO_INA_IVAL_MSK,
HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
lro_inactive_interval);
}
void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
u32 lro_max_coalescing_interval)
void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
u32 lro_max_coal_interval)
{
aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
rpo_lro_max_ival_msk,
rpo_lro_max_ival_shift,
lro_max_coalescing_interval);
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
HW_ATL_RPO_LRO_MAX_IVAL_MSK,
HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
lro_max_coal_interval);
}
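
Taken together, the LRO setters above are the knobs a hardware init path programs in one pass. The following is a hedged sketch only: the session/descriptor limits, the time-base divider, the 32-ring loop and the enable bitmask are placeholder assumptions for illustration, not values taken from this patch.

/* Illustrative LRO bring-up sequence; all numeric values are assumptions. */
static void example_lro_config(struct aq_hw_s *self)
{
	unsigned int i;

	hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);	/* assumed divider */
	hw_atl_rpo_lro_inactive_interval_set(self, 0U);
	hw_atl_rpo_lro_max_coalescing_interval_set(self, 2U);

	hw_atl_rpo_lro_qsessions_lim_set(self, 1U);	/* sessions per queue (assumption) */
	hw_atl_rpo_lro_total_desc_lim_set(self, 2U);	/* descriptors per session (assumption) */
	hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
	hw_atl_rpo_lro_pkt_lim_set(self, 1U);

	for (i = 0U; i < 32U; i++)	/* 32 rings assumed */
		hw_atl_rpo_lro_max_num_of_descriptors_set(self, 3U, i);

	hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);
	hw_atl_rpo_lro_en_set(self, 0xFFFFFFFFU);	/* per-ring enable mask (assumption) */
}
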
/* rx */
void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
{
aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
rx_reg_res_dsbl_msk,
rx_reg_res_dsbl_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
HW_ATL_RX_REG_RES_DSBL_MSK,
HW_ATL_RX_REG_RES_DSBL_SHIFT,
rx_reg_res_dis);
}
/* tdm */
void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
tdm_dcadcpuid_msk,
tdm_dcadcpuid_shift, cpuid);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
HW_ATL_TDM_DCADCPUID_MSK,
HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
}
void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
u32 large_send_offload_en)
{
aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
}
void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
{
aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
tdm_dca_en_shift, tx_dca_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
}
void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
{
aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
tdm_dca_mode_shift, tx_dca_mode);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
HW_ATL_TDM_DCA_MODE_MSK,
HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
}
void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
u32 dca)
{
aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
HW_ATL_TDM_DCADDESC_EN_MSK,
HW_ATL_TDM_DCADDESC_EN_SHIFT,
tx_desc_dca_en);
}
void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
tdm_descden_msk,
tdm_descden_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
HW_ATL_TDM_DESCDEN_MSK,
HW_ATL_TDM_DESCDEN_SHIFT,
tx_desc_en);
}
u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
tdm_descdhd_msk, tdm_descdhd_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
HW_ATL_TDM_DESCDHD_MSK,
HW_ATL_TDM_DESCDHD_SHIFT);
}
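
The head-pointer getter above is what a datapath can poll to see how far the hardware has advanced through a TX ring. A minimal sketch of turning it into a "descriptors completed since last poll" count; the software head and ring size are bookkeeping the caller is assumed to maintain.

/* Sketch: descriptors the hardware consumed since sw_head (caller-maintained). */
static u32 example_tx_completed(struct aq_hw_s *self, u32 ring,
				u32 sw_head, u32 ring_size)
{
	u32 hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring);

	/* handle wrap-around of the hardware pointer */
	return (hw_head >= sw_head) ? hw_head - sw_head
				    : ring_size - sw_head + hw_head;
}
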
void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
tdm_descdlen_msk,
tdm_descdlen_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
HW_ATL_TDM_DESCDLEN_MSK,
HW_ATL_TDM_DESCDLEN_SHIFT,
tx_desc_len);
}
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
u32 tx_desc_wr_wb_irq_en)
{
aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
tdm_int_desc_wrb_en_msk,
tdm_int_desc_wrb_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
tx_desc_wr_wb_irq_en);
}
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
u32 tx_desc_wr_wb_threshold,
u32 descriptor)
{
aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
tdm_descdwrb_thresh_msk,
tdm_descdwrb_thresh_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
HW_ATL_TDM_DESCDWRB_THRESH_MSK,
HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
tx_desc_wr_wb_threshold);
}
void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
u32 tdm_irq_moderation_en)
{
aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
tdm_int_mod_en_msk,
tdm_int_mod_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
HW_ATL_TDM_INT_MOD_EN_MSK,
HW_ATL_TDM_INT_MOD_EN_SHIFT,
tdm_irq_moderation_en);
}
/* thm */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_first_pkt)
{
aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
thm_lso_tcp_flag_first_msk,
thm_lso_tcp_flag_first_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
lso_tcp_flag_of_first_pkt);
}
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_last_pkt)
{
aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
thm_lso_tcp_flag_last_msk,
thm_lso_tcp_flag_last_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
lso_tcp_flag_of_last_pkt);
}
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_middle_pkt)
{
aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
thm_lso_tcp_flag_mid_msk,
thm_lso_tcp_flag_mid_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
lso_tcp_flag_of_middle_pkt);
}
/* TPB: tx packet buffer */
void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
{
aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
tpb_tx_buf_en_shift, tx_buff_en);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
HW_ATL_TPB_TX_BUF_EN_MSK,
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
HW_ATL_TPB_TXBHI_THRESH_MSK,
HW_ATL_TPB_TXBHI_THRESH_SHIFT,
tx_buff_hi_threshold_per_tc);
}
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_lo_threshold_per_tc,
u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
HW_ATL_TPB_TXBLO_THRESH_MSK,
HW_ATL_TPB_TXBLO_THRESH_SHIFT,
tx_buff_lo_threshold_per_tc);
}
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
{
aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
tpb_dma_sys_lbk_msk,
tpb_dma_sys_lbk_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
HW_ATL_TPB_DMA_SYS_LBK_MSK,
HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
tx_dma_sys_lbk_en);
}
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
tpb_txbbuf_size_msk,
tpb_txbbuf_size_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
HW_ATL_TPB_TXBBUF_SIZE_MSK,
HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
tx_pkt_buff_size_per_tc);
}
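
The TPB setters are typically used as a group when carving the TX packet buffer into per-TC regions: size first, then the high/low flow-control watermarks, then the global enable. The sketch below is illustrative; the 160 KB size and the 66%/50% thresholds are placeholder assumptions.

/* Illustrative TX packet buffer carve-up for one traffic class; values assumed. */
static void example_tpb_config(struct aq_hw_s *self)
{
	const u32 tc = 0U;
	const u32 buff_size = 160U;	/* assumed per-TC buffer size, in KB */

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, (buff_size * 66U) / 100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, (buff_size * 50U) / 100U, tc);

	hw_atl_tpb_tx_buff_en_set(self, 1U);	/* enable the TX buffer last */
}
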
void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
{
aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
tpb_tx_scp_ins_en_msk,
tpb_tx_scp_ins_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
HW_ATL_TPB_TX_SCP_INS_EN_MSK,
HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
tx_path_scp_ins_en);
}
/* TPO: tx packet offload */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
{
aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
tpo_ipv4chk_en_msk,
tpo_ipv4chk_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
HW_ATL_TPO_IPV4CHK_EN_MSK,
HW_ATL_TPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en)
{
aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
tpol4chk_en_msk,
tpol4chk_en_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
HW_ATL_TPOL4CHK_EN_MSK,
HW_ATL_TPOL4CHK_EN_SHIFT,
tcp_udp_crc_offload_en);
}
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_sys_lbk_en)
{
aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
tpo_pkt_sys_lbk_msk,
tpo_pkt_sys_lbk_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
HW_ATL_TPO_PKT_SYS_LBK_MSK,
HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_data_arb_mode)
{
aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
tps_data_tc_arb_mode_msk,
tps_data_tc_arb_mode_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
tx_pkt_shed_data_arb_mode);
}
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
u32 curr_time_res)
{
aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
tps_desc_rate_ta_rst_msk,
tps_desc_rate_ta_rst_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
curr_time_res);
}
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_rate_lim)
{
aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
tps_desc_rate_lim_msk,
tps_desc_rate_lim_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
HW_ATL_TPS_DESC_RATE_LIM_MSK,
HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
tx_pkt_shed_desc_rate_lim);
}
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_arb_mode)
void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
u32 arb_mode)
{
aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
tps_desc_tc_arb_mode_msk,
tps_desc_tc_arb_mode_shift,
tx_pkt_shed_desc_tc_arb_mode);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
arb_mode);
}
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_max_credit,
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
{
aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
tps_desc_tctcredit_max_msk,
tps_desc_tctcredit_max_shift,
tx_pkt_shed_desc_tc_max_credit);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
max_credit);
}
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight, u32 tc)
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc)
{
aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
tps_desc_tctweight_msk,
tps_desc_tctweight_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
}
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_vm_arb_mode)
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
u32 arb_mode)
{
aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
tps_desc_vm_arb_mode_msk,
tps_desc_vm_arb_mode_shift,
tx_pkt_shed_desc_vm_arb_mode);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
arb_mode);
}
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_max_credit,
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
{
aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
tps_data_tctcredit_max_msk,
tps_data_tctcredit_max_shift,
tx_pkt_shed_tc_data_max_credit);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
max_credit);
}
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight, u32 tc)
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc)
{
aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
tps_data_tctweight_msk,
tps_data_tctweight_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
}
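
The TPS helpers configure TX packet scheduler arbitration; a typical init path sets the data and descriptor arbitration modes once and then programs a max-credit/weight pair per traffic class. All values in the sketch below (round-robin mode, eight TCs, 0xFFF/0x64) are assumptions.

/* Illustrative TX scheduler setup; modes, credits and weights are assumptions. */
static void example_tps_config(struct aq_hw_s *self)
{
	u32 tc;

	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);

	for (tc = 0U; tc < 8U; tc++) {	/* eight traffic classes assumed */
		hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0xFFFU, tc);
		hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x64U, tc);
		hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFFU, tc);
		hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64U, tc);
	}
}
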
/* tx */
void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
{
aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
tx_reg_res_dsbl_msk,
tx_reg_res_dsbl_shift, tx_reg_res_dis);
aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
HW_ATL_TX_REG_RES_DSBL_MSK,
HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
}
/* msm */
u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
msm_reg_access_busy_msk,
msm_reg_access_busy_shift);
return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
}
void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
u32 reg_addr_for_indirect_addr)
{
aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
msm_reg_addr_msk,
msm_reg_addr_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
HW_ATL_MSM_REG_ADDR_MSK,
HW_ATL_MSM_REG_ADDR_SHIFT,
reg_addr_for_indirect_addr);
}
void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
{
aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
msm_reg_rd_strobe_msk,
msm_reg_rd_strobe_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
HW_ATL_MSM_REG_RD_STROBE_MSK,
HW_ATL_MSM_REG_RD_STROBE_SHIFT,
reg_rd_strobe);
}
u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
}
void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
{
aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
}
void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
{
aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
msm_reg_wr_strobe_msk,
msm_reg_wr_strobe_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
HW_ATL_MSM_REG_WR_STROBE_MSK,
HW_ATL_MSM_REG_WR_STROBE_SHIFT,
reg_wr_strobe);
}
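
The MSM helpers above only make sense as a sequence: program the indirect address, pulse the read strobe, wait for the busy flag to clear, then fetch the data. A hedged sketch of that read sequence follows; the open-coded polling loop, the retry count and the udelay() interval are assumptions (the driver itself uses its AQ_HW_WAIT_FOR helper).

/* Sketch of an indirect MSM register read; assumes <linux/delay.h> for udelay(). */
static int example_msm_read(struct aq_hw_s *self, u32 msm_addr, u32 *val)
{
	int retries = 10;	/* retry budget is an assumption */

	hw_atl_msm_reg_addr_for_indirect_addr_set(self, msm_addr);
	hw_atl_msm_reg_rd_strobe_set(self, 1U);

	/* wait for the access-busy bit to clear */
	while (hw_atl_msm_reg_access_status_get(self) && --retries)
		udelay(10);

	if (!retries)
		return -ETIME;

	*val = hw_atl_msm_reg_rd_data_get(self);
	return 0;
}
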
/* pci */
void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
{
aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
pci_reg_res_dsbl_msk,
pci_reg_res_dsbl_shift,
aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
HW_ATL_PCI_REG_RES_DSBL_MSK,
HW_ATL_PCI_REG_RES_DSBL_SHIFT,
pci_reg_res_dis);
}
void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
u32 glb_cpu_scratch_scp,
u32 scratch_scp)
{
aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
glb_cpu_scratch_scp);
}
......@@ -21,657 +21,681 @@ struct aq_hw_s;
/* global */
/* set global microprocessor semaphore */
void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore);
/* get global microprocessor semaphore */
u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
/* set global register reset disable */
void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
/* set soft reset */
void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
/* get soft reset */
u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
/* stats */
u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter lsw */
u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter lsw */
u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter lsw */
u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter lsw */
u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter msw */
u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter msw */
u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter msw */
u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter msw */
u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx unicast frames counter register */
u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx multicast frames counter register */
u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast frames counter register */
u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast octets counter register 1 */
u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get rx dma statistics counter 7 */
u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
/* get msm tx errors counter register */
u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx unicast frames counter register */
u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast frames counter register */
u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast frames counter register */
u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast octets counter register 1 */
u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast octets counter register 1 */
u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx unicast octets counter register 0 */
u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get global mif identification */
u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
/* interrupt */
/* set interrupt auto mask lsw */
void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
u32 irq_auto_masklsw);
/* set interrupt mapping enable rx */
void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
u32 rx);
/* set interrupt mapping enable tx */
void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
u32 tx);
/* set interrupt mapping rx */
void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
/* set interrupt mapping tx */
void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
/* set interrupt mask clear lsw */
void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
u32 irq_msk_clearlsw);
/* set interrupt mask set lsw */
void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
/* set interrupt register reset disable */
void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
/* set interrupt status clear lsw */
void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
u32 irq_status_clearlsw);
/* get interrupt status lsw */
u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
/* get reset interrupt */
u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
/* rdm */
/* set cpu id */
void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set rx dca enable */
void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
/* set rx dca mode */
void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
/* set rx descriptor data buffer size */
void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_data_buff_size,
u32 descriptor);
/* set rx descriptor dca enable */
void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
u32 dca);
/* set rx descriptor enable */
void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
u32 descriptor);
/* set rx descriptor header splitting */
void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_splitting,
u32 descriptor);
/* get rx descriptor head pointer */
u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx descriptor length */
void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
u32 descriptor);
/* set rx descriptor write-back interrupt enable */
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
u32 rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
u32 dca);
/* set rx payload dca enable */
void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
u32 dca);
/* set rx descriptor header buffer size */
void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_buff_size,
u32 descriptor);
/* set rx descriptor reset */
void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
u32 descriptor);
/* Set RDM Interrupt Moderation Enable */
void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
u32 rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
u32 regidx);
/* get general interrupt status register */
u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
/* set interrupt global control register */
void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
/* set interrupt throttle register */
void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
/* set rx dma descriptor base address lsw */
void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_base_addrlsw,
u32 descriptor);
/* set rx dma descriptor base address msw */
void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_base_addrmsw,
u32 descriptor);
/* get rx dma descriptor status register */
u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx dma descriptor tail pointer register */
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_tail_ptr,
u32 descriptor);
/* set rx filter multicast filter mask register */
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
u32 rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
u32 filter);
/* set rx filter rss control register 1 */
void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
u32 rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 rx_intr_moderation_ctl,
u32 queue);
/* set tx dma debug control */
void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
u32 tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_base_addrlsw,
u32 descriptor);
/* set tx dma descriptor base address msw */
void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_base_addrmsw,
u32 descriptor);
/* set tx dma descriptor tail pointer register */
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_tail_ptr,
u32 descriptor);
/* Set TX Interrupt Moderation Control Register */
void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 tx_intr_moderation_ctl,
u32 queue);
/* set global microprocessor scratch pad */
void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
u32 glb_cpu_scratch_scp, u32 scratch_scp);
void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
u32 glb_cpu_scratch_scp,
u32 scratch_scp);
/* rpb */
/* set dma system loopback */
void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set rx traffic class mode */
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
/* set rx buffer enable */
void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
/* set rx buffer high threshold (per tc) */
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_buff_hi_threshold_per_tc,
u32 buffer);
/* set rx buffer low threshold (per tc) */
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_buff_lo_threshold_per_tc,
u32 buffer);
/* set rx flow control mode */
void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
/* set rx xoff enable (per tc) */
void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer);
/* rpf */
/* set l2 broadcast count threshold */
void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_count_threshold);
/* set l2 broadcast enable */
void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
/* set l2 broadcast filter action */
void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_flr_act);
/* set l2 multicast filter enable */
void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
u32 l2multicast_flr_en,
u32 filter);
/* set l2 promiscuous mode enable */
void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en);
/* set l2 unicast filter action */
void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
u32 l2unicast_flr_act,
u32 filter);
/* set l2 unicast filter enable */
void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
u32 filter);
/* set l2 unicast destination address lsw */
void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addresslsw,
u32 filter);
/* set l2 unicast destination address msw */
void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addressmsw,
u32 filter);
/* Set L2 Accept all Multicast packets */
void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
u32 l2_accept_all_mc_packets);
/* set user-priority tc mapping */
void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
u32 user_priority_tc_map, u32 tc);
/* set rss key address */
void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
/* set rss key write data */
void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
/* get rss key write enable */
u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss key write enable */
void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
/* set rss redirection table address */
void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
u32 rss_redir_tbl_addr);
/* set rss redirection table write data */
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
u32 rss_redir_tbl_wr_data);
/* get rss redirection write enable */
u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss redirection write enable */
void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
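
The RSS key and redirection-table helpers declared above likewise form a small write protocol: stage the data word, select the address, raise write-enable, then poll until the hardware drops it. The handshake shown below is an assumption for illustration; only the helper names come from this header.

/* Sketch: write one 32-bit word of the RSS hash key; handshake is assumed. */
static int example_rss_key_write_word(struct aq_hw_s *self, u32 addr, u32 data)
{
	int retries = 1000;	/* assumed poll budget */

	hw_atl_rpf_rss_key_wr_data_set(self, data);
	hw_atl_rpf_rss_key_addr_set(self, addr);
	hw_atl_rpf_rss_key_wr_en_set(self, 1U);

	/* hardware is assumed to clear the write-enable bit when done */
	while (hw_atl_rpf_rss_key_wr_en_get(self) && --retries)
		udelay(1);

	return retries ? 0 : -ETIME;
}
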
/* set tpo to rpf system loopback */
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
u32 tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
/* set vlan outer ethertype */
void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
/* set vlan promiscuous mode enable */
void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
u32 vlan_prom_mode_en);
/* Set VLAN untagged action */
void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
u32 vlan_untagged_act);
/* Set VLAN accept untagged packets */
void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_accept_untagged_packets);
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_acc_untagged_packets);
/* Set VLAN filter enable */
void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
u32 filter);
/* Set VLAN Filter Action */
void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
u32 filter);
/* Set VLAN ID Filter */
void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
/* set ethertype filter enable */
void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
/* set ethertype user-priority enable */
void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
u32 etht_user_priority_en, u32 filter);
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
u32 etht_user_priority_en,
u32 filter);
/* set ethertype rx queue enable */
void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
u32 etht_rx_queue_en,
u32 filter);
/* set ethertype rx queue */
void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
u32 filter);
/* set ethertype user-priority */
void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
u32 etht_user_priority,
u32 filter);
/* set ethertype management queue */
void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
u32 filter);
/* set ethertype filter action */
void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
u32 filter);
/* set ethertype filter */
void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
u32 rx_desc_vlan_stripping,
u32 descriptor);
/* set tcp/udp checksum offload enable */
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
u32 lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
/* Set LRO Q Sessions Limit */
void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
u32 lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
u32 lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
u32 lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
/* Set LRO Max Number of Descriptors */
void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
u32 lro_max_desc_num, u32 lro);
/* Set LRO Time Base Divider */
void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
u32 lro_time_base_divider);
/*Set LRO Inactive Interval */
void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
u32 lro_inactive_interval);
/*Set LRO Max Coalescing Interval */
void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
u32 lro_max_coalescing_interval);
void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
u32 lro_max_coal_interval);
/* rx */
/* set rx register reset disable */
void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
/* tdm */
/* set cpu id */
void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set large send offload enable */
void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
u32 large_send_offload_en);
/* set tx descriptor enable */
void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
u32 descriptor);
/* set tx dca enable */
void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
/* set tx dca mode */
void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
/* set tx descriptor dca enable */
void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
u32 dca);
/* get tx descriptor head pointer */
u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set tx descriptor length */
void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
u32 descriptor);
/* set tx descriptor write-back interrupt enable */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
u32 tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
u32 tx_desc_wr_wb_threshold,
u32 descriptor);
/* Set TDM Interrupt Moderation Enable */
void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
u32 tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
/* set tx buffer high threshold (per tc) */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_hi_threshold_per_tc,
u32 buffer);
/* set tx buffer low threshold (per tc) */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_lo_threshold_per_tc,
u32 buffer);
/* set tx dma system loopback enable */
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer);
/* set tx path pad insert enable */
void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
u32 curr_time_res);
/* set tx packet scheduler descriptor rate limit */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_arb_mode);
void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
u32 arb_mode);
/* set tx packet scheduler descriptor tc max credit */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_max_credit,
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
/* set tx packet scheduler descriptor tc weight */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
/* set tx packet scheduler descriptor vm arbitration mode */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_vm_arb_mode);
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
u32 arb_mode);
/* set tx packet scheduler tc data max credit */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_max_credit,
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
/* tx */
/* set tx register reset disable */
void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
/* msm */
/* get register access status */
u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
/* set register address for indirect address */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
u32 reg_addr_for_indirect_addr);
/* set register read strobe */
void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
/* get register read data */
u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
/* set register write data */
void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
/* set register write strobe */
void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* pci */
/* set pci register reset disable */
void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
#endif /* HW_ATL_LLH_H */

......@@ -11,11 +11,9 @@
* abstraction layer.
*/
#include "../aq_hw.h"
#include "../aq_nic.h"
#include "../aq_hw_utils.h"
#include "../aq_pci_func.h"
#include "../aq_ring.h"
#include "../aq_vec.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
......@@ -37,15 +35,15 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
{
int err = 0;
AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
HW_ATL_FW_SM_RAM) == 1U,
1U, 10000U);
if (err < 0) {
bool is_locked;
reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
......@@ -66,7 +64,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
*(p++) = aq_hw_read_reg(self, 0x0000020CU);
}
reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
......@@ -78,7 +76,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
int err = 0;
bool is_locked;
is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
......@@ -97,7 +95,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
}
}
reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
......@@ -119,7 +117,7 @@ static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
}
static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps)
const struct aq_hw_caps_s *aq_hw_caps)
{
int err = 0;
......@@ -133,10 +131,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
......@@ -174,14 +172,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
err = -1;
goto err_exit;
}
err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
(u32 *)(void *)&PHAL_ATLANTIC->rpc,
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
sw.tid = 0xFFFFU & (++self->rpc_tid);
sw.len = (u16)rpc_size;
aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
......@@ -199,7 +197,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
PHAL_ATLANTIC->rpc_tid = sw.tid;
self->rpc_tid = sw.tid;
AQ_HW_WAIT_FOR(sw.tid ==
(fw.val =
......@@ -221,9 +219,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (fw.len) {
err =
hw_atl_utils_fw_downld_dwords(self,
PHAL_ATLANTIC->rpc_addr,
self->rpc_addr,
(u32 *)(void *)
&PHAL_ATLANTIC->rpc,
&self->rpc,
(fw.len + sizeof(u32) -
sizeof(u8)) /
sizeof(u32));
......@@ -231,19 +229,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
*rpc = &PHAL_ATLANTIC->rpc;
*rpc = &self->rpc;
}
err_exit:
return err;
}
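
With the RPC state moved into struct aq_hw_s, a firmware request is: fill self->rpc, push it to the firmware with hw_atl_utils_fw_rpc_call(), then block in hw_atl_utils_fw_rpc_wait() until the firmware echoes the transaction id and the reply has been downloaded back into self->rpc. A minimal sketch of that round trip; only the call order is inferred from this file, the request contents are left to the caller.

/* Sketch of the RPC round trip after the refactor; the request is assumed to
 * have been written into self->rpc before this function is called.
 */
static int example_fw_rpc(struct aq_hw_s *self, unsigned int rpc_size)
{
	struct hw_aq_atl_utils_fw_rpc *reply = NULL;
	int err;

	err = hw_atl_utils_fw_rpc_call(self, rpc_size);
	if (err < 0)
		return err;

	/* waits for self->rpc_tid to be acknowledged, then downloads the reply */
	return hw_atl_utils_fw_rpc_wait(self, &reply);
}
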
static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps)
static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
int err = 0;
err = hw_atl_utils_init_ucp(self, aq_hw_caps);
err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
if (err < 0)
goto err_exit;
......@@ -259,7 +256,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox_header *pmbox)
{
return hw_atl_utils_fw_downld_dwords(self,
PHAL_ATLANTIC->mbox_addr,
self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
}
......@@ -270,7 +267,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
int err = 0;
err = hw_atl_utils_fw_downld_dwords(self,
PHAL_ATLANTIC->mbox_addr,
self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
if (err < 0)
......@@ -281,9 +278,9 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
}
err_exit:;
......@@ -365,7 +362,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
}
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
u8 *mac)
{
int err = 0;
......@@ -376,9 +372,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
hw_atl_utils_hw_chip_features_init(self,
&PHAL_ATLANTIC_A0->chip_features);
&self->chip_features);
err = hw_atl_utils_mpi_create(self, aq_hw_caps);
err = hw_atl_utils_mpi_create(self);
if (err < 0)
goto err_exit;
......@@ -396,7 +392,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
AQ_DIMOF(mac_addr));
ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
......@@ -465,7 +461,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
u32 chip_features = 0U;
u32 val = reg_glb_mif_id_get(self);
u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
if ((3U & mif_rev) == 1U) {
......@@ -500,13 +496,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
mbox.stats._N_ - hw_self->last_stats._N_)
#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
mbox.stats._N_ - self->last_stats._N_)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
AQ_SDELTA(mprc);
......@@ -527,19 +523,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(dpc);
}
#undef AQ_SDELTA
hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
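Illustrative aside (not part of the patch): AQ_SDELTA() folds the difference between the freshly read mailbox counter and the previously stored snapshot into the running totals that now live directly in struct aq_hw_s. Expanded by hand for a single counter, the hunk above is equivalent to:

    /* What AQ_SDELTA(uprc) expands to after this change (sketch): */
    self->curr_stats.uprc += mbox.stats.uprc - self->last_stats.uprc;

    /* ...and once every delta has been applied, the snapshot is refreshed
     * so the next call measures only new traffic:
     */
    memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));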
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
return &PHAL_ATLANTIC->curr_stats;
return &self->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
......@@ -568,7 +564,7 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
};
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff)
{
unsigned int i = 0U;
......
......@@ -14,10 +14,39 @@
#ifndef HW_ATL_UTILS_H
#define HW_ATL_UTILS_H
#include "../aq_common.h"
#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
/* Hardware tx descriptor */
struct __packed hw_atl_txd_s {
u64 buf_addr;
u32 ctl;
u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
};
/* Hardware tx context descriptor */
struct __packed hw_atl_txc_s {
u32 rsvd;
u32 len;
u32 ctl;
u32 len2;
};
/* Hardware rx descriptor */
struct __packed hw_atl_rxd_s {
u64 buf_addr;
u64 hdr_addr;
};
/* Hardware rx descriptor writeback */
struct __packed hw_atl_rxd_wb_s {
u32 type;
u32 rss_hash;
u16 status;
u16 pkt_len;
u16 next_desc_ptr;
u16 vlan;
};
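A small aside on the descriptor structs added above: they are declared __packed so their in-memory layout matches the hardware ring entries byte for byte, and by the field sizes shown each works out to 16 bytes. A hypothetical compile-time guard (not part of this patch) that would pin that expectation down:

    /* Hypothetical layout guard, assuming BUILD_BUG_ON() from
     * <linux/kernel.h>; each descriptor is expected to be 16 bytes.
     */
    static inline void hw_atl_desc_layout_check(void)
    {
        BUILD_BUG_ON(sizeof(struct hw_atl_txd_s) != 16);
        BUILD_BUG_ON(sizeof(struct hw_atl_txc_s) != 16);
        BUILD_BUG_ON(sizeof(struct hw_atl_rxd_s) != 16);
        BUILD_BUG_ON(sizeof(struct hw_atl_rxd_wb_s) != 16);
    }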
struct __packed hw_atl_stats_s {
u32 uprc;
u32 mprc;
......@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
struct hw_atl_stats_s stats;
};
struct __packed hw_atl_s {
struct aq_hw_s base;
struct hw_atl_stats_s last_stats;
struct aq_stats_s curr_stats;
u64 speed;
unsigned int chip_features;
u32 fw_ver_actual;
atomic_t dpc;
u32 mbox_addr;
u32 rpc_addr;
u32 rpc_tid;
struct hw_aq_atl_utils_fw_rpc rpc;
};
#define SELF ((struct hw_atl_s *)self)
#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
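The cast macros deleted above existed only to reinterpret the aq_hw_s handle as the larger hw_atl_s wrapper; with the wrapper's fields folded into aq_hw_s itself, call sites reach the state directly. Roughly (illustrative, not a hunk from the patch):

    /* before: per-device state reached through a type-punning cast */
    u32 addr = PHAL_ATLANTIC->mbox_addr;

    /* after: the same field is an ordinary member of struct aq_hw_s */
    u32 addr = self->mbox_addr;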
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
......@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
PHAL_ATLANTIC->chip_features)
self->chip_features)
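IS_CHIP_FEATURE() keeps its token-pasting form but now tests the feature bitmap stored on the hw handle itself. A hypothetical call site (the macro assumes a local `self` pointer is in scope):

    /* Hypothetical helper, shown only to illustrate macro usage. */
    static bool hw_atl_is_rev_b0(struct aq_hw_s *self)
    {
        return IS_CHIP_FEATURE(REVISION_B0);
    }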
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
......@@ -171,6 +180,10 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
struct aq_hw_s;
struct aq_hw_caps_s;
struct aq_hw_link_status_s;
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
......@@ -189,13 +202,12 @@ int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
......