Commit 0cb98e84 authored by David S. Miller

Merge branch 'octeontx2-af-CGX-LMAC-link-bringup-and-cleanups'

Linu Cherian says:

====================
octeontx2-af: CGX LMAC link bringup and cleanups

Patch 1: Code cleanup
Patch 2: Adds support for an unhandled hardware configuration
Patch 3: Preparatory patch for enabling cgx lmac links
Patch 4: Support for enabling cgx lmac links
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e159e592 d3b2b9ab
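
For context, a minimal sketch of how a consumer is expected to drive the APIs added by patches 3 and 4: register a per-LMAC link-change callback, kick off link bringup, and unregister safely on teardown. This is illustrative only and not part of the series; the example_* names are hypothetical, and the notify_link_chg signature is assumed from cgx.h. The real in-tree consumer is the rvu_cgx code in the diff below.

/* Illustrative sketch; example_* names are hypothetical and the
 * cgx_link_event callback signature is an assumption based on cgx.h.
 */
static int example_link_notify(struct cgx_link_event *event, void *data)
{
        /* Runs from the CGX firmware interrupt path while
         * event_cb_lock is held, so keep this short and atomic.
         */
        return 0;
}

static int example_attach(void *cgxd)
{
        struct cgx_event_cb cb = {
                .notify_link_chg = example_link_notify,
                .data = NULL,
        };
        int lmac, err;

        for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
                err = cgx_lmac_evh_register(&cb, cgxd, lmac);
                if (err)
                        return err;
        }

        /* Queues CGX_CMD_LINK_BRING_UP for every LMAC on this CGX */
        return cgx_lmac_linkup_start(cgxd);
}

static void example_detach(void *cgxd)
{
        int lmac;

        /* Safe against a concurrent link-change interrupt because
         * unregister serializes on event_cb_lock (patch 3).
         */
        for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
                cgx_lmac_evh_unregister(cgxd, lmac);
}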
@@ -31,6 +31,7 @@
  * @resp:			command response
  * @link_info:			link related information
  * @event_cb:			callback for linkchange events
+ * @event_cb_lock:		lock for serializing callback with unregister
  * @cmd_pend:			flag set before new command is started
  *				flag cleared after command response is received
  * @cgx:			parent cgx port
@@ -43,6 +44,7 @@ struct lmac {
 	u64 resp;
 	struct cgx_link_user_info link_info;
 	struct cgx_event_cb event_cb;
+	spinlock_t event_cb_lock;
 	bool cmd_pend;
 	struct cgx *cgx;
 	u8 lmac_id;
@@ -55,6 +57,8 @@ struct cgx {
 	u8 cgx_id;
 	u8 lmac_count;
 	struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+	struct work_struct cgx_cmd_work;
+	struct workqueue_struct *cgx_cmd_workq;
 	struct list_head cgx_list;
 };
 
@@ -66,6 +70,9 @@
 /* Convert firmware lmac type encoding to string */
 static char *cgx_lmactype_string[LMAC_MODE_MAX];
 
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
 /* Supported devices */
 static const struct pci_device_id cgx_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 	return cgx->lmac_idmap[lmac_id];
 }
 
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
 {
 	struct cgx *cgx_dev;
-	int count = 0;
+	int idmax = -ENODEV;
 
 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
-		count++;
+		if (cgx_dev->cgx_id > idmax)
+			idmax = cgx_dev->cgx_id;
+
+	if (idmax < 0)
+		return 0;
 
-	return count;
+	return idmax + 1;
 }
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
 
 int cgx_get_lmac_cnt(void *cgxd)
 {
@@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat,
 	lmac->link_info = event.link_uinfo;
 	linfo = &lmac->link_info;
 
+	/* Ensure callback doesn't get unregistered until we finish it */
+	spin_lock(&lmac->event_cb_lock);
+
 	if (!lmac->event_cb.notify_link_chg) {
 		dev_dbg(dev, "cgx port %d:%d Link change handler null",
 			cgx->cgx_id, lmac->lmac_id);
@@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat,
 		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
 			 cgx->cgx_id, lmac->lmac_id,
 			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
-		return;
+		goto err;
 	}
 
 	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
 		dev_err(dev, "event notification failure\n");
+err:
+	spin_unlock(&lmac->event_cb_lock);
 }
 
 static inline bool cgx_cmdresp_is_linkevent(u64 event)
@@ -548,6 +564,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
 }
 EXPORT_SYMBOL(cgx_lmac_evh_register);
 
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+	struct lmac *lmac;
+	unsigned long flags;
+	struct cgx *cgx = cgxd;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	spin_lock_irqsave(&lmac->event_cb_lock, flags);
+	lmac->event_cb.notify_link_chg = NULL;
+	lmac->event_cb.data = NULL;
+	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+	u64 req = 0;
+	u64 resp;
+
+	if (enable)
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+	else
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
 {
 	u64 req = 0;
@@ -581,6 +629,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
 	return 0;
 }
 
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+	struct device *dev = &cgx->pdev->dev;
+	int i, err;
+
+	/* Do Link up for all the lmacs */
+	for (i = 0; i < cgx->lmac_count; i++) {
+		err = cgx_fwi_link_change(cgx, i, true);
+		if (err)
+			dev_info(dev, "cgx port %d:%d Link up command failed\n",
+				 cgx->cgx_id, i);
+	}
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx)
+		return -ENODEV;
+
+	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
 static int cgx_lmac_init(struct cgx *cgx)
 {
 	struct lmac *lmac;
@@ -602,6 +678,7 @@ static int cgx_lmac_init(struct cgx *cgx)
 		lmac->cgx = cgx;
 		init_waitqueue_head(&lmac->wq_cmd_cmplt);
 		mutex_init(&lmac->cmd_lock);
+		spin_lock_init(&lmac->event_cb_lock);
 		err = request_irq(pci_irq_vector(cgx->pdev,
 						 CGX_LMAC_FWI + i * 9),
 				  cgx_fwi_event_handler, 0, lmac->name, lmac);
@@ -624,6 +701,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
 	struct lmac *lmac;
 	int i;
 
+	if (cgx->cgx_cmd_workq) {
+		flush_workqueue(cgx->cgx_cmd_workq);
+		destroy_workqueue(cgx->cgx_cmd_workq);
+		cgx->cgx_cmd_workq = NULL;
+	}
+
 	/* Free all lmac related resources */
 	for (i = 0; i < cgx->lmac_count; i++) {
 		lmac = cgx->lmac_idmap[i];
@@ -679,8 +762,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_regions;
 	}
 
+	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+		& CGX_ID_MASK;
+
+	/* init wq for processing linkup requests */
+	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+	if (!cgx->cgx_cmd_workq) {
+		dev_err(dev, "alloc workqueue failed for cgx cmd");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
 	list_add(&cgx->cgx_list, &cgx_list);
-	cgx->cgx_id = cgx_get_cgx_cnt() - 1;
 
 	cgx_link_usertable_init();
......
@@ -20,41 +20,41 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM		0
 
-#define MAX_CGX				3
+#define CGX_ID_MASK			0x7
 #define MAX_LMAC_PER_CGX		4
 #define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
 
 /* Registers */
 #define CGXX_CMRX_CFG			0x00
 #define CMR_EN				BIT_ULL(55)
 #define DATA_PKT_TX_EN			BIT_ULL(53)
 #define DATA_PKT_RX_EN			BIT_ULL(54)
 #define CGX_LMAC_TYPE_SHIFT		40
 #define CGX_LMAC_TYPE_MASK		0xF
 #define CGXX_CMRX_INT			0x040
 #define FW_CGX_INT			BIT_ULL(1)
 #define CGXX_CMRX_INT_ENA_W1S		0x058
 #define CGXX_CMRX_RX_ID_MAP		0x060
 #define CGXX_CMRX_RX_STAT0		0x070
 #define CGXX_CMRX_RX_LMACS		0x128
 #define CGXX_CMRX_RX_DMAC_CTL0		0x1F8
 #define CGX_DMAC_CTL0_CAM_ENABLE	BIT_ULL(3)
 #define CGX_DMAC_CAM_ACCEPT		BIT_ULL(3)
 #define CGX_DMAC_MCAST_MODE		BIT_ULL(1)
 #define CGX_DMAC_BCAST_MODE		BIT_ULL(0)
 #define CGXX_CMRX_RX_DMAC_CAM0		0x200
 #define CGX_DMAC_CAM_ADDR_ENABLE	BIT_ULL(48)
 #define CGXX_CMRX_RX_DMAC_CAM1		0x400
 #define CGX_RX_DMAC_ADR_MASK		GENMASK_ULL(47, 0)
 #define CGXX_CMRX_TX_STAT0		0x700
 #define CGXX_SCRATCH0_REG		0x1050
 #define CGXX_SCRATCH1_REG		0x1058
 #define CGX_CONST			0x2000
 #define CGXX_SPUX_CONTROL1		0x10000
 #define CGXX_SPUX_CONTROL1_LBK		BIT_ULL(14)
 #define CGXX_GMP_PCS_MRX_CTL		0x30000
 #define CGXX_GMP_PCS_MRX_CTL_LBK	BIT_ULL(14)
 
 #define CGX_COMMAND_REG			CGXX_SCRATCH1_REG
 #define CGX_EVENT_REG			CGXX_SCRATCH0_REG
@@ -95,11 +95,12 @@ struct cgx_event_cb {
 extern struct pci_driver cgx_driver;
 
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
 int cgx_get_lmac_cnt(void *cgxd);
 void *cgx_get_pdata(int cgx_id);
 int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
 int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
 int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
 int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
 int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
@@ -109,4 +110,5 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
 int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
 int cgx_get_link_info(void *cgxd, int lmac_id,
 		      struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
 #endif /* CGX_H */
@@ -78,8 +78,6 @@ enum cgx_cmd_id {
 	CGX_CMD_LINK_STATE_CHANGE,
 	CGX_CMD_MODE_CHANGE,		/* hot plug support */
 	CGX_CMD_INTF_SHUTDOWN,
-	CGX_CMD_IRQ_ENABLE,
-	CGX_CMD_IRQ_DISABLE,
 };
 
 /* async event ids */
......
@@ -811,17 +811,26 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
 	err = rvu_npc_init(rvu);
 	if (err)
-		return err;
+		goto exit;
+
+	err = rvu_cgx_init(rvu);
+	if (err)
+		goto exit;
 
 	err = rvu_npa_init(rvu);
 	if (err)
-		return err;
+		goto cgx_err;
 
 	err = rvu_nix_init(rvu);
 	if (err)
-		return err;
+		goto cgx_err;
 
 	return 0;
+
+cgx_err:
+	rvu_cgx_exit(rvu);
+exit:
+	return err;
 }
 
 /* NPA and NIX admin queue APIs */
@@ -2419,13 +2428,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_hwsetup;
 
-	err = rvu_cgx_probe(rvu);
-	if (err)
-		goto err_mbox;
-
 	err = rvu_flr_init(rvu);
 	if (err)
-		goto err_cgx;
+		goto err_mbox;
 
 	err = rvu_register_interrupts(rvu);
 	if (err)
@@ -2441,11 +2446,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	rvu_unregister_interrupts(rvu);
 err_flr:
 	rvu_flr_wq_destroy(rvu);
-err_cgx:
-	rvu_cgx_wq_destroy(rvu);
 err_mbox:
 	rvu_mbox_destroy(&rvu->afpf_wq_info);
 err_hwsetup:
+	rvu_cgx_exit(rvu);
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
 err_release_regions:
@@ -2465,7 +2469,7 @@ static void rvu_remove(struct pci_dev *pdev)
 	rvu_unregister_interrupts(rvu);
 	rvu_flr_wq_destroy(rvu);
-	rvu_cgx_wq_destroy(rvu);
+	rvu_cgx_exit(rvu);
 	rvu_mbox_destroy(&rvu->afpf_wq_info);
 	rvu_disable_sriov(rvu);
 	rvu_reset_all_blocks(rvu);
......
@@ -226,7 +226,7 @@ struct rvu {
 	/* CGX */
 #define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
 	u8 cgx_mapped_pfs;
-	u8 cgx_cnt; /* available cgx ports */
+	u8 cgx_cnt_max; /* CGX port count max */
 	u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
 	u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
 			     * every cgx lmac port
@@ -316,8 +316,8 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
 	*lmac_id = (map & 0xF);
 }
 
-int rvu_cgx_probe(struct rvu *rvu);
-void rvu_cgx_wq_destroy(struct rvu *rvu);
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
......
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 {
-	if (cgx_id >= rvu->cgx_cnt)
+	if (cgx_id >= rvu->cgx_cnt_max)
 		return NULL;
 
 	return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt = rvu->cgx_cnt;
+	int cgx_cnt_max = rvu->cgx_cnt_max;
 	int cgx, lmac_cnt, lmac;
 	int pf = PF_CGXMAP_BASE;
 	int size, free_pkind;
 
-	if (!cgx_cnt)
+	if (!cgx_cnt_max)
 		return 0;
 
-	if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
 		return -EINVAL;
 
 	/* Alloc map table
 	 * An additional entry is required since PF id starts from 1 and
 	 * hence entry at offset 0 is invalid.
 	 */
-	size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
-	rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
 	if (!rvu->pf2cgxlmac_map)
 		return -ENOMEM;
 
-	/* Initialize offset 0 with an invalid cgx and lmac id */
-	rvu->pf2cgxlmac_map[0] = 0xFF;
+	/* Initialize all entries with an invalid cgx and lmac id */
+	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
 	/* Reverse map table */
 	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
 				  GFP_KERNEL);
 	if (!rvu->cgxlmac2pf_map)
 		return -ENOMEM;
 
 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt; cgx++) {
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
 		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
 	} while (1);
 }
 
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
 {
 	struct cgx_event_cb cb;
 	int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
 	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
 	if (!rvu->cgx_evh_wq) {
 		dev_err(rvu->dev, "alloc workqueue failed");
-		return;
+		return -ENOMEM;
 	}
 
 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
 	cb.data = rvu;
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
 		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
 		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
 			if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
 				cgx, lmac);
 		}
 	}
+
+	return 0;
 }
 
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
 {
 	if (rvu->cgx_evh_wq) {
 		flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
 	}
 }
 
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
 {
-	int i, err;
+	int cgx, err;
+	void *cgxd;
 
-	/* find available cgx ports */
-	rvu->cgx_cnt = cgx_get_cgx_cnt();
-	if (!rvu->cgx_cnt) {
+	/* CGX port id starts from 0 and are not necessarily contiguous
+	 * Hence we allocate resources based on the maximum port id value.
+	 */
+	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+	if (!rvu->cgx_cnt_max) {
 		dev_info(rvu->dev, "No CGX devices found!\n");
 		return -ENODEV;
 	}
 
-	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
-				      GFP_KERNEL);
+	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+				      sizeof(void *), GFP_KERNEL);
 	if (!rvu->cgx_idmap)
 		return -ENOMEM;
 
 	/* Initialize the cgxdata table */
-	for (i = 0; i < rvu->cgx_cnt; i++)
-		rvu->cgx_idmap[i] = cgx_get_pdata(i);
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
 
 	/* Map CGX LMAC interfaces to RVU PFs */
 	err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
 		return err;
 
 	/* Register for CGX events */
-	cgx_lmac_event_handler_init(rvu);
+	err = cgx_lmac_event_handler_init(rvu);
+	if (err)
+		return err;
+
+	/* Ensure event handler registration is completed, before
+	 * we turn on the links
+	 */
+	mb();
+
+	/* Do link up for all CGX ports */
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		err = cgx_lmac_linkup_start(cgxd);
+		if (err)
+			dev_err(rvu->dev,
+				"Link up process failed to start on cgx %d\n",
+				cgx);
+	}
+
+	return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+	int cgx, lmac;
+	void *cgxd;
+
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+			cgx_lmac_evh_unregister(cgxd, lmac);
+	}
+
+	/* Ensure event handler unregister is completed */
+	mb();
+
+	rvu_cgx_wq_destroy(rvu);
 	return 0;
 }
......
...@@ -2107,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) ...@@ -2107,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
/* Check if CGX devices are ready */ /* Check if CGX devices are ready */
for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) { for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
if (status & (BIT_ULL(16 + idx))) /* Skip when cgx port is not available */
if (!rvu_cgx_pdata(idx, rvu) ||
(status & (BIT_ULL(16 + idx))))
continue; continue;
dev_err(rvu->dev, dev_err(rvu->dev,
"CGX%d didn't respond to NIX X2P calibration\n", idx); "CGX%d didn't respond to NIX X2P calibration\n", idx);
......