Commit d9110b0b authored by Srujana Challa, committed by Herbert Xu

crypto: marvell - add support for OCTEON TX CPT engine

Add support for the cryptographic acceleration unit (CPT) on
OcteonTX CN83XX SoC.
Co-developed-by: Lukasz Bartosik <lbartosik@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 655ff1a1
...@@ -10018,6 +10018,7 @@ F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt ...@@ -10018,6 +10018,7 @@ F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
MARVELL CRYPTO DRIVER MARVELL CRYPTO DRIVER
M: Boris Brezillon <bbrezillon@kernel.org> M: Boris Brezillon <bbrezillon@kernel.org>
M: Arnaud Ebalard <arno@natisbad.org> M: Arnaud Ebalard <arno@natisbad.org>
M: Srujana Challa <schalla@marvell.com>
F: drivers/crypto/marvell/ F: drivers/crypto/marvell/
S: Maintained S: Maintained
L: linux-crypto@vger.kernel.org L: linux-crypto@vger.kernel.org
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx-cpt.o
octeontx-cpt-objs := otx_cptpf_main.o otx_cptpf_mbox.o otx_cptpf_ucode.o
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPT_COMMON_H
#define __OTX_CPT_COMMON_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/device.h>
/* Buffer size for the textual hex dump of one mailbox message */
#define OTX_CPT_MAX_MBOX_DATA_STR_SIZE 64

/* Personality of the physical function: which engine type it drives */
enum otx_cptpf_type {
	OTX_CPT_AE = 2,		/* PF driving AE engines */
	OTX_CPT_SE = 3,		/* PF driving SE engines */
	BAD_OTX_CPTPF_TYPE,	/* Sentinel: one past the last valid value */
};

/* Engine type reported back to a VF on queue-to-group binding */
enum otx_cptvf_type {
	OTX_CPT_AE_TYPES = 1,
	OTX_CPT_SE_TYPES = 2,
	BAD_OTX_CPTVF_TYPE,	/* Sentinel: binding failed / unknown type */
};

/* VF-PF message opcodes */
enum otx_cpt_mbox_opcode {
	OTX_CPT_MSG_VF_UP = 1,	/* VF announces it is coming up */
	OTX_CPT_MSG_VF_DOWN,	/* First message of VF teardown */
	OTX_CPT_MSG_READY,	/* VF asks for its VF id */
	OTX_CPT_MSG_QLEN,	/* VF requests queue length configuration */
	OTX_CPT_MSG_QBIND_GRP,	/* VF asks to bind its VQ to an engine group */
	OTX_CPT_MSG_VQ_PRIORITY, /* VF requests VQ priority change */
	OTX_CPT_MSG_PF_TYPE,	/* VF queries the PF personality (SE/AE) */
	OTX_CPT_MSG_ACK,	/* PF acknowledges a VF request */
	OTX_CPT_MSG_NACK	/* PF rejects a VF request */
};

/* OcteonTX CPT mailbox structure */
struct otx_cpt_mbox {
	u64 msg; /* Message type MBOX[0] */
	u64 data;/* Data MBOX[1] */
};
#endif /* __OTX_CPT_COMMON_H */
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPT_HW_TYPES_H
#define __OTX_CPT_HW_TYPES_H
#include <linux/types.h>
/* Device IDs */
#define OTX_CPT_PCI_PF_DEVICE_ID 0xa040
#define OTX_CPT_PCI_PF_SUBSYS_ID 0xa340
/* Configuration and status registers are in BAR0 on OcteonTX platform */
#define OTX_CPT_PF_PCI_CFG_BAR 0
/* Mailbox interrupts offset */
#define OTX_CPT_PF_MBOX_INT 3
#define OTX_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
/* Number of MSIX supported in PF */
#define OTX_CPT_PF_MSIX_VECTORS 4
/* Maximum supported microcode groups */
#define OTX_CPT_MAX_ENGINE_GROUPS 8
/* OcteonTX CPT PF registers */
#define OTX_CPT_PF_CONSTANTS (0x0ll)
#define OTX_CPT_PF_RESET (0x100ll)
#define OTX_CPT_PF_DIAG (0x120ll)
#define OTX_CPT_PF_BIST_STATUS (0x160ll)
#define OTX_CPT_PF_ECC0_CTL (0x200ll)
#define OTX_CPT_PF_ECC0_FLIP (0x210ll)
#define OTX_CPT_PF_ECC0_INT (0x220ll)
#define OTX_CPT_PF_ECC0_INT_W1S (0x230ll)
#define OTX_CPT_PF_ECC0_ENA_W1S (0x240ll)
#define OTX_CPT_PF_ECC0_ENA_W1C (0x250ll)
#define OTX_CPT_PF_MBOX_INTX(b) (0x400ll | (u64)(b) << 3)
#define OTX_CPT_PF_MBOX_INT_W1SX(b) (0x420ll | (u64)(b) << 3)
#define OTX_CPT_PF_MBOX_ENA_W1CX(b) (0x440ll | (u64)(b) << 3)
#define OTX_CPT_PF_MBOX_ENA_W1SX(b) (0x460ll | (u64)(b) << 3)
#define OTX_CPT_PF_EXEC_INT (0x500ll)
#define OTX_CPT_PF_EXEC_INT_W1S (0x520ll)
#define OTX_CPT_PF_EXEC_ENA_W1C (0x540ll)
#define OTX_CPT_PF_EXEC_ENA_W1S (0x560ll)
#define OTX_CPT_PF_GX_EN(b) (0x600ll | (u64)(b) << 3)
#define OTX_CPT_PF_EXEC_INFO (0x700ll)
#define OTX_CPT_PF_EXEC_BUSY (0x800ll)
#define OTX_CPT_PF_EXEC_INFO0 (0x900ll)
#define OTX_CPT_PF_EXEC_INFO1 (0x910ll)
#define OTX_CPT_PF_INST_REQ_PC (0x10000ll)
#define OTX_CPT_PF_INST_LATENCY_PC (0x10020ll)
#define OTX_CPT_PF_RD_REQ_PC (0x10040ll)
#define OTX_CPT_PF_RD_LATENCY_PC (0x10060ll)
#define OTX_CPT_PF_RD_UC_PC (0x10080ll)
#define OTX_CPT_PF_ACTIVE_CYCLES_PC (0x10100ll)
#define OTX_CPT_PF_EXE_CTL (0x4000000ll)
#define OTX_CPT_PF_EXE_STATUS (0x4000008ll)
#define OTX_CPT_PF_EXE_CLK (0x4000010ll)
#define OTX_CPT_PF_EXE_DBG_CTL (0x4000018ll)
#define OTX_CPT_PF_EXE_DBG_DATA (0x4000020ll)
#define OTX_CPT_PF_EXE_BIST_STATUS (0x4000028ll)
#define OTX_CPT_PF_EXE_REQ_TIMER (0x4000030ll)
#define OTX_CPT_PF_EXE_MEM_CTL (0x4000038ll)
#define OTX_CPT_PF_EXE_PERF_CTL (0x4001000ll)
#define OTX_CPT_PF_EXE_DBG_CNTX(b) (0x4001100ll | (u64)(b) << 3)
#define OTX_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180ll)
#define OTX_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200ll | (u64)(b) << 3)
#define OTX_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240ll | (u64)(b) << 3)
#define OTX_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000ll | (u64)(b) << 3)
#define OTX_CPT_PF_QX_CTL(b) (0x8000000ll | (u64)(b) << 20)
#define OTX_CPT_PF_QX_GMCTL(b) (0x8000020ll | (u64)(b) << 20)
#define OTX_CPT_PF_QX_CTL2(b) (0x8000100ll | (u64)(b) << 20)
#define OTX_CPT_PF_VFX_MBOXX(b, c) (0x8001000ll | (u64)(b) << 20 | \
(u64)(c) << 8)
/*
* Register (NCB) otx_cpt#_pf_bist_status
*
* CPT PF Control Bist Status Register
* This register has the BIST status of memories. Each bit is the BIST result
* of an individual memory (per bit, 0 = pass and 1 = fail).
* otx_cptx_pf_bist_status_s
* Word0
* bstatus [29:0](RO/H) BIST status. One bit per memory, enumerated by
* CPT_RAMS_E.
*/
/* Layout of CPT()_PF_BIST_STATUS (semantics documented in the block above) */
union otx_cptx_pf_bist_status {
	u64 u;	/* Raw 64-bit register value */
	struct otx_cptx_pf_bist_status_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
		u64 reserved_30_63:34;
		u64 bstatus:30;	/* One bit per RAM; 1 = BIST failure */
#else /* Word 0 - Little Endian */
		u64 bstatus:30;	/* One bit per RAM; 1 = BIST failure */
		u64 reserved_30_63:34;
#endif /* Word 0 - End */
	} s;
};
/*
* Register (NCB) otx_cpt#_pf_constants
*
* CPT PF Constants Register
* This register contains implementation-related parameters of CPT in CNXXXX.
* otx_cptx_pf_constants_s
* Word 0
* reserved_40_63:24 [63:40] Reserved.
* epcis:8 [39:32](RO) Number of EPCI busses.
* grps:8 [31:24](RO) Number of engine groups implemented.
* ae:8 [23:16](RO/H) Number of AEs. In CNXXXX, for CPT0 returns 0x0,
* for CPT1 returns 0x18, or less if there are fuse-disables.
* se:8 [15:8](RO/H) Number of SEs. In CNXXXX, for CPT0 returns 0x30,
* or less if there are fuse-disables, for CPT1 returns 0x0.
* vq:8 [7:0](RO) Number of VQs.
*/
/* Layout of CPT()_PF_CONSTANTS (field semantics documented in the block above) */
union otx_cptx_pf_constants {
	u64 u;	/* Raw 64-bit register value */
	struct otx_cptx_pf_constants_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
		u64 reserved_40_63:24;
		u64 epcis:8;	/* Number of EPCI busses */
		u64 grps:8;	/* Number of engine groups implemented */
		u64 ae:8;	/* Number of AE cores (less any fuse-disables) */
		u64 se:8;	/* Number of SE cores (less any fuse-disables) */
		u64 vq:8;	/* Number of virtual queues */
#else /* Word 0 - Little Endian */
		u64 vq:8;	/* Number of virtual queues */
		u64 se:8;	/* Number of SE cores (less any fuse-disables) */
		u64 ae:8;	/* Number of AE cores (less any fuse-disables) */
		u64 grps:8;	/* Number of engine groups implemented */
		u64 epcis:8;	/* Number of EPCI busses */
		u64 reserved_40_63:24;
#endif /* Word 0 - End */
	} s;
};
/*
* Register (NCB) otx_cpt#_pf_exe_bist_status
*
* CPT PF Engine Bist Status Register
* This register has the BIST status of each engine. Each bit is the
* BIST result of an individual engine (per bit, 0 = pass and 1 = fail).
* otx_cptx_pf_exe_bist_status_s
* Word0
* reserved_48_63:16 [63:48] reserved
* bstatus:48 [47:0](RO/H) BIST status. One bit per engine.
*
*/
/* Layout of CPT()_PF_EXE_BIST_STATUS (semantics documented in the block above) */
union otx_cptx_pf_exe_bist_status {
	u64 u;	/* Raw 64-bit register value */
	struct otx_cptx_pf_exe_bist_status_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
		u64 reserved_48_63:16;
		u64 bstatus:48;	/* One bit per engine; 1 = BIST failure */
#else /* Word 0 - Little Endian */
		u64 bstatus:48;	/* One bit per engine; 1 = BIST failure */
		u64 reserved_48_63:16;
#endif /* Word 0 - End */
	} s;
};
/*
* Register (NCB) otx_cpt#_pf_q#_ctl
*
* CPT Queue Control Register
* This register configures queues. This register should be changed only
* when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
* otx_cptx_pf_qx_ctl_s
* Word0
* reserved_60_63:4 [63:60] reserved.
* aura:12; [59:48](R/W) Guest-aura for returning this queue's
* instruction-chunk buffers to FPA. Only used when [INST_FREE] is set.
* For the FPA to not discard the request, FPA_PF_MAP() must map
* [AURA] and CPT()_PF_Q()_GMCTL[GMID] as valid.
* reserved_45_47:3 [47:45] reserved.
* size:13 [44:32](R/W) Command-buffer size, in number of 64-bit words per
* command buffer segment. Must be 8*n + 1, where n is the number of
* instructions per buffer segment.
* reserved_11_31:21 [31:11] Reserved.
* cont_err:1 [10:10](R/W) Continue on error.
* 0 = When CPT()_VQ()_MISC_INT[NWRP], CPT()_VQ()_MISC_INT[IRDE] or
* CPT()_VQ()_MISC_INT[DOVF] are set by hardware or software via
* CPT()_VQ()_MISC_INT_W1S, then CPT()_VQ()_CTL[ENA] is cleared. Due to
* pipelining, additional instructions may have been processed between the
* instruction causing the error and the next instruction in the disabled
* queue (the instruction at CPT()_VQ()_SADDR).
* 1 = Ignore errors and continue processing instructions.
* For diagnostic use only.
* inst_free:1 [9:9](R/W) Instruction FPA free. When set, when CPT reaches the
* end of an instruction chunk, that chunk will be freed to the FPA.
* inst_be:1 [8:8](R/W) Instruction big-endian control. When set, instructions,
* instruction next chunk pointers, and result structures are stored in
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
* which if the line hits and is is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
* line will not be written back when replaced. This improves
* performance, but software must not read the instructions after they are
* posted to the hardware. Reads that do not consume the last word of a
* cache line always use LDI.
* reserved_4_6:3 [6:4] Reserved.
* grp:3; [3:1](R/W) Engine group.
* pri:1; [0:0](R/W) Queue priority.
* 1 = This queue has higher priority. Round-robin between higher
* priority queues.
* 0 = This queue has lower priority. Round-robin between lower
* priority queues.
*/
/*
 * Layout of CPT()_PF_Q()_CTL (field semantics documented in the block above).
 * Change only while the queue is quiescent — see CPT()_VQ()_INPROG[INFLIGHT].
 */
union otx_cptx_pf_qx_ctl {
	u64 u;	/* Raw 64-bit register value */
	struct otx_cptx_pf_qx_ctl_s {
#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
		u64 reserved_60_63:4;
		u64 aura:12;		/* FPA guest-aura for chunk frees */
		u64 reserved_45_47:3;
		u64 size:13;		/* Command-buffer segment size (8n+1) */
		u64 reserved_11_31:21;
		u64 cont_err:1;		/* Continue on error (diagnostic) */
		u64 inst_free:1;	/* Free instruction chunks to FPA */
		u64 inst_be:1;		/* Big-endian instruction format */
		u64 iqb_ldwb:1;		/* Use LDWB for last-word fetches */
		u64 reserved_4_6:3;
		u64 grp:3;		/* Engine group bound to this queue */
		u64 pri:1;		/* Queue priority (1 = high) */
#else /* Word 0 - Little Endian */
		u64 pri:1;		/* Queue priority (1 = high) */
		u64 grp:3;		/* Engine group bound to this queue */
		u64 reserved_4_6:3;
		u64 iqb_ldwb:1;		/* Use LDWB for last-word fetches */
		u64 inst_be:1;		/* Big-endian instruction format */
		u64 inst_free:1;	/* Free instruction chunks to FPA */
		u64 cont_err:1;		/* Continue on error (diagnostic) */
		u64 reserved_11_31:21;
		u64 size:13;		/* Command-buffer segment size (8n+1) */
		u64 reserved_45_47:3;
		u64 aura:12;		/* FPA guest-aura for chunk frees */
		u64 reserved_60_63:4;
#endif /* Word 0 - End */
	} s;
};
#endif /* __OTX_CPT_HW_TYPES_H */
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPTPF_H
#define __OTX_CPTPF_H
#include <linux/types.h>
#include <linux/device.h>
#include "otx_cptpf_ucode.h"
/*
 * OcteonTX CPT device structure — per-PF state, allocated in probe().
 */
struct otx_cpt_device {
	void __iomem *reg_base; /* Register start address (BAR0 mapping) */
	struct pci_dev *pdev; /* Pci device handle */
	struct otx_cpt_eng_grps eng_grps;/* Engine groups information */
	struct list_head list; /* NOTE(review): owning list not visible in this file */
	u8 pf_type; /* PF type SE or AE (enum otx_cptpf_type) */
	u8 max_vfs; /* Maximum number of VFs supported by the CPT */
	u8 vfs_enabled; /* Number of enabled VFs */
};
void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx);
void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt);
#endif /* __OTX_CPTPF_H */
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "otx_cpt_common.h"
#include "otx_cptpf.h"
#define DRV_NAME "octeontx-cpt"
#define DRV_VERSION "1.0"
static void otx_cpt_disable_mbox_interrupts(struct otx_cpt_device *cpt)
{
/* Disable mbox(0) interrupts for all VFs */
writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1CX(0));
}
static void otx_cpt_enable_mbox_interrupts(struct otx_cpt_device *cpt)
{
/* Enable mbox(0) interrupts for all VFs */
writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1SX(0));
}
/* IRQ entry point for the PF mailbox-0 MSI-X vector. */
static irqreturn_t otx_cpt_mbx0_intr_handler(int __always_unused irq,
					     void *cpt)
{
	struct otx_cpt_device *cptdev = cpt;

	/* Service all pending VF->PF messages on mailbox 0 */
	otx_cpt_mbox_intr_handler(cptdev, 0);

	return IRQ_HANDLED;
}
/* Trigger a block-level reset of the CPT via CPT()_PF_RESET. */
static void otx_cpt_reset(struct otx_cpt_device *cpt)
{
	writeq(1, cpt->reg_base + OTX_CPT_PF_RESET);
}
static void otx_cpt_find_max_enabled_cores(struct otx_cpt_device *cpt)
{
union otx_cptx_pf_constants pf_cnsts = {0};
pf_cnsts.u = readq(cpt->reg_base + OTX_CPT_PF_CONSTANTS);
cpt->eng_grps.avail.max_se_cnt = pf_cnsts.s.se;
cpt->eng_grps.avail.max_ae_cnt = pf_cnsts.s.ae;
}
static u32 otx_cpt_check_bist_status(struct otx_cpt_device *cpt)
{
union otx_cptx_pf_bist_status bist_sts = {0};
bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_BIST_STATUS);
return bist_sts.u;
}
static u64 otx_cpt_check_exe_bist_status(struct otx_cpt_device *cpt)
{
union otx_cptx_pf_exe_bist_status bist_sts = {0};
bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_EXE_BIST_STATUS);
return bist_sts.u;
}
/*
 * One-time hardware initialization at probe: reset the block, verify
 * BIST results, classify the PF personality (SE vs AE), discover the
 * VF count and quiesce all cores.  Returns 0 or -ENODEV on BIST failure.
 */
static int otx_cpt_device_init(struct otx_cpt_device *cpt)
{
	struct device *dev = &cpt->pdev->dev;
	u16 sdevid;
	u64 bist;
	/* Reset the PF when probed first */
	otx_cpt_reset(cpt);
	/* Give the block time to come out of reset before touching it */
	mdelay(100);
	pci_read_config_word(cpt->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	/* Check BIST status */
	bist = (u64)otx_cpt_check_bist_status(cpt);
	if (bist) {
		dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}
	bist = otx_cpt_check_exe_bist_status(cpt);
	if (bist) {
		dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}
	/* Get max enabled cores */
	otx_cpt_find_max_enabled_cores(cpt);
	/*
	 * Classify the PF by which engine type is absent: zero SE cores
	 * means an AE PF, zero AE cores means an SE PF.
	 * NOTE(review): if both counts are nonzero, pf_type keeps its
	 * kzalloc'ed value 0 — confirm that cannot occur on real silicon.
	 */
	if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
	    (cpt->eng_grps.avail.max_se_cnt == 0)) {
		cpt->pf_type = OTX_CPT_AE;
	} else if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
		   (cpt->eng_grps.avail.max_ae_cnt == 0)) {
		cpt->pf_type = OTX_CPT_SE;
	}
	/* Get max VQs/VFs supported by the device */
	cpt->max_vfs = pci_sriov_get_totalvfs(cpt->pdev);
	/* Disable all cores */
	otx_cpt_disable_all_cores(cpt);
	return 0;
}
/*
 * Allocate the PF's MSI-X vectors, install the mailbox-0 IRQ handler
 * and unmask VF mailbox interrupts.
 * Returns 0 on success or a negative errno; on failure all vectors
 * allocated here are released.
 *
 * Fix: use the local 'dev' consistently for dev_err() instead of
 * mixing it with &cpt->pdev->dev.
 */
static int otx_cpt_register_interrupts(struct otx_cpt_device *cpt)
{
	struct device *dev = &cpt->pdev->dev;
	u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
	u32 num_vec = OTX_CPT_PF_MSIX_VECTORS;
	int ret;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cpt->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			num_vec);
		return ret;
	}

	/* Register mailbox interrupt handlers */
	ret = request_irq(pci_irq_vector(cpt->pdev,
			  OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
			  otx_cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
	if (ret) {
		dev_err(dev, "Request irq failed\n");
		pci_free_irq_vectors(cpt->pdev);
		return ret;
	}

	/* Enable mailbox interrupt */
	otx_cpt_enable_mbox_interrupts(cpt);
	return 0;
}
/*
 * Undo otx_cpt_register_interrupts(): mask mailbox interrupts, free the
 * mailbox-0 IRQ and release the MSI-X vectors.
 */
static void otx_cpt_unregister_interrupts(struct otx_cpt_device *cpt)
{
	int mbox0_irq = pci_irq_vector(cpt->pdev,
			OTX_CPT_PF_INT_VEC_E_MBOXX(OTX_CPT_PF_MBOX_INT, 0));

	/* Mask further mailbox interrupts before removing the handler */
	otx_cpt_disable_mbox_interrupts(cpt);
	free_irq(mbox0_irq, cpt);
	pci_free_irq_vectors(cpt->pdev);
}
/*
 * sriov_configure callback: enable (numvfs > 0) or disable (numvfs == 0)
 * SR-IOV VFs.  Per the PCI sriov_configure contract, returns the number
 * of VFs actually enabled, 0 on disable, or a negative errno.
 */
static int otx_cpt_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
	int ret = 0;
	/* Clamp the request to what the hardware supports */
	if (numvfs > cpt->max_vfs)
		numvfs = cpt->max_vfs;
	if (numvfs > 0) {
		/* Default engine groups must exist before VFs can bind */
		ret = otx_cpt_try_create_default_eng_grps(cpt->pdev,
							  &cpt->eng_grps,
							  cpt->pf_type);
		if (ret)
			return ret;
		cpt->vfs_enabled = numvfs;
		ret = pci_enable_sriov(pdev, numvfs);
		if (ret) {
			cpt->vfs_enabled = 0;
			return ret;
		}
		/* Freeze engine-group configuration while VFs are active */
		otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, true);
		/* NOTE(review): try_module_get() result ignored — confirm intended */
		try_module_get(THIS_MODULE);
		ret = numvfs;
	} else {
		pci_disable_sriov(pdev);
		/* Allow engine-group reconfiguration again */
		otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, false);
		module_put(THIS_MODULE);
		cpt->vfs_enabled = 0;
	}
	dev_notice(&cpt->pdev->dev, "VFs enabled: %d\n", ret);
	return ret;
}
/*
 * PCI probe: enable the device, map BAR0 configuration registers,
 * initialize the CPT hardware, register interrupts and set up the
 * microcode engine-group framework.  All acquired resources are
 * unwound in reverse order on failure.
 *
 * Fix: the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * pair is replaced by a single dma_set_mask_and_coherent() call, which
 * sets both the streaming and coherent 48-bit masks.
 */
static int otx_cpt_probe(struct pci_dev *pdev,
			 const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cpt_device *cpt;
	int err;

	/* Managed allocation: freed automatically on driver detach */
	cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
	if (!cpt)
		return -ENOMEM;

	pci_set_drvdata(pdev, cpt);
	cpt->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_clear_drvdata;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* The CPT DMAs with 48-bit addresses (streaming and coherent) */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	cpt->reg_base = pci_iomap(pdev, OTX_CPT_PF_PCI_CFG_BAR, 0);
	if (!cpt->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* CPT device HW initialization */
	err = otx_cpt_device_init(cpt);
	if (err)
		goto err_unmap_region;

	/* Register interrupts */
	err = otx_cpt_register_interrupts(cpt);
	if (err)
		goto err_unmap_region;

	/* Initialize engine groups */
	err = otx_cpt_init_eng_grps(pdev, &cpt->eng_grps, cpt->pf_type);
	if (err)
		goto err_unregister_interrupts;

	return 0;

err_unregister_interrupts:
	otx_cpt_unregister_interrupts(cpt);
err_unmap_region:
	pci_iounmap(pdev, cpt->reg_base);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}
/*
 * PCI remove: tear down everything probe() set up, in reverse order.
 * VFs are disabled first so no mailbox traffic arrives while the PF
 * interrupts and engine groups are being dismantled.
 */
static void otx_cpt_remove(struct pci_dev *pdev)
{
	struct otx_cpt_device *cpt = pci_get_drvdata(pdev);

	if (!cpt)
		return;

	pci_disable_sriov(pdev);		/* Disable VFs */
	otx_cpt_cleanup_eng_grps(pdev, &cpt->eng_grps);	/* Cleanup engine groups */
	otx_cpt_unregister_interrupts(cpt);	/* Disable CPT PF interrupts */
	otx_cpt_disable_all_cores(cpt);		/* Disengage SE and AE cores */
	pci_iounmap(pdev, cpt->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* Supported devices */
static const struct pci_device_id otx_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX_CPT_PCI_PF_DEVICE_ID) },
	{ 0, } /* end of table */
};

/* PCI driver glue; module init/exit generated by module_pci_driver() */
static struct pci_driver otx_cpt_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cpt_id_table,
	.probe = otx_cpt_probe,
	.remove = otx_cpt_remove,
	.sriov_configure = otx_cpt_sriov_configure
};

module_pci_driver(otx_cpt_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cpt_id_table);
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "otx_cpt_common.h"
#include "otx_cptpf.h"
/*
 * Map a VF-PF mailbox opcode to a printable name for debug logging.
 * Returns "Unknown" for unrecognized opcodes.
 *
 * Fix: the function returns string literals, so the return type is now
 * const-qualified (was plain char *, which invites accidental writes
 * into read-only storage).
 */
static const char *get_mbox_opcode_str(int msg_opcode)
{
	const char *str = "Unknown";

	switch (msg_opcode) {
	case OTX_CPT_MSG_VF_UP:
		str = "UP";
		break;

	case OTX_CPT_MSG_VF_DOWN:
		str = "DOWN";
		break;

	case OTX_CPT_MSG_READY:
		str = "READY";
		break;

	case OTX_CPT_MSG_QLEN:
		str = "QLEN";
		break;

	case OTX_CPT_MSG_QBIND_GRP:
		str = "QBIND_GRP";
		break;

	case OTX_CPT_MSG_VQ_PRIORITY:
		str = "VQ_PRIORITY";
		break;

	case OTX_CPT_MSG_PF_TYPE:
		str = "PF_TYPE";
		break;

	case OTX_CPT_MSG_ACK:
		str = "ACK";
		break;

	case OTX_CPT_MSG_NACK:
		str = "NACK";
		break;
	}

	return str;
}
/*
 * Emit a debug trace of one mailbox message: decoded opcode name plus a
 * hex dump of the raw msg/data words.  vf_id >= 0 identifies the sending
 * VF; a negative vf_id means the message came from the PF side.
 *
 * Fix: added the trailing '\n' to both pr_debug() format strings so log
 * lines are terminated instead of being concatenated by the printk core.
 */
static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
{
	char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];

	/* Render the two 64-bit mailbox words as grouped hex */
	hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
			   raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
	if (vf_id >= 0)
		pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
			 get_mbox_opcode_str(mbox_msg->msg), vf_id,
			 raw_data_str);
	else
		pr_debug("MBOX opcode %s received from PF raw_data %s\n",
			 get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
/*
 * Deliver a mailbox message to a VF.  The write order is significant:
 * data (MBOX[1]) must be in place before the msg word, because writing
 * MBOX[0] is what raises the interrupt on the VF side.
 */
static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf,
				   struct otx_cpt_mbox *mbx)
{
	/* Writing mbox(0) causes interrupt */
	writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
	writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
}
/*
 * ACKs VF's mailbox message.
 * @vf: VF to which the ACK is sent.  Reuses the caller's mailbox buffer.
 */
static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf,
				  struct otx_cpt_mbox *mbx)
{
	mbx->msg = OTX_CPT_MSG_ACK;
	mbx->data = 0ull;
	otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}
/*
 * NACKs a VF's mailbox message: tells the VF the PF could not complete
 * the requested action.  Reuses the caller's mailbox buffer.
 */
static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf,
				     struct otx_cpt_mbox *mbx)
{
	mbx->msg = OTX_CPT_MSG_NACK;
	mbx->data = 0ull;
	otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}
/* Acknowledge (clear) the pending mailbox interrupt bit for one VF. */
static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf)
{
	u64 vf_bit = 1ull << vf;

	/* Write-1-to-clear for the VF's bit */
	writeq(vf_bit, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
}
/*
 * Configure QLEN/Chunk sizes for a VF's queue via a read-modify-write
 * of CPT()_PF_Q()_CTL; only the SIZE and CONT_ERR fields are changed.
 */
static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf,
				    u32 size)
{
	union otx_cptx_pf_qx_ctl qx_ctl;

	qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
	qx_ctl.s.size = size;
	qx_ctl.s.cont_err = true;
	writeq(qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}
/*
 * Configure a VF's virtual-queue priority via a read-modify-write of
 * CPT()_PF_Q()_CTL; only the PRI field is changed.
 */
static void otx_cpt_cfg_vq_priority(struct otx_cpt_device *cpt, int vf, u32 pri)
{
	union otx_cptx_pf_qx_ctl qx_ctl;

	qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
	qx_ctl.s.pri = pri;
	writeq(qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}
/*
 * Bind virtual queue @q to engine group @grp and report which engine
 * type (SE/AE) the group's microcode serves, so the VF knows what kind
 * of work it may submit.  Returns OTX_CPT_SE_TYPES / OTX_CPT_AE_TYPES,
 * BAD_OTX_CPTVF_TYPE if the ucode supports neither, or -EINVAL on a
 * bad queue/group index or a disabled group.
 */
static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
{
	struct device *dev = &cpt->pdev->dev;
	struct otx_cpt_eng_grp_info *eng_grp;
	union otx_cptx_pf_qx_ctl pf_qx_ctl;
	struct otx_cpt_ucode *ucode;
	if (q >= cpt->max_vfs) {
		dev_err(dev, "Requested queue %d is > than maximum avail %d",
			q, cpt->max_vfs);
		return -EINVAL;
	}
	if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Requested group %d is > than maximum avail %d",
			grp, OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}
	eng_grp = &cpt->eng_grps.grp[grp];
	if (!eng_grp->is_enabled) {
		dev_err(dev, "Requested engine group %d is disabled", grp);
		return -EINVAL;
	}
	/* Program the queue's GRP field (read-modify-write) */
	pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
	pf_qx_ctl.s.grp = grp;
	writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
	/* A mirroring group uses the mirrored group's microcode */
	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_SE_TYPES))
		return OTX_CPT_SE_TYPES;
	else if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_AE_TYPES))
		return OTX_CPT_AE_TYPES;
	else
		return BAD_OTX_CPTVF_TYPE;
}
/*
 * Interrupt handler to handle mailbox messages from VFs.
 * Reads the VF's request out of the two mailbox registers, dispatches on
 * the opcode and replies to the VF (with a result, an ACK, or a NACK on
 * failure).  Unknown opcodes are logged and dropped without a reply.
 */
static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
{
	int vftype = 0;
	struct otx_cpt_mbox mbx = {};
	struct device *dev = &cpt->pdev->dev;
	/*
	 * MBOX[0] contains msg
	 * MBOX[1] contains data
	 */
	mbx.msg = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
	mbx.data = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
	dump_mbox_msg(&mbx, vf);
	switch (mbx.msg) {
	case OTX_CPT_MSG_VF_UP:
		/* Reply with the number of VFs currently enabled */
		mbx.msg = OTX_CPT_MSG_VF_UP;
		mbx.data = cpt->vfs_enabled;
		otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_READY:
		/* Reply with the VF's own index */
		mbx.msg = OTX_CPT_MSG_READY;
		mbx.data = vf;
		otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_VF_DOWN:
		/* First msg in VF teardown sequence */
		otx_cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_QLEN:
		otx_cpt_cfg_qlen_for_vf(cpt, vf, mbx.data);
		otx_cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_QBIND_GRP:
		/* Bind the VF's queue to the requested engine group */
		vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
		if ((vftype != OTX_CPT_AE_TYPES) &&
		    (vftype != OTX_CPT_SE_TYPES)) {
			dev_err(dev, "VF%d binding to eng group %llu failed",
				vf, mbx.data);
			otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
		} else {
			/* On success, report the group's engine type */
			mbx.msg = OTX_CPT_MSG_QBIND_GRP;
			mbx.data = vftype;
			otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		}
		break;
	case OTX_CPT_MSG_PF_TYPE:
		/* Reply with the PF personality (SE or AE) */
		mbx.msg = OTX_CPT_MSG_PF_TYPE;
		mbx.data = cpt->pf_type;
		otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_VQ_PRIORITY:
		otx_cpt_cfg_vq_priority(cpt, vf, mbx.data);
		otx_cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	default:
		dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
			vf, mbx.msg);
		break;
	}
}
/*
 * Top-level PF mailbox interrupt service: walk the pending-bit mask
 * (bit N set means VF N has a message waiting), handle each pending
 * VF's message and acknowledge its interrupt bit.
 *
 * Fix: removed the space between the function name and '(' — a
 * checkpatch ERROR in kernel coding style.
 */
void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx)
{
	u64 intr;
	u8  vf;

	intr = readq(cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
	pr_debug("PF interrupt mbox%d mask 0x%llx\n", mbx, intr);
	for (vf = 0; vf < cpt->max_vfs; vf++) {
		if (intr & (1ULL << vf)) {
			otx_cpt_handle_mbox_intr(cpt, vf);
			otx_cpt_clear_mbox_intr(cpt, vf);
		}
	}
}
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPTPF_UCODE_H
#define __OTX_CPTPF_UCODE_H
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/module.h>
#include "otx_cpt_hw_types.h"
/* CPT ucode name maximum length */
#define OTX_CPT_UCODE_NAME_LENGTH 64
/*
* On OcteonTX 83xx platform, only one type of engines is allowed to be
* attached to an engine group.
*/
#define OTX_CPT_MAX_ETYPES_PER_GRP 1
/* Default tar archive file names */
#define OTX_CPT_UCODE_TAR_FILE_NAME "cpt8x-mc.tar"
/* CPT ucode alignment */
#define OTX_CPT_UCODE_ALIGNMENT 128
/* CPT ucode signature size */
#define OTX_CPT_UCODE_SIGN_LEN 256
/* Microcode version string length */
#define OTX_CPT_UCODE_VER_STR_SZ 44
/* Maximum number of supported engines/cores on OcteonTX 83XX platform */
#define OTX_CPT_MAX_ENGINES 64
#define OTX_CPT_ENGS_BITMASK_LEN (OTX_CPT_MAX_ENGINES/(BITS_PER_BYTE * \
sizeof(unsigned long)))
/* Microcode types */
enum otx_cpt_ucode_type {
OTX_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
OTX_CPT_SE_UC_TYPE1 = 20, /* SE-MAIN - combination of 21 and 22 */
OTX_CPT_SE_UC_TYPE2 = 21, /* Fast Path IPSec + AirCrypto */
OTX_CPT_SE_UC_TYPE3 = 22, /*
* Hash + HMAC + FlexiCrypto + RNG + Full
* Feature IPSec + AirCrypto + Kasumi
*/
};
struct otx_cpt_bitmap {
unsigned long bits[OTX_CPT_ENGS_BITMASK_LEN];
int size;
};
struct otx_cpt_engines {
int type;
int count;
};
/* Microcode version number */
struct otx_cpt_ucode_ver_num {
u8 nn;
u8 xx;
u8 yy;
u8 zz;
};
struct otx_cpt_ucode_hdr {
struct otx_cpt_ucode_ver_num ver_num;
u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];
u32 code_length;
u32 padding[3];
};
/* One loaded microcode image plus its DMA-mapping metadata */
struct otx_cpt_ucode {
	u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];/*
					      * ucode version in readable format
					      */
	struct otx_cpt_ucode_ver_num ver_num;/* ucode version number */
	char filename[OTX_CPT_UCODE_NAME_LENGTH]; /* ucode filename */
	dma_addr_t dma;		/* phys address of ucode image */
	dma_addr_t align_dma;	/* aligned phys address of ucode image */
	void *va;		/* virt address of ucode image */
	void *align_va;		/* aligned virt address of ucode image */
	u32 size;		/* ucode image size */
	int type;		/* ucode image type SE or AE */
};
struct tar_ucode_info_t {
struct list_head list;
struct otx_cpt_ucode ucode;/* microcode information */
const u8 *ucode_ptr; /* pointer to microcode in tar archive */
};
/* Maximum and current number of engines available for all engine groups */
struct otx_cpt_engs_available {
int max_se_cnt;
int max_ae_cnt;
int se_cnt;
int ae_cnt;
};
/* Engines reserved to an engine group */
struct otx_cpt_engs_rsvd {
int type; /* engine type */
int count; /* number of engines attached */
int offset; /* constant offset of engine type in the bitmap */
unsigned long *bmap; /* attached engines bitmap */
struct otx_cpt_ucode *ucode; /* ucode used by these engines */
};
struct otx_cpt_mirror_info {
int is_ena; /*
* is mirroring enabled, it is set only for engine
* group which mirrors another engine group
*/
int idx; /*
* index of engine group which is mirrored by this
* group, set only for engine group which mirrors
* another group
*/
int ref_count; /*
* number of times this engine group is mirrored by
* other groups, this is set only for engine group
* which is mirrored by other group(s)
*/
};
struct otx_cpt_eng_grp_info {
struct otx_cpt_eng_grps *g; /* pointer to engine_groups structure */
struct device_attribute info_attr; /* group info entry attr */
/* engines attached */
struct otx_cpt_engs_rsvd engs[OTX_CPT_MAX_ETYPES_PER_GRP];
/* Microcode information */
struct otx_cpt_ucode ucode[OTX_CPT_MAX_ETYPES_PER_GRP];
/* sysfs info entry name */
char sysfs_info_name[OTX_CPT_UCODE_NAME_LENGTH];
/* engine group mirroring information */
struct otx_cpt_mirror_info mirror;
int idx; /* engine group index */
bool is_enabled; /*
* is engine group enabled, engine group is enabled
* when it has engines attached and ucode loaded
*/
};
/* Top-level engine-group bookkeeping for one CPT device */
struct otx_cpt_eng_grps {
	struct otx_cpt_eng_grp_info grp[OTX_CPT_MAX_ENGINE_GROUPS];
	struct device_attribute ucode_load_attr;/* ucode load attr */
	struct otx_cpt_engs_available avail; /* per-type engine availability */
	struct mutex lock; /* serializes engine-group configuration changes */
	void *obj;
	int engs_num;			/* total number of engines supported */
	int eng_types_supported;	/* engine types supported SE, AE */
	u8 eng_ref_cnt[OTX_CPT_MAX_ENGINES];/* engines reference count */
	bool is_ucode_load_created;	/* is ucode_load sysfs entry created */
	bool is_first_try; /* is this first try to create kcrypto engine grp */
	bool is_rdonly;	/* whether engine-group configuration is frozen (VFs active) */
};
int otx_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps, int pf_type);
void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps);
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps,
int pf_type);
void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
bool is_rdonly);
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type);
int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
int eng_type);
#endif /* __OTX_CPTPF_UCODE_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment