Commit 720419f0 authored by Brijesh Singh, committed by Herbert Xu

crypto: ccp - Introduce the AMD Secure Processor device

The CCP device is part of the AMD Secure Processor. In order to expand
the usage of the AMD Secure Processor, create a framework that allows
functional components of the AMD Secure Processor to be initialized and
handled appropriately.
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 970e8303
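
At a high level, the patch below turns the old monolithic CCP entry points into a small dispatch layer: the bus code (PCI/platform) now probes a struct sp_device, and sp_init() brings up whichever functional blocks the matched sp_dev_vdata describes. The following is only a condensed sketch of that pattern, boiled down from the sp_init()/sp_dev_vdata code added by this patch (error handling and locking omitted); see the new sp-dev.c file further down for the real implementation.

	/* Illustrative sketch only -- condensed from the sp-dev.c code added below. */
	int sp_init(struct sp_device *sp)
	{
		sp_add_device(sp);		/* track the new Secure Processor unit */

		if (sp->dev_vdata->ccp_vdata)	/* does this device describe a CCP block? */
			ccp_dev_init(sp);	/* then initialize the CCP sub-device */

		/* future blocks (e.g. a PSP) would be probed here the same way */
		return 0;
	}
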
@@ -540,11 +540,11 @@ config CRYPTO_DEV_ATMEL_ECC
 	  will be called atmel-ecc.
 
 config CRYPTO_DEV_CCP
-	bool "Support for AMD Cryptographic Coprocessor"
+	bool "Support for AMD Secure Processor"
 	depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
 	help
-	  The AMD Cryptographic Coprocessor provides hardware offload support
-	  for encryption, hashing and related operations.
+	  The AMD Secure Processor provides support for the Cryptographic Coprocessor
+	  (CCP) and the Platform Security Processor (PSP) devices.
 
 if CRYPTO_DEV_CCP
 	source "drivers/crypto/ccp/Kconfig"
...
 config CRYPTO_DEV_CCP_DD
-	tristate "Cryptographic Coprocessor device driver"
-	depends on CRYPTO_DEV_CCP
+	tristate "Secure Processor device driver"
 	default m
+	help
+	  Provides AMD Secure Processor device driver.
+	  If you choose 'M' here, this module will be called ccp.
+
+config CRYPTO_DEV_SP_CCP
+	bool "Cryptographic Coprocessor device"
+	default y
+	depends on CRYPTO_DEV_CCP_DD
 	select HW_RANDOM
 	select DMA_ENGINE
 	select DMADEVICES
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	help
-	  Provides the interface to use the AMD Cryptographic Coprocessor
-	  which can be used to offload encryption operations such as SHA,
-	  AES and more. If you choose 'M' here, this module will be called
-	  ccp.
+	  Provides the support for AMD Cryptographic Coprocessor (CCP) device
+	  which can be used to offload encryption operations such as SHA, AES
+	  and more.
 
 config CRYPTO_DEV_CCP_CRYPTO
 	tristate "Encryption and hashing offload support"
-	depends on CRYPTO_DEV_CCP_DD
 	default m
+	depends on CRYPTO_DEV_CCP_DD
+	depends on CRYPTO_DEV_SP_CCP
 	select CRYPTO_HASH
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AUTHENC
...
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o \
+ccp-objs := sp-dev.o ccp-platform.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
 	    ccp-ops.o \
 	    ccp-dev-v3.o \
 	    ccp-dev-v5.o \
-	    ccp-platform.o \
 	    ccp-dmaengine.o \
 	    ccp-debugfs.o
 
 ccp-$(CONFIG_PCI) += ccp-pci.o
...
@@ -359,8 +359,7 @@ static void ccp_irq_bh(unsigned long data)
 
 static irqreturn_t ccp_irq_handler(int irq, void *data)
 {
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct ccp_device *ccp = (struct ccp_device *)data;
 
 	ccp_disable_queue_interrupts(ccp);
 	if (ccp->use_tasklet)
@@ -597,6 +596,5 @@ const struct ccp_vdata ccpv3 = {
 	.version = CCP_VERSION(3, 0),
 	.setup = NULL,
 	.perform = &ccp3_actions,
-	.bar = 2,
 	.offset = 0x20000,
 };
@@ -769,8 +769,7 @@ static void ccp5_irq_bh(unsigned long data)
 
 static irqreturn_t ccp5_irq_handler(int irq, void *data)
 {
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct ccp_device *ccp = (struct ccp_device *)data;
 
 	ccp5_disable_queue_interrupts(ccp);
 	ccp->total_interrupts++;
@@ -1113,7 +1112,6 @@ const struct ccp_vdata ccpv5a = {
 	.version = CCP_VERSION(5, 0),
 	.setup = ccp5_config,
 	.perform = &ccp5_actions,
-	.bar = 2,
 	.offset = 0x0,
 };
@@ -1122,6 +1120,5 @@ const struct ccp_vdata ccpv5b = {
 	.dma_chan_attr = DMA_PRIVATE,
 	.setup = ccp5other_config,
 	.perform = &ccp5_actions,
-	.bar = 2,
 	.offset = 0x0,
 };
@@ -111,13 +111,6 @@ static LIST_HEAD(ccp_units);
 static DEFINE_SPINLOCK(ccp_rr_lock);
 static struct ccp_device *ccp_rr;
 
-/* Ever-increasing value to produce unique unit numbers */
-static atomic_t ccp_unit_ordinal;
-static unsigned int ccp_increment_unit_ordinal(void)
-{
-	return atomic_inc_return(&ccp_unit_ordinal);
-}
-
 /**
  * ccp_add_device - add a CCP device to the list
  *
@@ -465,14 +458,17 @@ int ccp_cmd_queue_thread(void *data)
  *
  * @dev: device struct of the CCP
  */
-struct ccp_device *ccp_alloc_struct(struct device *dev)
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
 {
+	struct device *dev = sp->dev;
 	struct ccp_device *ccp;
 
 	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
 	if (!ccp)
 		return NULL;
 	ccp->dev = dev;
+	ccp->sp = sp;
+	ccp->axcache = sp->axcache;
 
 	INIT_LIST_HEAD(&ccp->cmd);
 	INIT_LIST_HEAD(&ccp->backlog);
@@ -487,9 +483,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 	init_waitqueue_head(&ccp->sb_queue);
 	init_waitqueue_head(&ccp->suspend_queue);
 
-	ccp->ord = ccp_increment_unit_ordinal();
-	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
-	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
+	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
+	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);
 
 	return ccp;
 }
@@ -540,8 +535,9 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 	return ccp->cmd_q_count == suspended;
 }
 
-int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state)
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
 {
+	struct ccp_device *ccp = sp->ccp_data;
 	unsigned long flags;
 	unsigned int i;
@@ -563,8 +559,9 @@ int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state)
 	return 0;
 }
 
-int ccp_dev_resume(struct ccp_device *ccp)
+int ccp_dev_resume(struct sp_device *sp)
 {
+	struct ccp_device *ccp = sp->ccp_data;
 	unsigned long flags;
 	unsigned int i;
@@ -584,71 +581,54 @@ int ccp_dev_resume(struct ccp_device *ccp)
 }
 #endif
 
-int ccp_dev_init(struct ccp_device *ccp)
+int ccp_dev_init(struct sp_device *sp)
 {
-	ccp->io_regs = ccp->io_map + ccp->vdata->offset;
+	struct device *dev = sp->dev;
+	struct ccp_device *ccp;
+	int ret;
 
-	if (ccp->vdata->setup)
-		ccp->vdata->setup(ccp);
+	ret = -ENOMEM;
+	ccp = ccp_alloc_struct(sp);
+	if (!ccp)
+		goto e_err;
+	sp->ccp_data = ccp;
 
-	return ccp->vdata->perform->init(ccp);
-}
+	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
+	if (!ccp->vdata || !ccp->vdata->version) {
+		ret = -ENODEV;
+		dev_err(dev, "missing driver data\n");
+		goto e_err;
+	}
 
-void ccp_dev_destroy(struct ccp_device *ccp)
-{
-	if (!ccp)
-		return;
+	ccp->get_irq = sp->get_irq;
+	ccp->free_irq = sp->free_irq;
 
-	ccp->vdata->perform->destroy(ccp);
-}
+	ccp->io_regs = sp->io_map + ccp->vdata->offset;
+	if (ccp->vdata->setup)
+		ccp->vdata->setup(ccp);
 
-static int __init ccp_mod_init(void)
-{
-#ifdef CONFIG_X86
-	int ret;
+	ret = ccp->vdata->perform->init(ccp);
+	if (ret)
+		goto e_err;
 
-	ret = ccp_pci_init();
-	if (ret)
-		return ret;
+	dev_notice(dev, "ccp enabled\n");
 
-	/* Don't leave the driver loaded if init failed */
-	if (ccp_present() != 0) {
-		ccp_pci_exit();
-		return -ENODEV;
-	}
-
-	return 0;
-#endif
-
-#ifdef CONFIG_ARM64
-	int ret;
-
-	ret = ccp_platform_init();
-	if (ret)
-		return ret;
-
-	/* Don't leave the driver loaded if init failed */
-	if (ccp_present() != 0) {
-		ccp_platform_exit();
-		return -ENODEV;
-	}
-
-	return 0;
-#endif
+	return 0;
 
-	return -ENODEV;
-}
+e_err:
+	sp->ccp_data = NULL;
 
-static void __exit ccp_mod_exit(void)
-{
-#ifdef CONFIG_X86
-	ccp_pci_exit();
-#endif
+	dev_notice(dev, "ccp initialization failed\n");
 
-#ifdef CONFIG_ARM64
-	ccp_platform_exit();
-#endif
-}
+	return ret;
+}
 
-module_init(ccp_mod_init);
-module_exit(ccp_mod_exit);
+void ccp_dev_destroy(struct sp_device *sp)
+{
+	struct ccp_device *ccp = sp->ccp_data;
+
+	if (!ccp)
+		return;
+
+	ccp->vdata->perform->destroy(ccp);
+}
@@ -27,6 +27,8 @@
 #include <linux/irqreturn.h>
 #include <linux/dmaengine.h>
 
+#include "sp-dev.h"
+
 #define MAX_CCP_NAME_LEN		16
 #define MAX_DMAPOOL_NAME_LEN		32
@@ -344,6 +346,7 @@ struct ccp_device {
 	char rngname[MAX_CCP_NAME_LEN];
 
 	struct device *dev;
+	struct sp_device *sp;
 
 	/* Bus specific device information
 	 */
@@ -362,7 +365,6 @@ struct ccp_device {
 	 * them.
 	 */
 	struct mutex req_mutex ____cacheline_aligned;
-	void __iomem *io_map;
 	void __iomem *io_regs;
 
 	/* Master lists that all cmds are queued on. Because there can be
@@ -637,7 +639,7 @@ void ccp_del_device(struct ccp_device *ccp);
 
 extern void ccp_log_error(struct ccp_device *, int);
 
-struct ccp_device *ccp_alloc_struct(struct device *dev);
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
 bool ccp_queues_suspended(struct ccp_device *ccp);
 int ccp_cmd_queue_thread(void *data);
 int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
@@ -652,11 +654,6 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
 void ccp5_debugfs_setup(struct ccp_device *ccp);
 void ccp5_debugfs_destroy(void);
 
-int ccp_dev_init(struct ccp_device *ccp);
-void ccp_dev_destroy(struct ccp_device *ccp);
-int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state);
-int ccp_dev_resume(struct ccp_device *ccp);
-
 /* Structure for computation functions that are device-specific */
 struct ccp_actions {
 	int (*aes)(struct ccp_op *);
@@ -674,16 +671,6 @@ struct ccp_actions {
 	irqreturn_t (*irqhandler)(int, void *);
 };
 
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
-	const unsigned int version;
-	const unsigned int dma_chan_attr;
-	void (*setup)(struct ccp_device *);
-	const struct ccp_actions *perform;
-	const unsigned int bar;
-	const unsigned int offset;
-};
-
 extern const struct ccp_vdata ccpv3_platform;
 extern const struct ccp_vdata ccpv3;
 extern const struct ccp_vdata ccpv5a;
...
@@ -40,7 +40,8 @@ struct ccp_pci {
 
 static int ccp_get_msix_irqs(struct ccp_device *ccp)
 {
-	struct ccp_pci *ccp_pci = ccp->dev_specific;
+	struct sp_device *sp = ccp->sp;
+	struct ccp_pci *ccp_pci = sp->dev_specific;
 	struct device *dev = ccp->dev;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct msix_entry msix_entry[MSIX_VECTORS];
@@ -58,11 +59,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 	for (v = 0; v < ccp_pci->msix_count; v++) {
 		/* Set the interrupt names and request the irqs */
 		snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
-			 ccp->name, v);
+			 sp->name, v);
 		ccp_pci->msix[v].vector = msix_entry[v].vector;
 		ret = request_irq(ccp_pci->msix[v].vector,
 				  ccp->vdata->perform->irqhandler,
-				  0, ccp_pci->msix[v].name, dev);
+				  0, ccp_pci->msix[v].name, ccp);
 		if (ret) {
 			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
 				   ret);
@@ -86,6 +87,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 
 static int ccp_get_msi_irq(struct ccp_device *ccp)
 {
+	struct sp_device *sp = ccp->sp;
 	struct device *dev = ccp->dev;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	int ret;
@@ -96,7 +98,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
 
 	ccp->irq = pdev->irq;
 	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
-			  ccp->name, dev);
+			  sp->name, ccp);
 	if (ret) {
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
@@ -134,17 +136,18 @@ static int ccp_get_irqs(struct ccp_device *ccp)
 
 static void ccp_free_irqs(struct ccp_device *ccp)
 {
-	struct ccp_pci *ccp_pci = ccp->dev_specific;
+	struct sp_device *sp = ccp->sp;
+	struct ccp_pci *ccp_pci = sp->dev_specific;
 	struct device *dev = ccp->dev;
 	struct pci_dev *pdev = to_pci_dev(dev);
 
 	if (ccp_pci->msix_count) {
 		while (ccp_pci->msix_count--)
 			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
-				 dev);
+				 ccp);
 		pci_disable_msix(pdev);
 	} else if (ccp->irq) {
-		free_irq(ccp->irq, dev);
+		free_irq(ccp->irq, ccp);
 		pci_disable_msi(pdev);
 	}
 	ccp->irq = 0;
@@ -152,7 +155,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 
 static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	struct ccp_device *ccp;
+	struct sp_device *sp;
 	struct ccp_pci *ccp_pci;
 	struct device *dev = &pdev->dev;
 	void __iomem * const *iomap_table;
@@ -160,23 +163,23 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	int ret;
 
 	ret = -ENOMEM;
-	ccp = ccp_alloc_struct(dev);
-	if (!ccp)
+	sp = sp_alloc_struct(dev);
+	if (!sp)
 		goto e_err;
 
 	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
 	if (!ccp_pci)
 		goto e_err;
 
-	ccp->dev_specific = ccp_pci;
-	ccp->vdata = (struct ccp_vdata *)id->driver_data;
-	if (!ccp->vdata || !ccp->vdata->version) {
+	sp->dev_specific = ccp_pci;
+	sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
+	if (!sp->dev_vdata) {
 		ret = -ENODEV;
 		dev_err(dev, "missing driver data\n");
 		goto e_err;
 	}
-	ccp->get_irq = ccp_get_irqs;
-	ccp->free_irq = ccp_free_irqs;
+	sp->get_irq = ccp_get_irqs;
+	sp->free_irq = ccp_free_irqs;
 
 	ret = pcim_enable_device(pdev);
 	if (ret) {
@@ -198,8 +201,8 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto e_err;
 	}
 
-	ccp->io_map = iomap_table[ccp->vdata->bar];
-	if (!ccp->io_map) {
+	sp->io_map = iomap_table[sp->dev_vdata->bar];
+	if (!sp->io_map) {
 		dev_err(dev, "ioremap failed\n");
 		ret = -ENOMEM;
 		goto e_err;
@@ -217,9 +220,9 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		}
 	}
 
-	dev_set_drvdata(dev, ccp);
+	dev_set_drvdata(dev, sp);
 
-	ret = ccp_dev_init(ccp);
+	ret = sp_init(sp);
 	if (ret)
 		goto e_err;
@@ -235,12 +238,12 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void ccp_pci_remove(struct pci_dev *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct sp_device *sp = dev_get_drvdata(dev);
 
-	if (!ccp)
+	if (!sp)
 		return;
 
-	ccp_dev_destroy(ccp);
+	sp_destroy(sp);
 
 	dev_notice(dev, "disabled\n");
 }
@@ -249,24 +252,44 @@ static void ccp_pci_remove(struct pci_dev *pdev)
 static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct device *dev = &pdev->dev;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct sp_device *sp = dev_get_drvdata(dev);
 
-	return ccp_dev_suspend(ccp, state);
+	return sp_suspend(sp, state);
 }
 
 static int ccp_pci_resume(struct pci_dev *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct sp_device *sp = dev_get_drvdata(dev);
 
-	return ccp_dev_resume(ccp);
+	return sp_resume(sp);
 }
 #endif
 
+static const struct sp_dev_vdata dev_vdata[] = {
+	{
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv3,
+#endif
+	},
+	{
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5a,
+#endif
+	},
+	{
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5b,
+#endif
+	},
+};
+
 static const struct pci_device_id ccp_pci_table[] = {
-	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
-	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
-	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
+	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
+	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
+	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
 	/* Last entry must be zero */
 	{ 0, }
 };
...
@@ -35,26 +35,26 @@ struct ccp_platform {
 static const struct acpi_device_id ccp_acpi_match[];
 static const struct of_device_id ccp_of_match[];
 
-static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
+static struct sp_dev_vdata *ccp_get_of_version(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
 	const struct of_device_id *match;
 
 	match = of_match_node(ccp_of_match, pdev->dev.of_node);
 	if (match && match->data)
-		return (struct ccp_vdata *)match->data;
+		return (struct sp_dev_vdata *)match->data;
 #endif
 	return NULL;
 }
 
-static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
+static struct sp_dev_vdata *ccp_get_acpi_version(struct platform_device *pdev)
 {
 #ifdef CONFIG_ACPI
 	const struct acpi_device_id *match;
 
 	match = acpi_match_device(ccp_acpi_match, &pdev->dev);
 	if (match && match->driver_data)
-		return (struct ccp_vdata *)match->driver_data;
+		return (struct sp_dev_vdata *)match->driver_data;
 #endif
 	return NULL;
 }
@@ -73,7 +73,7 @@ static int ccp_get_irq(struct ccp_device *ccp)
 
 	ccp->irq = ret;
 	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
-			  ccp->name, dev);
+			  ccp->name, ccp);
 	if (ret) {
 		dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
 		return ret;
@@ -99,14 +99,12 @@ static int ccp_get_irqs(struct ccp_device *ccp)
 
 static void ccp_free_irqs(struct ccp_device *ccp)
 {
-	struct device *dev = ccp->dev;
-
-	free_irq(ccp->irq, dev);
+	free_irq(ccp->irq, ccp);
 }
 
 static int ccp_platform_probe(struct platform_device *pdev)
 {
-	struct ccp_device *ccp;
+	struct sp_device *sp;
 	struct ccp_platform *ccp_platform;
 	struct device *dev = &pdev->dev;
 	enum dev_dma_attr attr;
@@ -114,32 +112,31 @@ static int ccp_platform_probe(struct platform_device *pdev)
 	int ret;
 
 	ret = -ENOMEM;
-	ccp = ccp_alloc_struct(dev);
-	if (!ccp)
+	sp = sp_alloc_struct(dev);
+	if (!sp)
 		goto e_err;
 
 	ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
 	if (!ccp_platform)
 		goto e_err;
 
-	ccp->dev_specific = ccp_platform;
-	ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
+	sp->dev_specific = ccp_platform;
+	sp->dev_vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
 					 : ccp_get_acpi_version(pdev);
-	if (!ccp->vdata || !ccp->vdata->version) {
+	if (!sp->dev_vdata) {
 		ret = -ENODEV;
 		dev_err(dev, "missing driver data\n");
 		goto e_err;
 	}
-	ccp->get_irq = ccp_get_irqs;
-	ccp->free_irq = ccp_free_irqs;
+	sp->get_irq = ccp_get_irqs;
+	sp->free_irq = ccp_free_irqs;
 
 	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ccp->io_map = devm_ioremap_resource(dev, ior);
-	if (IS_ERR(ccp->io_map)) {
-		ret = PTR_ERR(ccp->io_map);
+	sp->io_map = devm_ioremap_resource(dev, ior);
+	if (IS_ERR(sp->io_map)) {
+		ret = PTR_ERR(sp->io_map);
 		goto e_err;
 	}
-	ccp->io_regs = ccp->io_map;
 
 	attr = device_get_dma_attr(dev);
 	if (attr == DEV_DMA_NOT_SUPPORTED) {
@@ -149,9 +146,9 @@ static int ccp_platform_probe(struct platform_device *pdev)
 
 	ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
 	if (ccp_platform->coherent)
-		ccp->axcache = CACHE_WB_NO_ALLOC;
+		sp->axcache = CACHE_WB_NO_ALLOC;
 	else
-		ccp->axcache = CACHE_NONE;
+		sp->axcache = CACHE_NONE;
 
 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (ret) {
@@ -159,9 +156,9 @@ static int ccp_platform_probe(struct platform_device *pdev)
 		goto e_err;
 	}
 
-	dev_set_drvdata(dev, ccp);
+	dev_set_drvdata(dev, sp);
 
-	ret = ccp_dev_init(ccp);
+	ret = sp_init(sp);
 	if (ret)
 		goto e_err;
@@ -177,9 +174,9 @@ static int ccp_platform_probe(struct platform_device *pdev)
 static int ccp_platform_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct sp_device *sp = dev_get_drvdata(dev);
 
-	ccp_dev_destroy(ccp);
+	sp_destroy(sp);
 
 	dev_notice(dev, "disabled\n");
@@ -191,23 +188,32 @@ static int ccp_platform_suspend(struct platform_device *pdev,
 				pm_message_t state)
 {
 	struct device *dev = &pdev->dev;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct sp_device *sp = dev_get_drvdata(dev);
 
-	return ccp_dev_suspend(ccp, state);
+	return sp_suspend(sp, state);
 }
 
 static int ccp_platform_resume(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
+	struct sp_device *sp = dev_get_drvdata(dev);
 
-	return ccp_dev_resume(ccp);
+	return sp_resume(sp);
 }
 #endif
 
+static const struct sp_dev_vdata dev_vdata[] = {
+	{
+		.bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv3_platform,
+#endif
+	},
+};
+
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id ccp_acpi_match[] = {
-	{ "AMDI0C00", (kernel_ulong_t)&ccpv3 },
+	{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
@@ -216,7 +222,7 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
 #ifdef CONFIG_OF
 static const struct of_device_id ccp_of_match[] = {
 	{ .compatible = "amd,ccp-seattle-v1a",
-	  .data = (const void *)&ccpv3_platform },
+	  .data = (const void *)&dev_vdata[0] },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ccp_of_match);
...
/*
* AMD Secure Processor driver
*
* Copyright (C) 2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/ccp.h>
#include "ccp-dev.h"
#include "sp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1.0");
MODULE_DESCRIPTION("AMD Secure Processor driver");
/* List of SPs, SP count, read-write access lock, and access functions
*
* Lock structure: get sp_unit_lock for reading whenever we need to
* examine the SP list.
*/
static DEFINE_RWLOCK(sp_unit_lock);
static LIST_HEAD(sp_units);
/* Ever-increasing value to produce unique unit numbers */
static atomic_t sp_ordinal;
static void sp_add_device(struct sp_device *sp)
{
unsigned long flags;
write_lock_irqsave(&sp_unit_lock, flags);
list_add_tail(&sp->entry, &sp_units);
write_unlock_irqrestore(&sp_unit_lock, flags);
}
static void sp_del_device(struct sp_device *sp)
{
unsigned long flags;
write_lock_irqsave(&sp_unit_lock, flags);
list_del(&sp->entry);
write_unlock_irqrestore(&sp_unit_lock, flags);
}
/**
* sp_alloc_struct - allocate and initialize the sp_device struct
*
* @dev: device struct of the SP
*/
struct sp_device *sp_alloc_struct(struct device *dev)
{
struct sp_device *sp;
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return NULL;
sp->dev = dev;
sp->ord = atomic_inc_return(&sp_ordinal);
snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord);
return sp;
}
int sp_init(struct sp_device *sp)
{
sp_add_device(sp);
if (sp->dev_vdata->ccp_vdata)
ccp_dev_init(sp);
return 0;
}
void sp_destroy(struct sp_device *sp)
{
if (sp->dev_vdata->ccp_vdata)
ccp_dev_destroy(sp);
sp_del_device(sp);
}
#ifdef CONFIG_PM
int sp_suspend(struct sp_device *sp, pm_message_t state)
{
int ret;
if (sp->dev_vdata->ccp_vdata) {
ret = ccp_dev_suspend(sp, state);
if (ret)
return ret;
}
return 0;
}
int sp_resume(struct sp_device *sp)
{
int ret;
if (sp->dev_vdata->ccp_vdata) {
ret = ccp_dev_resume(sp);
if (ret)
return ret;
}
return 0;
}
#endif
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
int ret;
ret = ccp_pci_init();
if (ret)
return ret;
/* Don't leave the driver loaded if init failed */
if (ccp_present() != 0) {
ccp_pci_exit();
return -ENODEV;
}
return 0;
#endif
#ifdef CONFIG_ARM64
int ret;
ret = ccp_platform_init();
if (ret)
return ret;
/* Don't leave the driver loaded if init failed */
if (ccp_present() != 0) {
ccp_platform_exit();
return -ENODEV;
}
return 0;
#endif
return -ENODEV;
}
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
ccp_pci_exit();
#endif
#ifdef CONFIG_ARM64
ccp_platform_exit();
#endif
}
module_init(sp_mod_init);
module_exit(sp_mod_exit);
/*
* AMD Secure Processor driver
*
* Copyright (C) 2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __SP_DEV_H__
#define __SP_DEV_H__
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/dmapool.h>
#include <linux/hw_random.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#define SP_MAX_NAME_LEN 32
#define CACHE_NONE 0x00
#define CACHE_WB_NO_ALLOC 0xb7
/* Structure to hold CCP device data */
struct ccp_device;
struct ccp_vdata {
const unsigned int version;
const unsigned int dma_chan_attr;
void (*setup)(struct ccp_device *);
const struct ccp_actions *perform;
const unsigned int offset;
};
/* Structure to hold SP device data */
struct sp_dev_vdata {
const unsigned int bar;
const struct ccp_vdata *ccp_vdata;
void *psp_vdata;
};
struct sp_device {
struct list_head entry;
struct device *dev;
struct sp_dev_vdata *dev_vdata;
unsigned int ord;
char name[SP_MAX_NAME_LEN];
/* Bus specific device information */
void *dev_specific;
/* I/O area used for device communication. */
void __iomem *io_map;
/* DMA caching attribute support */
unsigned int axcache;
bool irq_registered;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
void *ccp_data;
void *psp_data;
};
int sp_pci_init(void);
void sp_pci_exit(void);
int sp_platform_init(void);
void sp_platform_exit(void);
struct sp_device *sp_alloc_struct(struct device *dev);
int sp_init(struct sp_device *sp);
void sp_destroy(struct sp_device *sp);
struct sp_device *sp_get_master(void);
int sp_suspend(struct sp_device *sp, pm_message_t state);
int sp_resume(struct sp_device *sp);
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
int ccp_dev_init(struct sp_device *sp);
void ccp_dev_destroy(struct sp_device *sp);
int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
int ccp_dev_resume(struct sp_device *sp);
#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
static inline int ccp_dev_init(struct sp_device *sp)
{
return 0;
}
static inline void ccp_dev_destroy(struct sp_device *sp) { }
static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
return 0;
}
static inline int ccp_dev_resume(struct sp_device *sp)
{
return 0;
}
#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
#endif
@@ -23,8 +23,7 @@
 struct ccp_device;
 struct ccp_cmd;
 
-#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
-	defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+#if defined(CONFIG_CRYPTO_DEV_SP_CCP)
 
 /**
  * ccp_present - check if a CCP device is present
@@ -70,7 +69,7 @@ unsigned int ccp_version(void);
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
-#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+#else /* CONFIG_CRYPTO_DEV_CCP_SP_DEV is not enabled */
 
 static inline int ccp_present(void)
 {
@@ -87,7 +86,7 @@ static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 	return -ENODEV;
 }
 
-#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
 
 /***** AES engine *****/
...