Commit ec8f5d8f authored by Stanimir Varbanov, committed by Herbert Xu

crypto: qce - Qualcomm crypto engine driver

The driver is separated into functional parts. The core part
implements the platform driver probe and remove callbacks.
The probe enables the clocks, checks the crypto version,
initializes and requests the DMA channels, creates the done
tasklet, initializes the crypto queue and finally registers
the algorithms with the crypto core subsystem.

- DMA and SG helper functions
 implement dmaengine and sg-list helper functions used by
 other parts of the crypto driver.

- ablkcipher algorithms
 implementation of AES, DES and 3DES crypto API callbacks,
 the crypto register alg function, the async request handler
 and its dma done callback function.

- SHA and HMAC transforms
 implementation and registration of ahash crypto type.
 It includes sha1, sha256, hmac(sha1) and hmac(sha256).

- infrastructure to setup the crypto hw
 contains functions used to setup/prepare hardware registers for
 all algorithms supported by the crypto block. It also exports a
 few helper functions needed by the algorithms:
	- to check hardware status
	- to start crypto hardware
	- to translate data stream to big endian form

 It also adds the register addresses and bit masks used by
 the driver.
Signed-off-by: Stanimir Varbanov <svarbanov@mm-sol.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
/*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _CIPHER_H_
#define _CIPHER_H_
#include "common.h"
#include "core.h"
#define QCE_MAX_KEY_SIZE 64
struct qce_cipher_ctx {
	u8 enc_key[QCE_MAX_KEY_SIZE];
	unsigned int enc_keylen;
	struct crypto_ablkcipher *fallback;
};
/**
* struct qce_cipher_reqctx - holds private cipher objects per request
* @flags: operation flags
* @iv: pointer to the IV
* @ivsize: IV size
* @src_nents: source entries
* @dst_nents: destination entries
* @src_chained: is source chained
* @dst_chained: is destination chained
* @result_sg: scatterlist used for result buffer
* @dst_tbl: destination sg table
* @dst_sg: destination sg pointer table beginning
* @src_tbl: source sg table
* @src_sg: source sg pointer table beginning
* @cryptlen: crypto length
*/
struct qce_cipher_reqctx {
	unsigned long flags;
	u8 *iv;
	unsigned int ivsize;
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	struct scatterlist result_sg;
	struct sg_table dst_tbl;
	struct scatterlist *dst_sg;
	struct sg_table src_tbl;
	struct scatterlist *src_sg;
	unsigned int cryptlen;
};

static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct qce_alg_template, alg.crypto);
}
extern const struct qce_algo_ops ablkcipher_ops;
#endif /* _CIPHER_H_ */
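A minimal illustration of how the ablkcipher part can use to_cipher_tmpl() from a crypto API callback. The function name and the key-length check below are assumptions for the sketch, not code from this patch:

static int qce_setkey_sketch(struct crypto_ablkcipher *ablk, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	/* tmpl->alg_flags tells the callback which algorithm it serves */
	if (IS_AES(tmpl->alg_flags) && keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->enc_key, key, keylen);
	ctx->enc_keylen = keylen;

	return 0;
}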
/*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _COMMON_H_
#define _COMMON_H_
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256
/* IV length in bytes */
#define QCE_AES_IV_LENGTH AES_BLOCK_SIZE
/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define QCE_MAX_IV_SIZE AES_BLOCK_SIZE
/* maximum nonce bytes */
#define QCE_MAX_NONCE 16
#define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32))
/* burst size alignment requirement */
#define QCE_MAX_ALIGN_SIZE 64
/* cipher algorithms */
#define QCE_ALG_DES BIT(0)
#define QCE_ALG_3DES BIT(1)
#define QCE_ALG_AES BIT(2)
/* hash and hmac algorithms */
#define QCE_HASH_SHA1 BIT(3)
#define QCE_HASH_SHA256 BIT(4)
#define QCE_HASH_SHA1_HMAC BIT(5)
#define QCE_HASH_SHA256_HMAC BIT(6)
#define QCE_HASH_AES_CMAC BIT(7)
/* cipher modes */
#define QCE_MODE_CBC BIT(8)
#define QCE_MODE_ECB BIT(9)
#define QCE_MODE_CTR BIT(10)
#define QCE_MODE_XTS BIT(11)
#define QCE_MODE_CCM BIT(12)
#define QCE_MODE_MASK GENMASK(12, 8)
/* cipher encryption/decryption operations */
#define QCE_ENCRYPT BIT(13)
#define QCE_DECRYPT BIT(14)
#define IS_DES(flags) (flags & QCE_ALG_DES)
#define IS_3DES(flags) (flags & QCE_ALG_3DES)
#define IS_AES(flags) (flags & QCE_ALG_AES)
#define IS_SHA1(flags) (flags & QCE_HASH_SHA1)
#define IS_SHA256(flags) (flags & QCE_HASH_SHA256)
#define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC)
#define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC)
#define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC)
#define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags))
#define IS_SHA_HMAC(flags) \
(IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
#define IS_CBC(mode) (mode & QCE_MODE_CBC)
#define IS_ECB(mode) (mode & QCE_MODE_ECB)
#define IS_CTR(mode) (mode & QCE_MODE_CTR)
#define IS_XTS(mode) (mode & QCE_MODE_XTS)
#define IS_CCM(mode) (mode & QCE_MODE_CCM)
#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT)
#define IS_DECRYPT(dir) (dir & QCE_DECRYPT)
struct qce_alg_template {
	struct list_head entry;
	u32 crypto_alg_type;
	unsigned long alg_flags;
	const __be32 *std_iv;
	union {
		struct crypto_alg crypto;
		struct ahash_alg ahash;
	} alg;
	struct qce_device *qce;
};
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
int qce_check_status(struct qce_device *qce, u32 *status);
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset);
#endif /* _COMMON_H_ */
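The cipher, mode and direction bits above are meant to be OR-ed into a single flags word per transform, which the IS_*() macros then test; an illustrative fragment (the combination shown is an example, not taken from the patch):

/* illustrative: describe one operation, then test it with the macros */
unsigned long flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_ENCRYPT;

if (IS_AES(flags) && IS_CBC(flags) && IS_ENCRYPT(flags))
	pr_debug("AES-CBC encryption request\n");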
/*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include "core.h"
#include "cipher.h"
#include "sha.h"
#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
	&ablkcipher_ops,
	&ahash_ops,
};
static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
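qce_async_request_enqueue() and qce_async_request_done() are the two hooks exposed to the algorithm implementations through struct qce_device. A hedged sketch of how an encrypt callback and a DMA completion callback would use them; the qce_example_* names are assumptions, the real callbacks live in the ablkcipher and sha parts of this series:

static int qce_example_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);

	/* hand the request to the core; it dequeues and dispatches it */
	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static void qce_example_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);

	/* schedules the done tasklet, which calls req->complete() */
	tmpl->qce->async_req_done(tmpl->qce, 0);
}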
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}
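qce_get_version() is implemented in common.c, which is not shown here; a sketch of the likely decode, assuming it simply splits REG_VERSION using the CORE_*_REV masks from regs-v5.h below:

/* sketch, assuming the REG_VERSION layout from regs-v5.h */
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val = readl(qce->base + REG_VERSION);

	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}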
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	struct resource *res;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	qce->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
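For reference, a hypothetical devicetree node matching what the probe requests above: one MEM resource, clocks named core/iface/bus and DMA channels named rx/tx. The unit address, clock phandles and BAM pipe numbers are placeholders, not taken from this patch:

crypto@fd45a000 {
	compatible = "qcom,crypto-v5.1";
	reg = <0xfd45a000 0x6000>;
	clocks = <&gcc GCC_CE2_AHB_CLK>, <&gcc GCC_CE2_AXI_CLK>,
		 <&gcc GCC_CE2_CLK>;
	clock-names = "iface", "bus", "core";
	dmas = <&cryptobam 2>, <&cryptobam 3>;
	dma-names = "rx", "tx";
};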
/*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _CORE_H_
#define _CORE_H_
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
* @done_tasklet: done tasklet object
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
* @dev: pointer to device structure
* @core: core device clock
* @iface: interface clock
* @bus: bus clock
* @dma: pointer to dma data
* @burst_size: the crypto burst size
* @pipe_pair_id: which pipe pair id the device is using
* @async_req_enqueue: invoked by every algorithm to enqueue a request
* @async_req_done: invoked by every algorithm to finish its request
*/
struct qce_device {
	struct crypto_queue queue;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
	struct crypto_async_request *req;
	int result;
	void __iomem *base;
	struct device *dev;
	struct clk *core, *iface, *bus;
	struct qce_dma_data dma;
	int burst_size;
	unsigned int pipe_pair_id;
	int (*async_req_enqueue)(struct qce_device *qce,
				 struct crypto_async_request *req);
	void (*async_req_done)(struct qce_device *qce, int ret);
};
/**
* struct qce_algo_ops - algorithm operations per crypto type
* @type: should be CRYPTO_ALG_TYPE_XXX
* @register_algs: invoked by core to register the algorithms
* @unregister_algs: invoked by core to unregister the algorithms
* @async_req_handle: invoked by core to handle enqueued request
*/
struct qce_algo_ops {
	u32 type;
	int (*register_algs)(struct qce_device *qce);
	void (*unregister_algs)(struct qce_device *qce);
	int (*async_req_handle)(struct crypto_async_request *async_req);
};
#endif /* _CORE_H_ */
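Each functional part fills in one such ops instance; core.c dereferences ablkcipher_ops and ahash_ops through its qce_ops[] array. A hedged sketch of the shape, with the callback names assumed for illustration (the real definitions are in the ablkcipher part of this series):

const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,	/* assumed name */
	.unregister_algs = qce_ablkcipher_unregister,	/* assumed name */
	.async_req_handle = qce_ablkcipher_async_req_handle, /* assumed name */
};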
/*
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	/* the ignore buffer sits QCE_RESULT_BUF_SZ bytes into the allocation */
	dma->ignore_buf = (u8 *)dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
	      enum dma_data_direction dir, bool chained)
{
	int err;

	if (chained) {
		while (sg) {
			err = dma_map_sg(dev, sg, 1, dir);
			if (!err)
				return -EFAULT;
			sg = scatterwalk_sg_next(sg);
		}
	} else {
		err = dma_map_sg(dev, sg, nents, dir);
		if (!err)
			return -EFAULT;
	}

	return nents;
}

void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir, bool chained)
{
	if (chained)
		while (sg) {
			dma_unmap_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_unmap_sg(dev, sg, nents, dir);
}
int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
{
	struct scatterlist *sg = sglist;
	int nents = 0;

	if (chained)
		*chained = false;

	while (nbytes > 0 && sg) {
		nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
			*chained = true;
		sg = scatterwalk_sg_next(sg);
	}

	return nents;
}
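A caller is expected to pair qce_countsg() with qce_mapsg(), feeding the detected chained flag from one into the other; an illustrative fragment (req and qce stand for the usual request and device pointers):

/* illustrative: count the entries covering nbytes, then DMA-map them */
bool chained;
int nents = qce_countsg(req->src, req->nbytes, &chained);
int ret = qce_mapsg(qce->dev, req->src, nents, DMA_TO_DEVICE, chained);

if (ret < 0)
	return ret;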
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg) {
		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
			    new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
	}

	return sg_last;
}
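qce_sgtable_add() is what lets a request append the result-dump scatterlist behind the destination data, so both arrive in one DMA transaction. A hedged sketch of that use, close to what the ablkcipher part of this series does (rctx follows struct qce_cipher_reqctx from cipher.h):

/* sketch: dst table = destination entries plus the result buffer */
ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents + 1, GFP_ATOMIC);
if (ret)
	return ret;

sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
if (IS_ERR(sg))
	return PTR_ERR(sg);

sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
if (IS_ERR(sg))
	return PTR_ERR(sg);

sg_mark_end(sg);
rctx->dst_sg = rctx->dst_tbl.sgl;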
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
	return ret ?: dmaengine_terminate_all(dma->txchan);
}
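Putting the helpers together, this is the per-request DMA sequence an async_req_handle implementation would follow; a hedged sketch that reuses the qce_example_done callback sketched earlier, with error unwinding abbreviated:

static int qce_example_xfer(struct qce_device *qce,
			    struct qce_cipher_reqctx *rctx,
			    struct crypto_async_request *async_req)
{
	int ret;

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_example_done, async_req);
	if (ret)
		return ret;

	/* kick both BAM channels, then program and start the crypto block */
	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, CRYPTO_ALG_TYPE_ABLKCIPHER,
			rctx->cryptlen, 0);
	if (ret)
		qce_dma_terminate_all(&qce->dma);

	return ret;
}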
/*
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DMA_H_
#define _DMA_H_
/* maximum data transfer block size between BAM and CE */
#define QCE_BAM_BURST_SIZE 64
#define QCE_AUTHIV_REGS_CNT 16
#define QCE_AUTH_BYTECOUNT_REGS_CNT 4
#define QCE_CNTRIV_REGS_CNT 4
struct qce_result_dump {
	u32 auth_iv[QCE_AUTHIV_REGS_CNT];
	u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
	u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
	u32 status;
	u32 status2;
};

#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE)
#define QCE_RESULT_BUF_SZ \
	ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)

struct qce_dma_data {
	struct dma_chan *txchan;
	struct dma_chan *rxchan;
	struct qce_result_dump *result_buf;
	void *ignore_buf;
};
int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
void qce_dma_release(struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, bool chained);
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, bool chained);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
#endif /* _DMA_H_ */
/*
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _REGS_V5_H_
#define _REGS_V5_H_
#include <linux/bitops.h>
#define REG_VERSION 0x000
#define REG_STATUS 0x100
#define REG_STATUS2 0x104
#define REG_ENGINES_AVAIL 0x108
#define REG_FIFO_SIZES 0x10c
#define REG_SEG_SIZE 0x110
#define REG_GOPROC 0x120
#define REG_ENCR_SEG_CFG 0x200
#define REG_ENCR_SEG_SIZE 0x204
#define REG_ENCR_SEG_START 0x208
#define REG_CNTR0_IV0 0x20c
#define REG_CNTR1_IV1 0x210
#define REG_CNTR2_IV2 0x214
#define REG_CNTR3_IV3 0x218
#define REG_CNTR_MASK 0x21C
#define REG_ENCR_CCM_INT_CNTR0 0x220
#define REG_ENCR_CCM_INT_CNTR1 0x224
#define REG_ENCR_CCM_INT_CNTR2 0x228
#define REG_ENCR_CCM_INT_CNTR3 0x22c
#define REG_ENCR_XTS_DU_SIZE 0x230
#define REG_CNTR_MASK2 0x234
#define REG_CNTR_MASK1 0x238
#define REG_CNTR_MASK0 0x23c
#define REG_AUTH_SEG_CFG 0x300
#define REG_AUTH_SEG_SIZE 0x304
#define REG_AUTH_SEG_START 0x308
#define REG_AUTH_IV0 0x310
#define REG_AUTH_IV1 0x314
#define REG_AUTH_IV2 0x318
#define REG_AUTH_IV3 0x31c
#define REG_AUTH_IV4 0x320
#define REG_AUTH_IV5 0x324
#define REG_AUTH_IV6 0x328
#define REG_AUTH_IV7 0x32c
#define REG_AUTH_IV8 0x330
#define REG_AUTH_IV9 0x334
#define REG_AUTH_IV10 0x338
#define REG_AUTH_IV11 0x33c
#define REG_AUTH_IV12 0x340
#define REG_AUTH_IV13 0x344
#define REG_AUTH_IV14 0x348
#define REG_AUTH_IV15 0x34c
#define REG_AUTH_INFO_NONCE0 0x350
#define REG_AUTH_INFO_NONCE1 0x354
#define REG_AUTH_INFO_NONCE2 0x358
#define REG_AUTH_INFO_NONCE3 0x35c
#define REG_AUTH_BYTECNT0 0x390
#define REG_AUTH_BYTECNT1 0x394
#define REG_AUTH_BYTECNT2 0x398
#define REG_AUTH_BYTECNT3 0x39c
#define REG_AUTH_EXP_MAC0 0x3a0
#define REG_AUTH_EXP_MAC1 0x3a4
#define REG_AUTH_EXP_MAC2 0x3a8
#define REG_AUTH_EXP_MAC3 0x3ac
#define REG_AUTH_EXP_MAC4 0x3b0
#define REG_AUTH_EXP_MAC5 0x3b4
#define REG_AUTH_EXP_MAC6 0x3b8
#define REG_AUTH_EXP_MAC7 0x3bc
#define REG_CONFIG 0x400
#define REG_GOPROC_QC_KEY 0x1000
#define REG_GOPROC_OEM_KEY 0x2000
#define REG_ENCR_KEY0 0x3000
#define REG_ENCR_KEY1 0x3004
#define REG_ENCR_KEY2 0x3008
#define REG_ENCR_KEY3 0x300c
#define REG_ENCR_KEY4 0x3010
#define REG_ENCR_KEY5 0x3014
#define REG_ENCR_KEY6 0x3018
#define REG_ENCR_KEY7 0x301c
#define REG_ENCR_XTS_KEY0 0x3020
#define REG_ENCR_XTS_KEY1 0x3024
#define REG_ENCR_XTS_KEY2 0x3028
#define REG_ENCR_XTS_KEY3 0x302c
#define REG_ENCR_XTS_KEY4 0x3030
#define REG_ENCR_XTS_KEY5 0x3034
#define REG_ENCR_XTS_KEY6 0x3038
#define REG_ENCR_XTS_KEY7 0x303c
#define REG_AUTH_KEY0 0x3040
#define REG_AUTH_KEY1 0x3044
#define REG_AUTH_KEY2 0x3048
#define REG_AUTH_KEY3 0x304c
#define REG_AUTH_KEY4 0x3050
#define REG_AUTH_KEY5 0x3054
#define REG_AUTH_KEY6 0x3058
#define REG_AUTH_KEY7 0x305c
#define REG_AUTH_KEY8 0x3060
#define REG_AUTH_KEY9 0x3064
#define REG_AUTH_KEY10 0x3068
#define REG_AUTH_KEY11 0x306c
#define REG_AUTH_KEY12 0x3070
#define REG_AUTH_KEY13 0x3074
#define REG_AUTH_KEY14 0x3078
#define REG_AUTH_KEY15 0x307c
/* Register bits - REG_VERSION */
#define CORE_STEP_REV_SHIFT 0
#define CORE_STEP_REV_MASK GENMASK(15, 0)
#define CORE_MINOR_REV_SHIFT 16
#define CORE_MINOR_REV_MASK GENMASK(23, 16)
#define CORE_MAJOR_REV_SHIFT 24
#define CORE_MAJOR_REV_MASK GENMASK(31, 24)
/* Register bits - REG_STATUS */
#define MAC_FAILED_SHIFT 31
#define DOUT_SIZE_AVAIL_SHIFT 26
#define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26)
#define DIN_SIZE_AVAIL_SHIFT 21
#define DIN_SIZE_AVAIL_MASK GENMASK(25, 21)
#define HSD_ERR_SHIFT 20
#define ACCESS_VIOL_SHIFT 19
#define PIPE_ACTIVE_ERR_SHIFT 18
#define CFG_CHNG_ERR_SHIFT 17
#define DOUT_ERR_SHIFT 16
#define DIN_ERR_SHIFT 15
#define AXI_ERR_SHIFT 14
#define CRYPTO_STATE_SHIFT 10
#define CRYPTO_STATE_MASK GENMASK(13, 10)
#define ENCR_BUSY_SHIFT 9
#define AUTH_BUSY_SHIFT 8
#define DOUT_INTR_SHIFT 7
#define DIN_INTR_SHIFT 6
#define OP_DONE_INTR_SHIFT 5
#define ERR_INTR_SHIFT 4
#define DOUT_RDY_SHIFT 3
#define DIN_RDY_SHIFT 2
#define OPERATION_DONE_SHIFT 1
#define SW_ERR_SHIFT 0
/* Register bits - REG_STATUS2 */
#define AXI_EXTRA_SHIFT 1
#define LOCKED_SHIFT 2
/* Register bits - REG_CONFIG */
#define REQ_SIZE_SHIFT 17
#define REQ_SIZE_MASK GENMASK(20, 17)
#define REQ_SIZE_ENUM_1_BEAT 0
#define REQ_SIZE_ENUM_2_BEAT 1
#define REQ_SIZE_ENUM_3_BEAT 2
#define REQ_SIZE_ENUM_4_BEAT 3
#define REQ_SIZE_ENUM_5_BEAT 4
#define REQ_SIZE_ENUM_6_BEAT 5
#define REQ_SIZE_ENUM_7_BEAT 6
#define REQ_SIZE_ENUM_8_BEAT 7
#define REQ_SIZE_ENUM_9_BEAT 8
#define REQ_SIZE_ENUM_10_BEAT 9
#define REQ_SIZE_ENUM_11_BEAT 10
#define REQ_SIZE_ENUM_12_BEAT 11
#define REQ_SIZE_ENUM_13_BEAT 12
#define REQ_SIZE_ENUM_14_BEAT 13
#define REQ_SIZE_ENUM_15_BEAT 14
#define REQ_SIZE_ENUM_16_BEAT 15
#define MAX_QUEUED_REQ_SHIFT 14
#define MAX_QUEUED_REQ_MASK GENMASK(24, 16)
#define ENUM_1_QUEUED_REQS 0
#define ENUM_2_QUEUED_REQS 1
#define ENUM_3_QUEUED_REQS 2
#define IRQ_ENABLES_SHIFT 10
#define IRQ_ENABLES_MASK GENMASK(13, 10)
#define LITTLE_ENDIAN_MODE_SHIFT 9
#define PIPE_SET_SELECT_SHIFT 5
#define PIPE_SET_SELECT_MASK GENMASK(8, 5)
#define HIGH_SPD_EN_N_SHIFT 4
#define MASK_DOUT_INTR_SHIFT 3
#define MASK_DIN_INTR_SHIFT 2
#define MASK_OP_DONE_INTR_SHIFT 1
#define MASK_ERR_INTR_SHIFT 0
/* Register bits - REG_AUTH_SEG_CFG */
#define COMP_EXP_MAC_SHIFT 24
#define COMP_EXP_MAC_DISABLED 0
#define COMP_EXP_MAC_ENABLED 1
#define F9_DIRECTION_SHIFT 23
#define F9_DIRECTION_UPLINK 0
#define F9_DIRECTION_DOWNLINK 1
#define AUTH_NONCE_NUM_WORDS_SHIFT 20
#define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20)
#define USE_PIPE_KEY_AUTH_SHIFT 19
#define USE_HW_KEY_AUTH_SHIFT 18
#define AUTH_FIRST_SHIFT 17
#define AUTH_LAST_SHIFT 16
#define AUTH_POS_SHIFT 14
#define AUTH_POS_MASK GENMASK(15, 14)
#define AUTH_POS_BEFORE 0
#define AUTH_POS_AFTER 1
#define AUTH_SIZE_SHIFT 9
#define AUTH_SIZE_MASK GENMASK(13, 9)
#define AUTH_SIZE_SHA1 0
#define AUTH_SIZE_SHA256 1
#define AUTH_SIZE_ENUM_1_BYTES 0
#define AUTH_SIZE_ENUM_2_BYTES 1
#define AUTH_SIZE_ENUM_3_BYTES 2
#define AUTH_SIZE_ENUM_4_BYTES 3
#define AUTH_SIZE_ENUM_5_BYTES 4
#define AUTH_SIZE_ENUM_6_BYTES 5
#define AUTH_SIZE_ENUM_7_BYTES 6
#define AUTH_SIZE_ENUM_8_BYTES 7
#define AUTH_SIZE_ENUM_9_BYTES 8
#define AUTH_SIZE_ENUM_10_BYTES 9
#define AUTH_SIZE_ENUM_11_BYTES 10
#define AUTH_SIZE_ENUM_12_BYTES 11
#define AUTH_SIZE_ENUM_13_BYTES 12
#define AUTH_SIZE_ENUM_14_BYTES 13
#define AUTH_SIZE_ENUM_15_BYTES 14
#define AUTH_SIZE_ENUM_16_BYTES 15
#define AUTH_MODE_SHIFT 6
#define AUTH_MODE_MASK GENMASK(8, 6)
#define AUTH_MODE_HASH 0
#define AUTH_MODE_HMAC 1
#define AUTH_MODE_CCM 0
#define AUTH_MODE_CMAC 1
#define AUTH_KEY_SIZE_SHIFT 3
#define AUTH_KEY_SIZE_MASK GENMASK(5, 3)
#define AUTH_KEY_SZ_AES128 0
#define AUTH_KEY_SZ_AES256 2
#define AUTH_ALG_SHIFT 0
#define AUTH_ALG_MASK GENMASK(2, 0)
#define AUTH_ALG_NONE 0
#define AUTH_ALG_SHA 1
#define AUTH_ALG_AES 2
#define AUTH_ALG_KASUMI 3
#define AUTH_ALG_SNOW3G 4
#define AUTH_ALG_ZUC 5
/* Register bits - REG_ENCR_XTS_DU_SIZE */
#define ENCR_XTS_DU_SIZE_SHIFT 0
#define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0)
/* Register bits - REG_ENCR_SEG_CFG */
#define F8_KEYSTREAM_ENABLE_SHIFT 17
#define F8_KEYSTREAM_DISABLED 0
#define F8_KEYSTREAM_ENABLED 1
#define F8_DIRECTION_SHIFT 16
#define F8_DIRECTION_UPLINK 0
#define F8_DIRECTION_DOWNLINK 1
#define USE_PIPE_KEY_ENCR_SHIFT 15
#define USE_PIPE_KEY_ENCR_ENABLED 1
#define USE_KEY_REGISTERS 0
#define USE_HW_KEY_ENCR_SHIFT 14
#define USE_KEY_REG 0
#define USE_HW_KEY 1
#define LAST_CCM_SHIFT 13
#define LAST_CCM_XFR 1
#define INTERM_CCM_XFR 0
#define CNTR_ALG_SHIFT 11
#define CNTR_ALG_MASK GENMASK(12, 11)
#define CNTR_ALG_NIST 0
#define ENCODE_SHIFT 10
#define ENCR_MODE_SHIFT 6
#define ENCR_MODE_MASK GENMASK(9, 6)
#define ENCR_MODE_ECB 0
#define ENCR_MODE_CBC 1
#define ENCR_MODE_CTR 2
#define ENCR_MODE_XTS 3
#define ENCR_MODE_CCM 4
#define ENCR_KEY_SZ_SHIFT 3
#define ENCR_KEY_SZ_MASK GENMASK(5, 3)
#define ENCR_KEY_SZ_DES 0
#define ENCR_KEY_SZ_3DES 1
#define ENCR_KEY_SZ_AES128 0
#define ENCR_KEY_SZ_AES256 2
#define ENCR_ALG_SHIFT 0
#define ENCR_ALG_MASK GENMASK(2, 0)
#define ENCR_ALG_NONE 0
#define ENCR_ALG_DES 1
#define ENCR_ALG_AES 2
#define ENCR_ALG_KASUMI 4
#define ENCR_ALG_SNOW_3G 5
#define ENCR_ALG_ZUC 6
/* Register bits - REG_GOPROC */
#define GO_SHIFT 0
#define CLR_CNTXT_SHIFT 1
#define RESULTS_DUMP_SHIFT 2
/* Register bits - REG_ENGINES_AVAIL */
#define ENCR_AES_SEL_SHIFT 0
#define DES_SEL_SHIFT 1
#define ENCR_SNOW3G_SEL_SHIFT 2
#define ENCR_KASUMI_SEL_SHIFT 3
#define SHA_SEL_SHIFT 4
#define SHA512_SEL_SHIFT 5
#define AUTH_AES_SEL_SHIFT 6
#define AUTH_SNOW3G_SEL_SHIFT 7
#define AUTH_KASUMI_SEL_SHIFT 8
#define BAM_PIPE_SETS_SHIFT 9
#define BAM_PIPE_SETS_MASK GENMASK(12, 9)
#define AXI_WR_BEATS_SHIFT 13
#define AXI_WR_BEATS_MASK GENMASK(18, 13)
#define AXI_RD_BEATS_SHIFT 19
#define AXI_RD_BEATS_MASK GENMASK(24, 19)
#define ENCR_ZUC_SEL_SHIFT 26
#define AUTH_ZUC_SEL_SHIFT 27
#define ZUC_ENABLE_SHIFT 28
#endif /* _REGS_V5_H_ */
/*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _SHA_H_
#define _SHA_H_
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include "common.h"
#include "core.h"
#define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE
#define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE
struct qce_sha_ctx {
	u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
};
/**
* struct qce_sha_reqctx - holds private ahash objects per request
* @buf: used during update, import and export
* @tmpbuf: buffer for internal use
* @digest: calculated digest buffer
* @buflen: length of the buffer
* @flags: operation flags
* @src_orig: original request sg list
* @nbytes_orig: original request number of bytes
* @src_chained: is source scatterlist chained
* @src_nents: source number of entries
* @byte_count: byte count
* @count: save count in states during update, import and export
* @first_blk: is it the first block
* @last_blk: is it the last block
* @sg: used to chain sg lists
* @authkey: pointer to auth key in sha ctx
* @authklen: auth key length
* @result_sg: scatterlist used for result buffer
*/
struct qce_sha_reqctx {
	u8 buf[QCE_SHA_MAX_BLOCKSIZE];
	u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
	u8 digest[QCE_SHA_MAX_DIGESTSIZE];
	unsigned int buflen;
	unsigned long flags;
	struct scatterlist *src_orig;
	unsigned int nbytes_orig;
	bool src_chained;
	int src_nents;
	__be32 byte_count[2];
	u64 count;
	bool first_blk;
	bool last_blk;
	struct scatterlist sg[2];
	u8 *authkey;
	unsigned int authklen;
	struct scatterlist result_sg;
};

static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
					     struct ahash_alg, halg);

	return container_of(alg, struct qce_alg_template, alg.ahash);
}
extern const struct qce_algo_ops ahash_ops;
#endif /* _SHA_H_ */
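The std_iv member of struct qce_alg_template is what an ahash init callback seeds the digest buffer with; a hedged sketch of that flow (the callback name is assumed, the real one is in the sha part of this series):

static int qce_ahash_init_sketch(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	/* std_iv arrays are padded to the full digest buffer size */
	memcpy(rctx->digest, tmpl->std_iv, sizeof(rctx->digest));

	return 0;
}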