Commit defc7d75 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

 - self-test failure of crc32c on powerpc

 - regressions of ecb(aes) when used with xts/lrw in s5p-sss

 - a number of bugs in the omap RNG driver

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: s5p-sss - Fix spinlock recursion on LRW(AES)
  hwrng: omap - Do not access INTMASK_REG on EIP76
  hwrng: omap - use devm_clk_get() instead of of_clk_get()
  hwrng: omap - write registers after enabling the clock
  crypto: s5p-sss - Fix completing crypto request in IRQ handler
  crypto: powerpc - Fix initialisation of crc32c context
parents ae50dfd6 28b62b14
arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
         u32 *key = crypto_tfm_ctx(tfm);
 
-        *key = 0;
+        *key = ~0;
 
         return 0;
 }
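The single-character fix above restores the usual CRC-32C convention: the running value is seeded with all ones (~0) and inverted once at the end. The generic crc32c implementation that the powerpc self-test compares against follows exactly that convention, so seeding with 0 made the vpmsum-accelerated digest disagree with it. The following is a hedged illustration of the convention only (plain bitwise userspace C, not the kernel's vpmsum code; the crc32c_update() helper is invented for this sketch):

/* crc32c_demo.c: bitwise CRC-32C showing the ~0 seed and final inversion */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c_update(uint32_t crc, const unsigned char *p, size_t len)
{
        const uint32_t poly = 0x82F63B78;       /* reflected Castagnoli polynomial */

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ poly : crc >> 1;
        }
        return crc;
}

int main(void)
{
        const char *msg = "123456789";
        uint32_t crc = ~0u;     /* the seed the fix restores (it was wrongly 0) */

        crc = crc32c_update(crc, (const unsigned char *)msg, strlen(msg));
        printf("crc32c(\"%s\") = 0x%08x\n", msg, ~crc);  /* prints 0xe3069283 */
        return 0;
}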
drivers/char/hw_random/omap-rng.c
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
                         irq, err);
                 return err;
         }
-        omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
 
-        priv->clk = of_clk_get(pdev->dev.of_node, 0);
+        priv->clk = devm_clk_get(&pdev->dev, NULL);
         if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
                 return -EPROBE_DEFER;
         if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
                         dev_err(&pdev->dev, "unable to enable the clk, "
                                 "err = %d\n", err);
                 }
+
+                /*
+                 * On OMAP4, enabling the shutdown_oflo interrupt is
+                 * done in the interrupt mask register. There is no
+                 * such register on EIP76, and it's enabled by the
+                 * same bit in the control register
+                 */
+                if (priv->pdata->regs[RNG_INTMASK_REG])
+                        omap_rng_write(priv, RNG_INTMASK_REG,
+                                       RNG_SHUTDOWN_OFLO_MASK);
+                else
+                        omap_rng_write(priv, RNG_CONTROL_REG,
+                                       RNG_SHUTDOWN_OFLO_MASK);
         }
 
         return 0;
 }
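Two patterns drive the omap-rng hunks: devm_clk_get() makes the clock a managed resource, so there is no clk_put() to forget on error or remove paths and -EPROBE_DEFER can simply be propagated, and the RNG registers may only be written once the interface clock is enabled, which is why the INTMASK/CONTROL write moved after the clock handling (EIP76 uses the control register because it has no interrupt mask register). The probe-time sketch below shows that ordering under stated assumptions: the foo_* names and the register offset are invented, and unlike omap-rng the clock is treated as mandatory here.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct foo_priv {
        void __iomem *base;
        struct clk *clk;
};

static int foo_probe_clock(struct platform_device *pdev, struct foo_priv *priv)
{
        struct resource *res;
        int err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        /* Managed lookup: no clk_put() needed on any exit path. */
        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);      /* covers -EPROBE_DEFER too */

        err = clk_prepare_enable(priv->clk);
        if (err) {
                dev_err(&pdev->dev, "unable to enable the clk, err = %d\n", err);
                return err;
        }

        /* Registers must only be touched after the clock is running. */
        writel(0x1, priv->base + 0x40);         /* hypothetical register offset */

        return 0;
}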
drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
         scatterwalk_done(&walk, out, 0);
 }
 
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
         if (dev->sg_dst_cpy) {
                 dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
         }
         s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
         s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
 
-        /* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock hold. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
         dev->req->base.complete(&dev->req->base, err);
         dev->busy = false;
 }
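This split is the heart of the LRW(AES) spinlock-recursion fix: releasing the scatterlist copies still happens under dev->lock inside s5p_sg_done(), while the request completion moves into s5p_aes_complete(), which must only run after the lock is dropped, because with lrw or xts layered over ecb(aes) the completion callback can synchronously submit the next request to the same driver and take dev->lock again. A generic, hedged sketch of that ordering (foo_* names invented; the callback signature matches the ablkcipher API of this kernel generation):

#include <linux/crypto.h>
#include <linux/spinlock.h>

struct foo_dev {
        spinlock_t lock;
        bool busy;
        struct ablkcipher_request *req;
};

static void foo_finish_request(struct foo_dev *dev, int err)
{
        struct ablkcipher_request *req;
        unsigned long flags;

        /* Per-device bookkeeping is still protected by the lock... */
        spin_lock_irqsave(&dev->lock, flags);
        req = dev->req;
        dev->req = NULL;
        dev->busy = false;
        spin_unlock_irqrestore(&dev->lock, flags);

        /*
         * ...but the completion runs only after the lock is dropped: the
         * callback may re-enter the driver (e.g. lrw/xts queueing the next
         * ecb(aes) request) and take dev->lock itself.
         */
        req->base.complete(&req->base, err);
}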
@@ -368,51 +371,44 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 }
 
 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_outdata()).
  */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-        int err = 0;
-        bool ret = false;
+        int ret = 0;
 
         s5p_unset_outdata(dev);
 
         if (!sg_is_last(dev->sg_dst)) {
-                err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-                if (err)
-                        s5p_aes_complete(dev, err);
-                else
-                        ret = true;
-        } else {
-                s5p_aes_complete(dev, err);
-
-                dev->busy = true;
-                tasklet_schedule(&dev->tasklet);
+                ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+                if (!ret)
+                        ret = 1;
         }
 
         return ret;
 }
 
 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_indata()).
  */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-        int err;
-        bool ret = false;
+        int ret = 0;
 
         s5p_unset_indata(dev);
 
         if (!sg_is_last(dev->sg_src)) {
-                err = s5p_set_indata(dev, sg_next(dev->sg_src));
-                if (err)
-                        s5p_aes_complete(dev, err);
-                else
-                        ret = true;
+                ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+                if (!ret)
+                        ret = 1;
         }
 
         return ret;
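Both helpers now only report a tri-state result, -errno on a mapping failure, 0 when the scatterlist is exhausted, 1 when another block must be programmed, and they leave completion and DMA setup to the caller (the interrupt handler in the next hunk). A tiny standalone illustration of why a bool was not enough here, with invented names:

#include <errno.h>
#include <stdio.h>

/*
 * -errno on failure, 0 when no data is left, 1 when more data is ready and
 * the caller must program the next block.  A bool cannot distinguish
 * "finished cleanly" from "mapping failed".
 */
static int foo_next_chunk(int remaining, int simulate_error)
{
        if (simulate_error)
                return -EIO;
        return remaining > 0 ? 1 : 0;
}

int main(void)
{
        for (int remaining = 2; ; remaining--) {
                int ret = foo_next_chunk(remaining, 0);

                if (ret < 0) {          /* error: caller completes with ret */
                        fprintf(stderr, "mapping failed: %d\n", ret);
                        return 1;
                }
                if (ret == 0)           /* done: caller completes with 0 */
                        break;
                printf("program next block, %d left\n", remaining);
        }
        return 0;
}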
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
         struct platform_device *pdev = dev_id;
         struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-        bool set_dma_tx = false;
-        bool set_dma_rx = false;
+        int err_dma_tx = 0;
+        int err_dma_rx = 0;
+        bool tx_end = false;
         unsigned long flags;
         uint32_t status;
+        int err;
 
         spin_lock_irqsave(&dev->lock, flags);
 
+        /*
+         * Handle rx or tx interrupt. If there is still data (scatterlist did not
+         * reach end), then map next scatterlist entry.
+         * In case of such mapping error, s5p_aes_complete() should be called.
+         *
+         * If there is no more data in tx scatter list, call s5p_aes_complete()
+         * and schedule new tasklet.
+         */
         status = SSS_READ(dev, FCINTSTAT);
         if (status & SSS_FCINTSTAT_BRDMAINT)
-                set_dma_rx = s5p_aes_rx(dev);
-        if (status & SSS_FCINTSTAT_BTDMAINT)
-                set_dma_tx = s5p_aes_tx(dev);
+                err_dma_rx = s5p_aes_rx(dev);
+
+        if (status & SSS_FCINTSTAT_BTDMAINT) {
+                if (sg_is_last(dev->sg_dst))
+                        tx_end = true;
+                err_dma_tx = s5p_aes_tx(dev);
+        }
 
         SSS_WRITE(dev, FCINTPEND, status);
 
-        /*
-         * Writing length of DMA block (either receiving or transmitting)
-         * will start the operation immediately, so this should be done
-         * at the end (even after clearing pending interrupts to not miss the
-         * interrupt).
-         */
-        if (set_dma_tx)
-                s5p_set_dma_outdata(dev, dev->sg_dst);
-        if (set_dma_rx)
-                s5p_set_dma_indata(dev, dev->sg_src);
-
+        if (err_dma_rx < 0) {
+                err = err_dma_rx;
+                goto error;
+        }
+        if (err_dma_tx < 0) {
+                err = err_dma_tx;
+                goto error;
+        }
+
+        if (tx_end) {
+                s5p_sg_done(dev);
+
+                spin_unlock_irqrestore(&dev->lock, flags);
+
+                s5p_aes_complete(dev, 0);
+                dev->busy = true;
+                tasklet_schedule(&dev->tasklet);
+        } else {
+                /*
+                 * Writing length of DMA block (either receiving or
+                 * transmitting) will start the operation immediately, so this
+                 * should be done at the end (even after clearing pending
+                 * interrupts to not miss the interrupt).
+                 */
+                if (err_dma_tx == 1)
+                        s5p_set_dma_outdata(dev, dev->sg_dst);
+                if (err_dma_rx == 1)
+                        s5p_set_dma_indata(dev, dev->sg_src);
+
+                spin_unlock_irqrestore(&dev->lock, flags);
+        }
+
+        return IRQ_HANDLED;
+
+error:
+        s5p_sg_done(dev);
         spin_unlock_irqrestore(&dev->lock, flags);
+        s5p_aes_complete(dev, err);
 
         return IRQ_HANDLED;
 }
@@ -597,8 +633,9 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
         s5p_unset_indata(dev);
 
 indata_error:
-        s5p_aes_complete(dev, err);
+        s5p_sg_done(dev);
         spin_unlock_irqrestore(&dev->lock, flags);
+        s5p_aes_complete(dev, err);
 }
 
 static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
                 dev_warn(dev, "feed control interrupt is not available.\n");
                 goto err_irq;
         }
-        err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-                               IRQF_SHARED, pdev->name, pdev);
+        err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+                                        s5p_aes_interrupt, IRQF_ONESHOT,
+                                        pdev->name, pdev);
         if (err < 0) {
                 dev_warn(dev, "feed control interrupt is not available.\n");
                 goto err_irq;
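The last hunk swaps devm_request_irq() for devm_request_threaded_irq() with a NULL primary handler and IRQF_ONESHOT, so the interrupt line stays masked while s5p_aes_interrupt() runs from a kernel thread instead of hard-IRQ context, which is where the request completion now happens. A hedged sketch of that request pattern (foo_* names invented, not the s5p-sss probe code):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
        struct platform_device *pdev = dev_id;

        dev_info(&pdev->dev, "handled in thread context, may do longer work\n");
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;

        /*
         * NULL primary handler: the core's default handler just wakes the
         * thread; IRQF_ONESHOT keeps the line masked until foo_irq_thread()
         * returns.
         */
        return devm_request_threaded_irq(&pdev->dev, irq, NULL, foo_irq_thread,
                                         IRQF_ONESHOT, dev_name(&pdev->dev),
                                         pdev);
}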