Commit 0fc0f18b authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - fix chacha20 crash on zero-length input due to unset IV

   - fix potential race conditions in mcryptd with spinlock

   - only wait once at top of algif recvmsg to avoid inconsistencies

   - fix potential use-after-free in algif_aead/algif_skcipher"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: af_alg - fix race accessing cipher request
  crypto: mcryptd - protect the per-CPU queue with a lock
  crypto: af_alg - wait for data at beginning of recvmsg
  crypto: skcipher - set walk.iv for zero-length inputs
parents 6ed16756 d53c5135
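
The chacha20 crash called out in the pull message above was reachable from unprivileged userspace via AF_ALG. Below is a minimal sketch of such a trigger, offered as an illustration only (the exact sequence is an assumption, not the original syzkaller reproducer; error handling is omitted): it binds a "chacha20" skcipher socket, submits a zero-length request, and reads it back, which before the skcipher walk fix could reach the cipher with walk.iv never having been set.

/* Illustrative user-space trigger sketch; error handling omitted. */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "chacha20",
	};
	char key[32] = { 0 }, buf[1];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	send(opfd, NULL, 0, 0);		/* zero-length request ... */
	recv(opfd, buf, 0, 0);		/* ... hits the cryptlen == 0 path */

	close(opfd);
	close(tfmfd);
	return 0;
}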
crypto/af_alg.c
@@ -1138,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 		if (!af_alg_readable(sk))
 			break;
 
-		if (!ctx->used) {
-			err = af_alg_wait_for_data(sk, flags);
-			if (err)
-				return err;
-		}
-
 		seglen = min_t(size_t, (maxsize - len),
 			       msg_data_left(msg));
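
This hunk only removes the per-segment wait from the shared af_alg_get_rsgl() loop; the matching hunks in algif_aead.c and algif_skcipher.c below add the wait back at the top of each recvmsg implementation, so a caller sleeps at most once, before any request state has been consumed. A rough user-space analogy of that control-flow change (hypothetical stand-in names, not the kernel API):

/* Hypothetical stand-ins illustrating "wait once at entry". */
#include <stdio.h>

static int have_data;

static int wait_for_data(void)
{
	have_data = 1;		/* pretend input arrived */
	return 0;
}

static int recvmsg_like(int segments)
{
	int copied = 0;
	int err;

	/* Previously this wait sat inside the segment loop below, so
	 * the caller could block halfway through an operation. */
	if (!have_data) {
		err = wait_for_data();
		if (err)
			return err;
	}

	while (copied < segments)
		copied++;	/* stand-in for per-segment copying */

	return copied;
}

int main(void)
{
	printf("copied %d segments\n", recvmsg_like(3));
	return 0;
}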
crypto/algif_aead.c
@@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
 	size_t processed = 0;		/* [in]  TX bufs to be consumed */
 
+	if (!ctx->used) {
+		err = af_alg_wait_for_data(sk, flags);
+		if (err)
+			return err;
+	}
+
 	/*
 	 * Data length provided by caller via sendmsg/sendpage that has not
 	 * yet been processed.
@@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		/* AIO operation */
 		sock_hold(sk);
 		areq->iocb = msg->msg_iocb;
+
+		/* Remember output size that will be generated. */
+		areq->outlen = outlen;
+
 		aead_request_set_callback(&areq->cra_u.aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  af_alg_async_cb, areq);
@@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 			crypto_aead_decrypt(&areq->cra_u.aead_req);
 
 		/* AIO operation in progress */
-		if (err == -EINPROGRESS || err == -EBUSY) {
-			/* Remember output size that will be generated. */
-			areq->outlen = outlen;
-
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return -EIOCBQUEUED;
-		}
 
 		sock_put(sk);
 	} else {
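
The reordering matters because with AIO the cipher may complete asynchronously: af_alg_async_cb() can run, consume areq->outlen, and free the request before crypto_aead_decrypt() even returns to this function, so writing outlen inside the -EINPROGRESS branch raced with the callback. The fix publishes outlen before the request is submitted. A self-contained pthreads sketch of that ordering rule (user-space stand-ins with a hypothetical submit/complete pair, not the kernel API):

/* Publish everything the completion handler needs before submitting. */
#include <pthread.h>
#include <stdio.h>

struct request {
	size_t outlen;		/* consumed by the completion handler */
	pthread_t worker;
};

static void *complete(void *arg)
{
	struct request *req = arg;

	/* May run at any point after submission; if the submitter set
	 * outlen only after submitting, this could read garbage. */
	printf("completed, outlen=%zu\n", req->outlen);
	return NULL;
}

static void submit(struct request *req)
{
	pthread_create(&req->worker, NULL, complete, req);
}

int main(void)
{
	struct request req;

	/* Correct order (what the fix enforces): fill in the field
	 * before the request is handed off. */
	req.outlen = 4096;
	submit(&req);

	pthread_join(req.worker, NULL);
	return 0;
}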
crypto/algif_skcipher.c
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	int err = 0;
 	size_t len = 0;
 
+	if (!ctx->used) {
+		err = af_alg_wait_for_data(sk, flags);
+		if (err)
+			return err;
+	}
+
 	/* Allocate cipher request for current operation. */
 	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
 				 crypto_skcipher_reqsize(tfm));
@@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 		/* AIO operation */
 		sock_hold(sk);
 		areq->iocb = msg->msg_iocb;
+
+		/* Remember output size that will be generated. */
+		areq->outlen = len;
+
 		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP,
 					      af_alg_async_cb, areq);
@@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
 
 		/* AIO operation in progress */
-		if (err == -EINPROGRESS || err == -EBUSY) {
-			/* Remember output size that will be generated. */
-			areq->outlen = len;
-
+		if (err == -EINPROGRESS || err == -EBUSY)
			return -EIOCBQUEUED;
-		}
 
 		sock_put(sk);
 	} else {
crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
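
Previously, exclusion relied on get_cpu()/preempt_disable() on the producer side and local_bh_disable()/preempt_disable() on the consumer side, which does not serialize the producer against the worker in all cases; the new per-queue q_lock makes every access to the queue take one explicit lock, with the _bh variant in the worker since requests may be enqueued from softirq context. A rough user-space analogy of the resulting pattern, using pthread spinlocks as a stand-in (hypothetical names, not the kernel API):

/* Every path that touches the queue takes the same per-queue lock. */
#include <pthread.h>
#include <stdio.h>

struct cpu_queue {
	int qlen;		/* stand-in for crypto_queue state */
	pthread_spinlock_t q_lock;
};

static void queue_init(struct cpu_queue *q)
{
	q->qlen = 0;
	pthread_spin_init(&q->q_lock, PTHREAD_PROCESS_PRIVATE);
}

static void enqueue(struct cpu_queue *q)	/* producer side */
{
	pthread_spin_lock(&q->q_lock);
	q->qlen++;
	pthread_spin_unlock(&q->q_lock);
}

static int dequeue(struct cpu_queue *q)		/* worker side */
{
	int have;

	pthread_spin_lock(&q->q_lock);
	have = q->qlen > 0;
	if (have)
		q->qlen--;
	pthread_spin_unlock(&q->q_lock);
	return have;
}

int main(void)
{
	struct cpu_queue q;

	queue_init(&q);
	enqueue(&q);
	printf("dequeued: %d\n", dequeue(&q));
	return 0;
}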
crypto/skcipher.c
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 
 	walk->total = req->cryptlen;
 	walk->nbytes = 0;
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
 
 	if (unlikely(!walk->total))
 		return 0;
@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	walk->iv = req->iv;
-	walk->oiv = req->iv;
-
 	walk->flags &= ~SKCIPHER_WALK_SLEEP;
 	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 		       SKCIPHER_WALK_SLEEP : 0;
@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	int err;
 
 	walk->nbytes = 0;
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
 
 	if (unlikely(!walk->total))
 		return 0;
@@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	scatterwalk_done(&walk->in, 0, walk->total);
 	scatterwalk_done(&walk->out, 0, walk->total);
 
-	walk->iv = req->iv;
-	walk->oiv = req->iv;
-
 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
 		walk->flags |= SKCIPHER_WALK_SLEEP;
 	else
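
Both walk initializers now assign walk->iv and walk->oiv before the zero-length early return, so the IV pointer is valid even when the walk has no data to process; a stream cipher's setup can read walk.iv unconditionally, which is how a zero-length chacha20 request ended up dereferencing an unset pointer. The same publish-before-early-return pattern, distilled into a runnable user-space sketch (hypothetical names):

/* Publish state a caller reads unconditionally before early returns. */
#include <stdio.h>
#include <stddef.h>

struct walk {
	const unsigned char *iv;
	size_t total;
};

/* Fixed shape: iv is set before the zero-length early return,
 * mirroring the reordering in skcipher_walk_skcipher() above. */
static int walk_init(struct walk *w, const unsigned char *iv, size_t total)
{
	w->total = total;
	w->iv = iv;		/* set before any early return */
	if (total == 0)
		return 0;
	/* ... further setup for the non-empty case ... */
	return 0;
}

int main(void)
{
	static const unsigned char iv[16] = { 0 };
	struct walk w;

	walk_init(&w, iv, 0);
	/* Caller reads w.iv unconditionally, like chacha20 stream setup. */
	printf("iv[0]=%d\n", w.iv[0]);
	return 0;
}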
include/crypto/mcryptd.h
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };