Commit 0a67c46d authored by Bob Pearson's avatar Bob Pearson Committed by Jason Gunthorpe

RDMA/rxe: Protect user space index loads/stores

Modify the queue APIs to protect all user space index loads with
smp_load_acquire() and all user space index stores with
smp_store_release(). Base this on the types of the queues which can be one
of ..KERNEL, ..FROM_USER, ..TO_USER. Kernel space indices are protected by
locks which also provide memory barriers.

Link: https://lore.kernel.org/r/20210527194748.662636-3-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 59daff49
...@@ -66,12 +66,22 @@ static inline int queue_empty(struct rxe_queue *q) ...@@ -66,12 +66,22 @@ static inline int queue_empty(struct rxe_queue *q)
u32 prod; u32 prod;
u32 cons; u32 cons;
/* make sure all changes to queue complete before switch (q->type) {
* testing queue empty case QUEUE_TYPE_FROM_USER:
*/ /* protect user space index */
prod = smp_load_acquire(&q->buf->producer_index); prod = smp_load_acquire(&q->buf->producer_index);
/* same */ cons = q->buf->consumer_index;
cons = smp_load_acquire(&q->buf->consumer_index); break;
case QUEUE_TYPE_TO_USER:
prod = q->buf->producer_index;
/* protect user space index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
case QUEUE_TYPE_KERNEL:
prod = q->buf->producer_index;
cons = q->buf->consumer_index;
break;
}
return ((prod - cons) & q->index_mask) == 0; return ((prod - cons) & q->index_mask) == 0;
} }
...@@ -81,95 +91,151 @@ static inline int queue_full(struct rxe_queue *q) ...@@ -81,95 +91,151 @@ static inline int queue_full(struct rxe_queue *q)
u32 prod; u32 prod;
u32 cons; u32 cons;
/* make sure all changes to queue complete before switch (q->type) {
* testing queue full case QUEUE_TYPE_FROM_USER:
*/ /* protect user space index */
prod = smp_load_acquire(&q->buf->producer_index); prod = smp_load_acquire(&q->buf->producer_index);
/* same */ cons = q->buf->consumer_index;
cons = smp_load_acquire(&q->buf->consumer_index); break;
case QUEUE_TYPE_TO_USER:
prod = q->buf->producer_index;
/* protect user space index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
case QUEUE_TYPE_KERNEL:
prod = q->buf->producer_index;
cons = q->buf->consumer_index;
break;
}
return ((prod + 1 - cons) & q->index_mask) == 0; return ((prod + 1 - cons) & q->index_mask) == 0;
} }
static inline void advance_producer(struct rxe_queue *q) static inline unsigned int queue_count(const struct rxe_queue *q)
{ {
u32 prod; u32 prod;
u32 cons;
prod = (q->buf->producer_index + 1) & q->index_mask; switch (q->type) {
case QUEUE_TYPE_FROM_USER:
/* protect user space index */
prod = smp_load_acquire(&q->buf->producer_index);
cons = q->buf->consumer_index;
break;
case QUEUE_TYPE_TO_USER:
prod = q->buf->producer_index;
/* protect user space index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
case QUEUE_TYPE_KERNEL:
prod = q->buf->producer_index;
cons = q->buf->consumer_index;
break;
}
return (prod - cons) & q->index_mask;
}
static inline void advance_producer(struct rxe_queue *q)
{
u32 prod;
/* make sure all changes to queue complete before if (q->type == QUEUE_TYPE_FROM_USER) {
* changing producer index /* protect user space index */
*/ prod = smp_load_acquire(&q->buf->producer_index);
smp_store_release(&q->buf->producer_index, prod); prod = (prod + 1) & q->index_mask;
/* same */
smp_store_release(&q->buf->producer_index, prod);
} else {
prod = q->buf->producer_index;
q->buf->producer_index = (prod + 1) & q->index_mask;
}
} }
static inline void advance_consumer(struct rxe_queue *q) static inline void advance_consumer(struct rxe_queue *q)
{ {
u32 cons; u32 cons;
cons = (q->buf->consumer_index + 1) & q->index_mask; if (q->type == QUEUE_TYPE_TO_USER) {
/* protect user space index */
/* make sure all changes to queue complete before cons = smp_load_acquire(&q->buf->consumer_index);
* changing consumer index cons = (cons + 1) & q->index_mask;
*/ /* same */
smp_store_release(&q->buf->consumer_index, cons); smp_store_release(&q->buf->consumer_index, cons);
} else {
cons = q->buf->consumer_index;
q->buf->consumer_index = (cons + 1) & q->index_mask;
}
} }
static inline void *producer_addr(struct rxe_queue *q) static inline void *producer_addr(struct rxe_queue *q)
{ {
return q->buf->data + ((q->buf->producer_index & q->index_mask) u32 prod;
<< q->log2_elem_size);
if (q->type == QUEUE_TYPE_FROM_USER)
/* protect user space index */
prod = smp_load_acquire(&q->buf->producer_index);
else
prod = q->buf->producer_index;
return q->buf->data + ((prod & q->index_mask) << q->log2_elem_size);
} }
static inline void *consumer_addr(struct rxe_queue *q) static inline void *consumer_addr(struct rxe_queue *q)
{ {
return q->buf->data + ((q->buf->consumer_index & q->index_mask) u32 cons;
<< q->log2_elem_size);
if (q->type == QUEUE_TYPE_TO_USER)
/* protect user space index */
cons = smp_load_acquire(&q->buf->consumer_index);
else
cons = q->buf->consumer_index;
return q->buf->data + ((cons & q->index_mask) << q->log2_elem_size);
} }
static inline unsigned int producer_index(struct rxe_queue *q) static inline unsigned int producer_index(struct rxe_queue *q)
{ {
u32 index; u32 prod;
if (q->type == QUEUE_TYPE_FROM_USER)
/* protect user space index */
prod = smp_load_acquire(&q->buf->producer_index);
else
prod = q->buf->producer_index;
/* make sure all changes to queue prod &= q->index_mask;
* complete before getting producer index
*/
index = smp_load_acquire(&q->buf->producer_index);
index &= q->index_mask;
return index; return prod;
} }
static inline unsigned int consumer_index(struct rxe_queue *q) static inline unsigned int consumer_index(struct rxe_queue *q)
{ {
u32 index; u32 cons;
/* make sure all changes to queue if (q->type == QUEUE_TYPE_TO_USER)
* complete before getting consumer index /* protect user space index */
*/ cons = smp_load_acquire(&q->buf->consumer_index);
index = smp_load_acquire(&q->buf->consumer_index); else
index &= q->index_mask; cons = q->buf->consumer_index;
return index; cons &= q->index_mask;
return cons;
} }
static inline void *addr_from_index(struct rxe_queue *q, unsigned int index) static inline void *addr_from_index(struct rxe_queue *q,
unsigned int index)
{ {
return q->buf->data + ((index & q->index_mask) return q->buf->data + ((index & q->index_mask)
<< q->buf->log2_elem_size); << q->buf->log2_elem_size);
} }
static inline unsigned int index_from_addr(const struct rxe_queue *q, static inline unsigned int index_from_addr(const struct rxe_queue *q,
const void *addr) const void *addr)
{ {
return (((u8 *)addr - q->buf->data) >> q->log2_elem_size) return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
& q->index_mask; & q->index_mask;
}
static inline unsigned int queue_count(const struct rxe_queue *q)
{
return (q->buf->producer_index - q->buf->consumer_index)
& q->index_mask;
} }
static inline void *queue_head(struct rxe_queue *q) static inline void *queue_head(struct rxe_queue *q)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment