Commit dcd0eeca authored by K. Y. Srinivasan, committed by Greg Kroah-Hartman

Drivers: hv: vmbus: Use the new virt_xx barrier code

Use the virt_xx barriers that have been defined for use in virtual machines.
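
For background on why the swap is safe and desirable, the sketch below paraphrases the virt_* mapping provided by include/asm-generic/barrier.h; it is a simplified illustration of the idea, not a verbatim copy of the header.

	/*
	 * mb()/rmb() are full hardware barriers: they also order MMIO and
	 * non-coherent DMA, which is stronger (and costlier) than what
	 * cache-coherent guest<->host shared memory needs.
	 *
	 * smp_mb()/smp_rmb() collapse to a plain compiler barrier() on
	 * !CONFIG_SMP kernels, which would be unsafe here: even a
	 * uniprocessor guest races with the host running on another
	 * physical CPU.
	 *
	 * virt_mb()/virt_rmb() always emit the SMP-strength barrier, so
	 * ordering stays correct at the lowest adequate cost.
	 */
	#define virt_mb()  __smp_mb()
	#define virt_rmb() __smp_rmb()
	#define virt_wmb() __smp_wmb()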
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d45faaee
@@ -33,14 +33,14 @@
 void hv_begin_read(struct hv_ring_buffer_info *rbi)
 {
 	rbi->ring_buffer->interrupt_mask = 1;
-	mb();
+	virt_mb();
 }
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 {
 
 	rbi->ring_buffer->interrupt_mask = 0;
-	mb();
+	virt_mb();
 
 	/*
 	 * Now check to see if the ring buffer is still empty.
@@ -68,12 +68,12 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 
 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 {
-	mb();
+	virt_mb();
 	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
 		return false;
 
 	/* check interrupt_mask before read_index */
-	rmb();
+	virt_rmb();
 	/*
 	 * This is the only case we need to signal when the
 	 * ring transitions from being empty to non-empty.
@@ -115,7 +115,7 @@ static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 	 * read index, we could miss sending the interrupt. Issue a full
 	 * memory barrier to address this.
 	 */
-	mb();
+	virt_mb();
 	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
 
 	/* If the other end is not blocked on write don't bother. */
@@ -371,7 +371,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 			sizeof(u64));
 
 	/* Issue a full memory barrier before updating the write index */
-	mb();
+	virt_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
@@ -447,7 +447,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	 * the writer may start writing to the read area once the read index
 	 * is updated.
 	 */
-	mb();
+	virt_mb();
 
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
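
The barrier placement the patch preserves is the classic ring-buffer signaling handshake. Below is a distilled, hypothetical rendering of the producer-side check from hv_need_to_signal() above, with field names simplified; it is a sketch of the pattern under those assumptions, not the kernel code itself.

	#include <linux/types.h>	/* u32, bool */
	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <asm/barrier.h>	/* virt_mb(), virt_rmb() */

	struct demo_ring {		/* hypothetical, simplified layout */
		u32 write_index;
		u32 read_index;
		u32 interrupt_mask;
	};

	/* Called after the producer has copied data and bumped write_index. */
	static bool demo_need_to_signal(u32 old_write, struct demo_ring *ring)
	{
		/* Order our write_index store before the interrupt_mask load. */
		virt_mb();
		if (READ_ONCE(ring->interrupt_mask))
			return false;	/* consumer has interrupts masked */

		/* Check interrupt_mask before read_index. */
		virt_rmb();

		/*
		 * Signal only on the empty -> non-empty transition: the ring
		 * was empty iff the read index still equals the write index
		 * as it was before our write.
		 */
		return old_write == READ_ONCE(ring->read_index);
	}

Replacing mb()/rmb() with virt_mb()/virt_rmb() keeps exactly this ordering between guest and host while dropping the MMIO/DMA-ordering strength the ring buffer never needed.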