Commit da095a99 authored by Stefano Stabellini, committed by David Vrabel

xen/arm: introduce GNTTABOP_cache_flush

Introduce support for the new hypercall GNTTABOP_cache_flush.
Use it to perform cache flushing on pages used for dma when necessary.

If GNTTABOP_cache_flush is supported by the hypervisor, we don't need to
bounce dma map operations that involve foreign grants and non-coherent
devices.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
parent a4dba130
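
A quick orientation before the diff: the short sketch below is not part of the patch; the helper name example_cache_clean and the exact include choices are illustrative. It shows how one page-contained range would be cleaned with the new op, following the same call pattern the patch adds to dma_cache_maint().

#include <linux/types.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

/* Illustrative only: clean [offset, offset + len) of the page behind
 * page_addr so the device sees up-to-date RAM before reading it.
 * page_addr must be a page-aligned bus address and the range must not
 * cross the end of that page. */
static void example_cache_clean(dma_addr_t page_addr,
				unsigned long offset, size_t len)
{
	struct gnttab_cache_flush cflush;

	cflush.a.dev_bus_addr = page_addr;	/* which page */
	cflush.offset = offset;			/* where within the page */
	cflush.length = len;			/* how much to maintain */
	cflush.op = GNTTAB_CACHE_CLEAN;		/* write back dirty lines */

	HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}
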
@@ -12,6 +12,7 @@
#include <linux/swiotlb.h>
#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>
@@ -24,12 +25,14 @@ enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;
/* functions called by SWIOTLB */
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;
@@ -39,7 +42,26 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	do {
		size_t len = left;

		/* TODO: cache flush */
		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}

		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
@@ -104,7 +126,7 @@ bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn)
{
	return ((pfn != mfn) && !is_device_dma_coherent(dev));
	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
}
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
@@ -147,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);
@@ -478,6 +478,25 @@ struct gnttab_get_version {
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
/*
 * Issue one or more cache maintenance operations on a portion of a
 * page granted to the calling domain by a foreign domain.
 */
#define GNTTABOP_cache_flush 12
struct gnttab_cache_flush {
	union {
		uint64_t dev_bus_addr;
		grant_ref_t ref;
	} a;
	uint16_t offset; /* offset from start of grant */
	uint16_t length; /* size within the grant */
#define GNTTAB_CACHE_CLEAN (1<<0)
#define GNTTAB_CACHE_INVAL (1<<1)
#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
	uint32_t op;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
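
For completeness, a hypothetical example of the grant-reference form, which the ARM code above does not use: setting GNTTAB_CACHE_SOURCE_GREF in op tells the hypervisor to identify the page by a.ref rather than by a.dev_bus_addr. The helper name and the 64-byte length are made up for illustration.

#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

/* Hypothetical: invalidate the first 64 bytes of a page we were granted,
 * naming the page by grant reference instead of bus address. */
static int example_inval_by_gref(grant_ref_t ref)
{
	struct gnttab_cache_flush cflush;

	cflush.a.ref = ref;
	cflush.offset = 0;
	cflush.length = 64;
	cflush.op = GNTTAB_CACHE_INVAL | GNTTAB_CACHE_SOURCE_GREF;

	/* Returns -ENOSYS if the hypervisor lacks the op, as the probe in
	 * xen_mm_init() above relies on. */
	return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}
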
/*
 * Bitfield values for update_pin_status.flags.
 */