Commit b12d691e authored by Christoph Hellwig, committed by Linus Torvalds

i915: fix remap_io_sg to verify the pgprot

remap_io_sg claims that the pgprot is pre-verified using an io_mapping,
but actually does not get passed an io_mapping and just uses the pgprot in
the VMA.  Remove the apply_to_page_range abuse and just loop over
remap_pfn_range for each segment.
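
For reference, the new per-segment loop distills to roughly the shape below. This is a simplified sketch of the non-DMA path taken from the diff further down; the helper name is invented for illustration and the error unwinding is elided, so it is not the patch itself:

/*
 * Sketch: hand each scatterlist segment to remap_pfn_range(), which takes
 * care of pgprot handling/tracking, instead of installing special PTEs by
 * hand via apply_to_page_range().  Non-DMA path only, unwinding elided.
 */
static int remap_io_sg_sketch(struct vm_area_struct *vma, unsigned long addr,
			      struct scatterlist *sgl)
{
	unsigned long remapped = 0;
	int err = 0;

	do {
		unsigned long pfn = page_to_pfn(sg_page(sgl));
		unsigned long len = sgl->length;

		err = remap_pfn_range(vma, addr + remapped, pfn, len,
				      vma->vm_page_prot);
		if (err)
			break;
		remapped += len;
	} while ((sgl = __sg_next(sgl)));

	return err;
}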

Note: this could use io_mapping_map_user by passing an iomap to
remap_io_sg if the maintainers can verify that the pgprot in the iomap in
the only caller is indeed the desired one here.
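
As a purely hypothetical illustration of that note, a per-segment loop built on io_mapping_map_user() (the helper added earlier in this series) might look roughly like the sketch below. The function name and the way the io_mapping would reach remap_io_sg are assumptions, not part of this patch:

/*
 * Hypothetical sketch only: map each DMA segment with the pgprot recorded
 * in the io_mapping rather than vma->vm_page_prot.  The io_mapping
 * plumbing from the caller is assumed and is not part of this commit.
 */
static int remap_io_sg_via_iomap(struct io_mapping *iomap,
				 struct vm_area_struct *vma,
				 unsigned long addr, struct scatterlist *sgl,
				 resource_size_t iobase)
{
	unsigned long remapped = 0;
	int err = 0;

	do {
		unsigned long pfn, len;

		if (!sg_dma_len(sgl))
			break;
		pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
		len = sg_dma_len(sgl);

		err = io_mapping_map_user(iomap, vma, addr + remapped, pfn, len);
		if (err)
			break;
		remapped += len;
	} while ((sgl = __sg_next(sgl)));

	if (err)
		zap_vma_ptes(vma, addr, remapped);
	return err;
}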

Link: https://lkml.kernel.org/r/20210326055505.1424432-5-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b739f125
drivers/gpu/drm/i915/i915_mm.c

@@ -28,46 +28,10 @@
 
 #include "i915_drv.h"
 
-struct remap_pfn {
-	struct mm_struct *mm;
-	unsigned long pfn;
-	pgprot_t prot;
-
-	struct sgt_iter sgt;
-	resource_size_t iobase;
-};
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
 
 #define use_dma(io) ((io) != -1)
 
-static inline unsigned long sgt_pfn(const struct remap_pfn *r)
-{
-	if (use_dma(r->iobase))
-		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
-	else
-		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
-}
-
-static int remap_sg(pte_t *pte, unsigned long addr, void *data)
-{
-	struct remap_pfn *r = data;
-
-	if (GEM_WARN_ON(!r->sgt.sgp))
-		return -EINVAL;
-
-	/* Special PTE are not associated with any struct page */
-	set_pte_at(r->mm, addr, pte,
-		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
-	r->pfn++; /* track insertions in case we need to unwind later */
-
-	r->sgt.curr += PAGE_SIZE;
-	if (r->sgt.curr >= r->sgt.max)
-		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
-
-	return 0;
-}
-
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
-
 /**
  * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -82,12 +46,7 @@ int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase)
 {
-	struct remap_pfn r = {
-		.mm = vma->vm_mm,
-		.prot = vma->vm_page_prot,
-		.sgt = __sgt_iter(sgl, use_dma(iobase)),
-		.iobase = iobase,
-	};
+	unsigned long pfn, len, remapped = 0;
 	int err;
 
 	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -96,11 +55,25 @@ int remap_io_sg(struct vm_area_struct *vma,
 	if (!use_dma(iobase))
 		flush_cache_range(vma, addr, size);
 
-	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
-	if (unlikely(err)) {
-		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
-		return err;
-	}
-
-	return 0;
+	do {
+		if (use_dma(iobase)) {
+			if (!sg_dma_len(sgl))
+				break;
+			pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
+			len = sg_dma_len(sgl);
+		} else {
+			pfn = page_to_pfn(sg_page(sgl));
+			len = sgl->length;
+		}
+
+		err = remap_pfn_range(vma, addr + remapped, pfn, len,
+				      vma->vm_page_prot);
+		if (err)
+			break;
+		remapped += len;
+	} while ((sgl = __sg_next(sgl)));
+
+	if (err)
+		zap_vma_ptes(vma, addr, remapped);
+	return err;
 }