Commit 293837b9 authored by Linus Torvalds

Revert "i915: fix remap_io_sg to verify the pgprot"

This reverts commit b12d691e.

It turns out this is not ready for primetime yet.  The intentions are
good, but using remap_pfn_range() requires that there is nothing already
mapped in the area, and the i915 code seems to very much intentionally
remap the same area multiple times.

That will then just trigger the

                BUG_ON(!pte_none(*pte));

in mm/memory.c: remap_pte_range().
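
For context, that assertion sits in the PTE-installation loop of
remap_pte_range().  A rough paraphrase of that loop (not verbatim
mm/memory.c source) shows why an already-populated PTE is fatal there:

                do {
                        /* remap_pfn_range() expects to own an empty range */
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, addr, pte,
                                   pte_mkspecial(pfn_pte(pfn, prot)));
                        pfn++;
                } while (pte++, addr += PAGE_SIZE, addr != end);

Because the i915 code revisits an area whose PTEs are already populated,
the !pte_none() check trips.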

There are also reports of mapping type inconsistencies, resulting in
warnings and in screen corruption.

Link: https://lore.kernel.org/lkml/20210519024322.GA29704@xsang-OptiPlex-9020/
Link: https://lore.kernel.org/lkml/YKUjvoaKKggAmpIR@sf/
Link: https://lore.kernel.org/lkml/b6b61cf0-5874-f4c0-1fcc-4b3848451c31@redhat.com/
Reported-by: kernel test robot <oliver.sang@intel.com>
Reported-by: Kalle Valo <kvalo@codeaurora.org>
Reported-by: Hans de Goede <hdegoede@redhat.com>
Reported-by: Sergei Trofimovich <slyfox@gentoo.org>
Acked-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8ac91e6c
@@ -28,10 +28,46 @@
 
 #include "i915_drv.h"
 
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+	struct mm_struct *mm;
+	unsigned long pfn;
+	pgprot_t prot;
+
+	struct sgt_iter sgt;
+	resource_size_t iobase;
+};
 
 #define use_dma(io) ((io) != -1)
 
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+	if (use_dma(r->iobase))
+		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+	else
+		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	if (GEM_WARN_ON(!r->sgt.sgp))
+		return -EINVAL;
+
+	/* Special PTE are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte,
+		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+	r->pfn++; /* track insertions in case we need to unwind later */
+
+	r->sgt.curr += PAGE_SIZE;
+	if (r->sgt.curr >= r->sgt.max)
+		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+	return 0;
+}
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
 /**
  * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -46,7 +82,12 @@ int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase)
 {
-	unsigned long pfn, len, remapped = 0;
+	struct remap_pfn r = {
+		.mm = vma->vm_mm,
+		.prot = vma->vm_page_prot,
+		.sgt = __sgt_iter(sgl, use_dma(iobase)),
+		.iobase = iobase,
+	};
 	int err;
 
 	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
 	if (!use_dma(iobase))
 		flush_cache_range(vma, addr, size);
 
-	do {
-		if (use_dma(iobase)) {
-			if (!sg_dma_len(sgl))
-				break;
-			pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
-			len = sg_dma_len(sgl);
-		} else {
-			pfn = page_to_pfn(sg_page(sgl));
-			len = sgl->length;
-		}
-
-		err = remap_pfn_range(vma, addr + remapped, pfn, len,
-				      vma->vm_page_prot);
-		if (err)
-			break;
-		remapped += len;
-	} while ((sgl = __sg_next(sgl)));
-
-	if (err)
-		zap_vma_ptes(vma, addr, remapped);
-	return err;
+	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
 }
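
The restored path walks the user range with apply_to_page_range(), which,
unlike remap_pfn_range(), does not insist that the range be empty: it hands
each PTE slot to the remap_sg() callback above, which installs a special PTE
for the current scatterlist position and bumps r.pfn so that a failure can be
unwound precisely via zap_vma_ptes().  A minimal sketch of that callback
pattern follows; the names my_cursor and fill_one_pte are made up for
illustration and are not part of the i915 code:

	/* Illustrative only -- not the i915 implementation. */
	struct my_cursor {
		struct mm_struct *mm;	/* target address space */
		unsigned long pfn;	/* next page frame to insert */
		pgprot_t prot;		/* protection bits for the PTEs */
	};

	static int fill_one_pte(pte_t *pte, unsigned long addr, void *data)
	{
		struct my_cursor *c = data;

		/* Install a "special" PTE with no struct page behind it. */
		set_pte_at(c->mm, addr, pte,
			   pte_mkspecial(pfn_pte(c->pfn++, c->prot)));
		return 0;	/* a non-zero return aborts the walk */
	}

	/* fill_one_pte() is called once per PTE slot in [addr, addr + size) */
	err = apply_to_page_range(c.mm, addr, size, fill_one_pte, &c);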