Commit 31929ae0 authored by Linus Torvalds

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd updates from Jason Gunthorpe:
 "Just two syzkaller fixes, both for the same basic issue: using the
  area pointer during an access forced unmap while the locks protecting
  it were let go"

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
  iommufd: Call iopt_area_contig_done() under the lock
  iommufd: Do not access the area pointer after unlocking
parents d35ac6ac dbe245cd
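
To make the bug class concrete, here is a minimal user-space sketch (the struct, area_lookup() and main() are hypothetical, and a pthread rwlock stands in for iopt->iova_rwsem). It illustrates the rule both fixes enforce: copy whatever is needed from the area while the lock is held, and never dereference the area pointer after the lock is dropped, because a concurrent access-forced unmap may free it at that point.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct area {
	size_t length;
	int num_accesses;
};

static pthread_rwlock_t iova_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static struct area demo_area = { .length = 4096, .num_accesses = 1 };

/* Stand-in lookup: the returned pointer is only valid while iova_rwsem
 * is held, mirroring iopt_area lifetime under iopt->iova_rwsem. */
static struct area *area_lookup(void)
{
	return &demo_area;
}

static size_t notify_unmap_pattern(void)
{
	pthread_rwlock_wrlock(&iova_rwsem);
	struct area *area = area_lookup();
	/* Copy everything needed from the object while the lock is held... */
	size_t length = area->length;
	pthread_rwlock_unlock(&iova_rwsem);

	/* ...and never dereference 'area' past this point: once the lock is
	 * released, a concurrent forced unmap may already have freed it. */
	return length;
}

int main(void)
{
	printf("length captured under the lock: %zu\n", notify_unmap_pattern());
	return 0;
}

The same discipline shows up in the iopt_unmap_iova_range() hunk below: iopt_area_length(area) is captured into a local length before up_write()/up_read(), and the post-unlock READ_ONCE(area->num_accesses) check is replaced by a bounded retry counter that never touches the area again.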
@@ -553,8 +553,8 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
 			iopt_area_iova_to_index(
 				area,
 				min(last_iova, iopt_area_last_iova(area))));
-	up_read(&iopt->iova_rwsem);
 	WARN_ON(!iopt_area_contig_done(&iter));
+	up_read(&iopt->iova_rwsem);
 }
 EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
@@ -458,6 +458,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
 {
 	struct iopt_area *area;
 	unsigned long unmapped_bytes = 0;
+	unsigned int tries = 0;
 	int rc = -ENOENT;
 
 	/*
@@ -484,19 +485,26 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
 			goto out_unlock_iova;
 		}
 
+		if (area_first != start)
+			tries = 0;
+
 		/*
 		 * num_accesses writers must hold the iova_rwsem too, so we can
 		 * safely read it under the write side of the iovam_rwsem
 		 * without the pages->mutex.
 		 */
 		if (area->num_accesses) {
+			size_t length = iopt_area_length(area);
+
 			start = area_first;
 			area->prevent_access = true;
 			up_write(&iopt->iova_rwsem);
 			up_read(&iopt->domains_rwsem);
-			iommufd_access_notify_unmap(iopt, area_first,
-						    iopt_area_length(area));
-			if (WARN_ON(READ_ONCE(area->num_accesses)))
+
+			iommufd_access_notify_unmap(iopt, area_first, length);
+			/* Something is not responding to unmap requests. */
+			tries++;
+			if (WARN_ON(tries > 100))
 				return -EDEADLOCK;
 			goto again;
 		}