Commit 4debf771 authored by Linus Torvalds

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd updates from Jason Gunthorpe:
 "On top of the vfio updates is built some new iommufd functionality:

   - IOMMU_HWPT_ALLOC allows userspace to directly create the low level
     IO Page table objects and affiliate them with IOAS objects that
     hold the translation mapping. This is the basic functionality for
     the normal IOMMU_DOMAIN_PAGING domains.

   - VFIO_DEVICE_ATTACH_IOMMUFD_PT can be used to replace the current
     translation. This is wired up through all the layers down to the
     driver so the driver has the ability to implement a hitless
     replacement. This is necessary to fully support guest behaviors
     when emulating HW (eg guest atomic change of translation)

   - IOMMU_GET_HW_INFO returns information about the IOMMU driver HW
     that owns a VFIO device. This includes support for the Intel iommu,
     and patches have been posted for all the other server IOMMUs.

  Along the way are a number of internal items:

   - New iommufd kernel APIs: iommufd_ctx_has_group(),
        iommufd_device_to_ictx(), iommufd_device_to_id(),
        iommufd_access_detach(), iommufd_ctx_from_fd(),
        iommufd_device_replace()

   - iommufd now internally tracks iommu_groups as it needs some
     per-group data

   - Reorganize how the internal hwpt allocation flows to have more
     robust locking

   - Improve the access interfaces to support detach and replace of an
     IOAS from an access

   - New selftests and a rework of how the selftests creates a mock
     iommu driver to be more like a real iommu driver"

Link: https://lore.kernel.org/lkml/ZO%2FTe6LU1ENf58ZW@nvidia.com/
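A minimal userspace sketch of the flow these ioctls enable (illustrative
only, not from the series itself: it assumes an open /dev/iommu fd, an
existing IOAS id, and a VFIO cdev device fd already bound with
VFIO_DEVICE_BIND_IOMMUFD, and it elides error handling):

    #include <sys/ioctl.h>
    #include <linux/iommufd.h>
    #include <linux/vfio.h>

    /* Allocate a HWPT backed by ioas_id for the bound device dev_id */
    static int alloc_hwpt(int iommufd, __u32 dev_id, __u32 ioas_id,
                          __u32 *out_hwpt_id)
    {
            struct iommu_hwpt_alloc cmd = {
                    .size = sizeof(cmd),
                    .dev_id = dev_id,
                    .pt_id = ioas_id,
            };

            if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
                    return -1;
            *out_hwpt_id = cmd.out_hwpt_id;
            return 0;
    }

    /* Attach the device; calling this again with a new id is a replace */
    static int attach_hwpt(int device_fd, __u32 hwpt_id)
    {
            struct vfio_device_attach_iommufd_pt attach = {
                    .argsz = sizeof(attach),
                    .pt_id = hwpt_id,
            };

            return ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
    }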

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd: (34 commits)
  iommufd/selftest: Don't leak the platform device memory when unloading the module
  iommu/vt-d: Implement hw_info for iommu capability query
  iommufd/selftest: Add coverage for IOMMU_GET_HW_INFO ioctl
  iommufd: Add IOMMU_GET_HW_INFO
  iommu: Add new iommu op to get iommu hardware information
  iommu: Move dev_iommu_ops() to private header
  iommufd: Remove iommufd_ref_to_users()
  iommufd/selftest: Make the mock iommu driver into a real driver
  vfio: Support IO page table replacement
  iommufd/selftest: Add IOMMU_TEST_OP_ACCESS_REPLACE_IOAS coverage
  iommufd: Add iommufd_access_replace() API
  iommufd: Use iommufd_access_change_ioas in iommufd_access_destroy_object
  iommufd: Add iommufd_access_change_ioas(_id) helpers
  iommufd: Allow passing in iopt_access_list_id to iopt_remove_access()
  vfio: Do not allow !ops->dma_unmap in vfio_pin/unpin_pages()
  iommufd/selftest: Add a selftest for IOMMU_HWPT_ALLOC
  iommufd/selftest: Return the real idev id from selftest mock_domain
  iommufd: Add IOMMU_HWPT_ALLOC
  iommufd/selftest: Test iommufd_device_replace()
  iommufd: Make destroy_rwsem use a lock class per object type
  ...
parents ec0e2dc8 eb501c2d
drivers/iommu/intel/iommu.c
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
+#include <uapi/linux/iommufd.h>

 #include "iommu.h"
 #include "../dma-iommu.h"
@@ -4732,8 +4733,26 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 	intel_pasid_tear_down_entry(iommu, dev, pasid, false);
 }

+static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct iommu_hw_info_vtd *vtd;
+
+	vtd = kzalloc(sizeof(*vtd), GFP_KERNEL);
+	if (!vtd)
+		return ERR_PTR(-ENOMEM);
+
+	vtd->cap_reg = iommu->cap;
+	vtd->ecap_reg = iommu->ecap;
+	*length = sizeof(*vtd);
+	*type = IOMMU_HW_INFO_TYPE_INTEL_VTD;
+	return vtd;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
+	.hw_info		= intel_iommu_hw_info,
 	.domain_alloc		= intel_iommu_domain_alloc,
 	.probe_device		= intel_iommu_probe_device,
 	.probe_finalize		= intel_iommu_probe_finalize,
drivers/iommu/iommu-priv.h (new file)
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_IOMMU_PRIV_H
+#define __LINUX_IOMMU_PRIV_H
+
+#include <linux/iommu.h>
+
+static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
+{
+	/*
+	 * Assume that valid ops must be installed if iommu_probe_device()
+	 * has succeeded. The device ops are essentially for internal use
+	 * within the IOMMU subsystem itself, so we should be able to trust
+	 * ourselves not to misuse the helper.
+	 */
+	return dev->iommu->iommu_dev->ops;
+}
+
+int iommu_group_replace_domain(struct iommu_group *group,
+			       struct iommu_domain *new_domain);
+
+int iommu_device_register_bus(struct iommu_device *iommu,
+			      const struct iommu_ops *ops, struct bus_type *bus,
+			      struct notifier_block *nb);
+void iommu_device_unregister_bus(struct iommu_device *iommu,
+				 struct bus_type *bus,
+				 struct notifier_block *nb);
+
+#endif /* __LINUX_IOMMU_PRIV_H */
drivers/iommu/iommu.c
@@ -34,8 +34,10 @@
 #include <linux/msi.h>

 #include "dma-iommu.h"
+#include "iommu-priv.h"

 #include "iommu-sva.h"
+#include "iommu-priv.h"

 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
@@ -287,6 +289,48 @@ void iommu_device_unregister(struct iommu_device *iommu)
 }
 EXPORT_SYMBOL_GPL(iommu_device_unregister);

+#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
+void iommu_device_unregister_bus(struct iommu_device *iommu,
+				 struct bus_type *bus,
+				 struct notifier_block *nb)
+{
+	bus_unregister_notifier(bus, nb);
+	iommu_device_unregister(iommu);
+}
+EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
+
+/*
+ * Register an iommu driver against a single bus. This is only used by iommufd
+ * selftest to create a mock iommu driver. The caller must provide
+ * some memory to hold a notifier_block.
+ */
+int iommu_device_register_bus(struct iommu_device *iommu,
+			      const struct iommu_ops *ops, struct bus_type *bus,
+			      struct notifier_block *nb)
+{
+	int err;
+
+	iommu->ops = ops;
+	nb->notifier_call = iommu_bus_notifier;
+	err = bus_register_notifier(bus, nb);
+	if (err)
+		return err;
+
+	spin_lock(&iommu_device_lock);
+	list_add_tail(&iommu->list, &iommu_device_list);
+	spin_unlock(&iommu_device_lock);
+
+	bus->iommu_ops = ops;
+	err = bus_iommu_probe(bus);
+	if (err) {
+		iommu_device_unregister_bus(iommu, bus, nb);
+		return err;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+#endif
+
 static struct dev_iommu *dev_iommu_get(struct device *dev)
 {
 	struct dev_iommu *param = dev->iommu;
@@ -2114,6 +2158,32 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_attach_group);

+/**
+ * iommu_group_replace_domain - replace the domain that a group is attached to
+ * @new_domain: new IOMMU domain to replace with
+ * @group: IOMMU group that will be attached to the new domain
+ *
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * it will act just like the iommu_attach_group().
+ */
+int iommu_group_replace_domain(struct iommu_group *group,
+			       struct iommu_domain *new_domain)
+{
+	int ret;
+
+	if (!new_domain)
+		return -EINVAL;
+
+	mutex_lock(&group->mutex);
+	ret = __iommu_group_set_domain(group, new_domain);
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL);
+
 static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
@@ -2642,6 +2712,14 @@ int iommu_set_pgtable_quirks(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

+/**
+ * iommu_get_resv_regions - get reserved regions
+ * @dev: device for which to get reserved regions
+ * @list: reserved region list for device
+ *
+ * This returns a list of reserved IOVA regions specific to this device.
+ * A domain user should not map IOVA in these ranges.
+ */
 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev_iommu_ops(dev);
@@ -2649,9 +2727,10 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 	if (ops->get_resv_regions)
 		ops->get_resv_regions(dev, list);
 }
+EXPORT_SYMBOL_GPL(iommu_get_resv_regions);

 /**
- * iommu_put_resv_regions - release resered regions
+ * iommu_put_resv_regions - release reserved regions
  * @dev: device for which to free reserved regions
  * @list: reserved region list for device
  *
drivers/iommu/iommufd/device.c (diff collapsed)
drivers/iommu/iommufd/hw_pagetable.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
  */
 #include <linux/iommu.h>
+#include <uapi/linux/iommufd.h>

 #include "iommufd_private.h"
@@ -11,8 +12,6 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
 	struct iommufd_hw_pagetable *hwpt =
		container_of(obj, struct iommufd_hw_pagetable, obj);

-	WARN_ON(!list_empty(&hwpt->devices));
-
 	if (!list_empty(&hwpt->hwpt_item)) {
 		mutex_lock(&hwpt->ioas->mutex);
 		list_del(&hwpt->hwpt_item);
@@ -25,7 +24,35 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
 	iommu_domain_free(hwpt->domain);
 	refcount_dec(&hwpt->ioas->obj.users);
-	mutex_destroy(&hwpt->devices_lock);
+}
+
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj)
+{
+	struct iommufd_hw_pagetable *hwpt =
+		container_of(obj, struct iommufd_hw_pagetable, obj);
+
+	/* The ioas->mutex must be held until finalize is called. */
+	lockdep_assert_held(&hwpt->ioas->mutex);
+
+	if (!list_empty(&hwpt->hwpt_item)) {
+		list_del_init(&hwpt->hwpt_item);
+		iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
+	}
+	iommufd_hw_pagetable_destroy(obj);
+}
+
+int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
+{
+	if (hwpt->enforce_cache_coherency)
+		return 0;
+
+	if (hwpt->domain->ops->enforce_cache_coherency)
+		hwpt->enforce_cache_coherency =
+			hwpt->domain->ops->enforce_cache_coherency(
+				hwpt->domain);
+	if (!hwpt->enforce_cache_coherency)
+		return -EINVAL;
+	return 0;
 }

 /**
@@ -38,6 +65,10 @@
  * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
  * will be linked to the given ioas and upon return the underlying iommu_domain
  * is fully populated.
+ *
+ * The caller must hold the ioas->mutex until after
+ * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
+ * the returned hwpt.
  */
 struct iommufd_hw_pagetable *
 iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
@@ -52,9 +83,7 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 	if (IS_ERR(hwpt))
 		return hwpt;

-	INIT_LIST_HEAD(&hwpt->devices);
 	INIT_LIST_HEAD(&hwpt->hwpt_item);
-	mutex_init(&hwpt->devices_lock);
 	/* Pairs with iommufd_hw_pagetable_destroy() */
 	refcount_inc(&ioas->obj.users);
 	hwpt->ioas = ioas;
@@ -65,7 +94,18 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 		goto out_abort;
 	}

-	mutex_lock(&hwpt->devices_lock);
+	/*
+	 * Set the coherency mode before we do iopt_table_add_domain() as some
+	 * iommus have a per-PTE bit that controls it and need to decide before
+	 * doing any maps. It is an iommu driver bug to report
+	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
+	 * a new domain.
+	 */
+	if (idev->enforce_cache_coherency) {
+		rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+		if (WARN_ON(rc))
+			goto out_abort;
+	}

 	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
@@ -76,30 +116,64 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 	if (immediate_attach) {
 		rc = iommufd_hw_pagetable_attach(hwpt, idev);
 		if (rc)
-			goto out_unlock;
+			goto out_abort;
 	}

 	rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain);
 	if (rc)
 		goto out_detach;
 	list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
-
-	if (immediate_attach) {
-		/* See iommufd_device_do_attach() */
-		refcount_inc(&hwpt->obj.users);
-		idev->hwpt = hwpt;
-		list_add(&idev->devices_item, &hwpt->devices);
-	}
-
-	mutex_unlock(&hwpt->devices_lock);
 	return hwpt;

 out_detach:
 	if (immediate_attach)
-		iommufd_hw_pagetable_detach(hwpt, idev);
-out_unlock:
-	mutex_unlock(&hwpt->devices_lock);
+		iommufd_hw_pagetable_detach(idev);
 out_abort:
 	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
 	return ERR_PTR(rc);
 }
+
+int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
+	struct iommufd_hw_pagetable *hwpt;
+	struct iommufd_device *idev;
+	struct iommufd_ioas *ioas;
+	int rc;
+
+	if (cmd->flags || cmd->__reserved)
+		return -EOPNOTSUPP;
+
+	idev = iommufd_get_device(ucmd, cmd->dev_id);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
+
+	ioas = iommufd_get_ioas(ucmd->ictx, cmd->pt_id);
+	if (IS_ERR(ioas)) {
+		rc = PTR_ERR(ioas);
+		goto out_put_idev;
+	}
+
+	mutex_lock(&ioas->mutex);
+	hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false);
+	if (IS_ERR(hwpt)) {
+		rc = PTR_ERR(hwpt);
+		goto out_unlock;
+	}
+
+	cmd->out_hwpt_id = hwpt->obj.id;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+	if (rc)
+		goto out_hwpt;
+	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
+	goto out_unlock;
+
+out_hwpt:
+	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
+out_unlock:
+	mutex_unlock(&ioas->mutex);
+	iommufd_put_object(&ioas->obj);
+out_put_idev:
+	iommufd_put_object(&idev->obj);
+	return rc;
+}
drivers/iommu/iommufd/io_pagetable.c
@@ -1158,36 +1158,36 @@ int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
 }

 void iopt_remove_access(struct io_pagetable *iopt,
-			struct iommufd_access *access)
+			struct iommufd_access *access,
+			u32 iopt_access_list_id)
 {
 	down_write(&iopt->domains_rwsem);
 	down_write(&iopt->iova_rwsem);
-	WARN_ON(xa_erase(&iopt->access_list, access->iopt_access_list_id) !=
-		access);
+	WARN_ON(xa_erase(&iopt->access_list, iopt_access_list_id) != access);
 	WARN_ON(iopt_calculate_iova_alignment(iopt));
 	up_write(&iopt->iova_rwsem);
 	up_write(&iopt->domains_rwsem);
 }

-/* Narrow the valid_iova_itree to include reserved ranges from a group. */
-int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
-					  struct device *device,
-					  struct iommu_group *group,
-					  phys_addr_t *sw_msi_start)
+/* Narrow the valid_iova_itree to include reserved ranges from a device. */
+int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
+					struct device *dev,
+					phys_addr_t *sw_msi_start)
 {
 	struct iommu_resv_region *resv;
-	struct iommu_resv_region *tmp;
-	LIST_HEAD(group_resv_regions);
+	LIST_HEAD(resv_regions);
 	unsigned int num_hw_msi = 0;
 	unsigned int num_sw_msi = 0;
 	int rc;

+	if (iommufd_should_fail())
+		return -EINVAL;
+
 	down_write(&iopt->iova_rwsem);
-	rc = iommu_get_group_resv_regions(group, &group_resv_regions);
-	if (rc)
-		goto out_unlock;
+	/* FIXME: drivers allocate memory but there is no failure propagated */
+	iommu_get_resv_regions(dev, &resv_regions);

-	list_for_each_entry(resv, &group_resv_regions, list) {
+	list_for_each_entry(resv, &resv_regions, list) {
 		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
 			continue;
@@ -1199,7 +1199,7 @@
 		}

 		rc = iopt_reserve_iova(iopt, resv->start,
-				       resv->length - 1 + resv->start, device);
+				       resv->length - 1 + resv->start, dev);
 		if (rc)
 			goto out_reserved;
 	}
@@ -1214,11 +1214,9 @@
 	goto out_free_resv;

 out_reserved:
-	__iopt_remove_reserved_iova(iopt, device);
+	__iopt_remove_reserved_iova(iopt, dev);
 out_free_resv:
-	list_for_each_entry_safe(resv, tmp, &group_resv_regions, list)
-		kfree(resv);
-out_unlock:
+	iommu_put_resv_regions(dev, &resv_regions);
 	up_write(&iopt->iova_rwsem);
 	return rc;
 }
drivers/iommu/iommufd/iommufd_private.h
@@ -17,6 +17,7 @@ struct iommufd_device;
 struct iommufd_ctx {
 	struct file *file;
 	struct xarray objects;
+	struct xarray groups;

 	u8 account_mode;
 	/* Compatibility with VFIO no iommu */
@@ -75,9 +76,8 @@ int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
 void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
-int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
-					  struct device *device,
-					  struct iommu_group *group,
-					  phys_addr_t *sw_msi_start);
+int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
+					struct device *dev,
+					phys_addr_t *sw_msi_start);
 int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
@@ -119,6 +119,7 @@ enum iommufd_object_type {
 #ifdef CONFIG_IOMMUFD_TEST
 	IOMMUFD_OBJ_SELFTEST,
 #endif
+	IOMMUFD_OBJ_MAX,
 };

 /* Base struct for all objects with a userspace ID handle. */
@@ -148,29 +149,6 @@ static inline void iommufd_put_object(struct iommufd_object *obj)
 	up_read(&obj->destroy_rwsem);
 }

-/**
- * iommufd_ref_to_users() - Switch from destroy_rwsem to users refcount
- * protection
- * @obj - Object to release
- *
- * Objects have two refcount protections (destroy_rwsem and the refcount_t
- * users). Holding either of these will prevent the object from being destroyed.
- *
- * Depending on the use case, one protection or the other is appropriate. In
- * most cases references are being protected by the destroy_rwsem. This allows
- * orderly destruction of the object because iommufd_object_destroy_user() will
- * wait for it to become unlocked. However, as a rwsem, it cannot be held across
- * a system call return. So cases that have longer term needs must switch
- * to the weaker users refcount_t.
- *
- * With users protection iommufd_object_destroy_user() will return false,
- * refusing to destroy the object, causing -EBUSY to userspace.
- */
-static inline void iommufd_ref_to_users(struct iommufd_object *obj)
-{
-	up_read(&obj->destroy_rwsem);
-	/* iommufd_lock_obj() obtains users as well */
-}
 void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
 void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
@@ -260,18 +238,39 @@ struct iommufd_hw_pagetable {
 	bool msi_cookie : 1;
 	/* Head at iommufd_ioas::hwpt_list */
 	struct list_head hwpt_item;
-	struct mutex devices_lock;
-	struct list_head devices;
 };

 struct iommufd_hw_pagetable *
 iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			   struct iommufd_device *idev, bool immediate_attach);
+int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt);
 int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev);
-void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
-				 struct iommufd_device *idev);
+struct iommufd_hw_pagetable *
+iommufd_hw_pagetable_detach(struct iommufd_device *idev);
 void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj);
+int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
+
+static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
+					    struct iommufd_hw_pagetable *hwpt)
+{
+	lockdep_assert_not_held(&hwpt->ioas->mutex);
+	if (hwpt->auto_domain)
+		iommufd_object_deref_user(ictx, &hwpt->obj);
+	else
+		refcount_dec(&hwpt->obj.users);
+}
+
+struct iommufd_group {
+	struct kref ref;
+	struct mutex lock;
+	struct iommufd_ctx *ictx;
+	struct iommu_group *group;
+	struct iommufd_hw_pagetable *hwpt;
+	struct list_head device_list;
+	phys_addr_t sw_msi_start;
+};

 /*
  * A iommufd_device object represents the binding relationship between a
@@ -281,16 +280,23 @@
 struct iommufd_device {
 	struct iommufd_object obj;
 	struct iommufd_ctx *ictx;
-	struct iommufd_hw_pagetable *hwpt;
-	/* Head at iommufd_hw_pagetable::devices */
-	struct list_head devices_item;
+	struct iommufd_group *igroup;
+	struct list_head group_item;
 	/* always the physical device */
 	struct device *dev;
-	struct iommu_group *group;
 	bool enforce_cache_coherency;
 };

+static inline struct iommufd_device *
+iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
+{
+	return container_of(iommufd_get_object(ucmd->ictx, id,
+					       IOMMUFD_OBJ_DEVICE),
+			    struct iommufd_device, obj);
+}
+
 void iommufd_device_destroy(struct iommufd_object *obj);
+int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

 struct iommufd_access {
 	struct iommufd_object obj;
@@ -306,7 +312,8 @@
 int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
 void iopt_remove_access(struct io_pagetable *iopt,
-			struct iommufd_access *access);
+			struct iommufd_access *access,
+			u32 iopt_access_list_id);
 void iommufd_access_destroy_object(struct iommufd_object *obj);

 #ifdef CONFIG_IOMMUFD_TEST
@@ -316,7 +323,7 @@ extern size_t iommufd_test_memory_limit;
 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
 bool iommufd_should_fail(void);
-void __init iommufd_test_init(void);
+int __init iommufd_test_init(void);
 void iommufd_test_exit(void);
 bool iommufd_selftest_is_mock_dev(struct device *dev);
 #else
@@ -329,8 +336,9 @@ static inline bool iommufd_should_fail(void)
 {
 	return false;
 }
-static inline void __init iommufd_test_init(void)
+static inline int __init iommufd_test_init(void)
 {
+	return 0;
 }
 static inline void iommufd_test_exit(void)
 {
drivers/iommu/iommufd/iommufd_test.h
@@ -17,6 +17,8 @@ enum {
 	IOMMU_TEST_OP_ACCESS_PAGES,
 	IOMMU_TEST_OP_ACCESS_RW,
 	IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
+	IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
+	IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
 };

 enum {
@@ -51,7 +53,12 @@ struct iommu_test_cmd {
 		struct {
 			__u32 out_stdev_id;
 			__u32 out_hwpt_id;
+			/* out_idev_id is the standard iommufd_bind object */
+			__u32 out_idev_id;
 		} mock_domain;
+		struct {
+			__u32 pt_id;
+		} mock_domain_replace;
 		struct {
 			__aligned_u64 iova;
 			__aligned_u64 length;
@@ -85,9 +92,21 @@ struct iommu_test_cmd {
 		struct {
 			__u32 limit;
 		} memory_limit;
+		struct {
+			__u32 ioas_id;
+		} access_replace_ioas;
 	};
 	__u32 last;
 };
 #define IOMMU_TEST_CMD _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE + 32)

+/* Mock structs for IOMMU_DEVICE_GET_HW_INFO ioctl */
+#define IOMMU_HW_INFO_TYPE_SELFTEST	0xfeedbeef
+#define IOMMU_HW_INFO_SELFTEST_REGVAL	0xdeadbeef
+
+struct iommu_test_hw_info {
+	__u32 flags;
+	__u32 test_reg;
+};
+
 #endif
drivers/iommu/iommufd/main.c
@@ -24,6 +24,7 @@
 struct iommufd_object_ops {
 	void (*destroy)(struct iommufd_object *obj);
+	void (*abort)(struct iommufd_object *obj);
 };
 static const struct iommufd_object_ops iommufd_object_ops[];
 static struct miscdevice vfio_misc_dev;
@@ -32,6 +33,7 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type)
 {
+	static struct lock_class_key obj_keys[IOMMUFD_OBJ_MAX];
 	struct iommufd_object *obj;
 	int rc;
@@ -39,7 +41,15 @@
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 	obj->type = type;
-	init_rwsem(&obj->destroy_rwsem);
+	/*
+	 * In most cases the destroy_rwsem is obtained with try so it doesn't
+	 * interact with lockdep, however on destroy we have to sleep. This
+	 * means if we have to destroy an object while holding a get on another
+	 * object it triggers lockdep. Using one locking class per object type
+	 * is a simple and reasonable way to avoid this.
+	 */
+	__init_rwsem(&obj->destroy_rwsem, "iommufd_object::destroy_rwsem",
+		     &obj_keys[type]);
 	refcount_set(&obj->users, 1);

 	/*
@@ -95,6 +105,9 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
 void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
 {
-	iommufd_object_ops[obj->type].destroy(obj);
+	if (iommufd_object_ops[obj->type].abort)
+		iommufd_object_ops[obj->type].abort(obj);
+	else
+		iommufd_object_ops[obj->type].destroy(obj);
 	iommufd_object_abort(ictx, obj);
 }
@@ -223,6 +236,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
 	}

 	xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
+	xa_init(&ictx->groups);
 	ictx->file = filp;
 	filp->private_data = ictx;
 	return 0;
@@ -258,6 +272,7 @@ static int iommufd_fops_release(struct inode *inode, struct file *filp)
 		if (WARN_ON(!destroyed))
 			break;
 	}
+	WARN_ON(!xa_empty(&ictx->groups));
 	kfree(ictx);
 	return 0;
 }
@@ -290,6 +305,8 @@ static int iommufd_option(struct iommufd_ucmd *ucmd)
 union ucmd_buffer {
 	struct iommu_destroy destroy;
+	struct iommu_hw_info info;
+	struct iommu_hwpt_alloc hwpt;
 	struct iommu_ioas_alloc alloc;
 	struct iommu_ioas_allow_iovas allow_iovas;
 	struct iommu_ioas_copy ioas_copy;
@@ -321,6 +338,10 @@ struct iommufd_ioctl_op {
 	}
 static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
 	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
+	IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
+		 __reserved),
+	IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
+		 __reserved),
 	IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
		 struct iommu_ioas_alloc, out_ioas_id),
 	IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas,
@@ -463,6 +484,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
 	},
 	[IOMMUFD_OBJ_HW_PAGETABLE] = {
 		.destroy = iommufd_hw_pagetable_destroy,
+		.abort = iommufd_hw_pagetable_abort,
 	},
 #ifdef CONFIG_IOMMUFD_TEST
 	[IOMMUFD_OBJ_SELFTEST] = {
@@ -501,8 +523,14 @@ static int __init iommufd_init(void)
 		if (ret)
 			goto err_misc;
 	}
-	iommufd_test_init();
+	ret = iommufd_test_init();
+	if (ret)
+		goto err_vfio_misc;
 	return 0;
+
+err_vfio_misc:
+	if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER))
+		misc_deregister(&vfio_misc_dev);
 err_misc:
 	misc_deregister(&iommu_misc_dev);
 	return ret;
@@ -523,5 +551,6 @@ module_exit(iommufd_exit);
 MODULE_ALIAS_MISCDEV(VFIO_MINOR);
 MODULE_ALIAS("devname:vfio/vfio");
 #endif
+MODULE_IMPORT_NS(IOMMUFD_INTERNAL);
 MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices");
 MODULE_LICENSE("GPL");
drivers/iommu/iommufd/selftest.c
@@ -9,14 +9,17 @@
 #include <linux/file.h>
 #include <linux/anon_inodes.h>
 #include <linux/fault-inject.h>
+#include <linux/platform_device.h>
 #include <uapi/linux/iommufd.h>

+#include "../iommu-priv.h"
 #include "io_pagetable.h"
 #include "iommufd_private.h"
 #include "iommufd_test.h"

 static DECLARE_FAULT_ATTR(fail_iommufd);
 static struct dentry *dbgfs_root;
+static struct platform_device *selftest_iommu_dev;

 size_t iommufd_test_memory_limit = 65536;
@@ -128,6 +131,21 @@ static struct iommu_domain mock_blocking_domain = {
 	.ops = &mock_blocking_ops,
 };

+static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
+{
+	struct iommu_test_hw_info *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
+	*length = sizeof(*info);
+	*type = IOMMU_HW_INFO_TYPE_SELFTEST;
+
+	return info;
+}
+
 static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
 {
 	struct mock_iommu_domain *mock;
@@ -135,7 +153,7 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
 	if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
 		return &mock_blocking_domain;

-	if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED))
+	if (iommu_domain_type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;

 	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -276,12 +294,23 @@ static void mock_domain_set_plaform_dma_ops(struct device *dev)
	 */
 }

+static struct iommu_device mock_iommu_device = {
+};
+
+static struct iommu_device *mock_probe_device(struct device *dev)
+{
+	return &mock_iommu_device;
+}
+
 static const struct iommu_ops mock_ops = {
 	.owner = THIS_MODULE,
 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
+	.hw_info = mock_domain_hw_info,
 	.domain_alloc = mock_domain_alloc,
 	.capable = mock_domain_capable,
 	.set_platform_dma_ops = mock_domain_set_plaform_dma_ops,
+	.device_group = generic_device_group,
+	.probe_device = mock_probe_device,
 	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
@@ -292,10 +321,6 @@ static const struct iommu_ops mock_ops = {
		},
 };

-static struct iommu_device mock_iommu_device = {
-	.ops = &mock_ops,
-};
-
 static inline struct iommufd_hw_pagetable *
 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
@@ -316,22 +341,29 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
 	return hwpt;
 }

-static struct bus_type iommufd_mock_bus_type = {
-	.name = "iommufd_mock",
-	.iommu_ops = &mock_ops,
+struct mock_bus_type {
+	struct bus_type bus;
+	struct notifier_block nb;
+};
+
+static struct mock_bus_type iommufd_mock_bus_type = {
+	.bus = {
+		.name = "iommufd_mock",
+	},
 };

+static atomic_t mock_dev_num;
+
 static void mock_dev_release(struct device *dev)
 {
 	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

+	atomic_dec(&mock_dev_num);
 	kfree(mdev);
 }

 static struct mock_dev *mock_dev_create(void)
 {
-	struct iommu_group *iommu_group;
-	struct dev_iommu *dev_iommu;
 	struct mock_dev *mdev;
 	int rc;
@@ -341,51 +373,18 @@ static struct mock_dev *mock_dev_create(void)
 	device_initialize(&mdev->dev);
 	mdev->dev.release = mock_dev_release;
-	mdev->dev.bus = &iommufd_mock_bus_type;
-
-	iommu_group = iommu_group_alloc();
-	if (IS_ERR(iommu_group)) {
-		rc = PTR_ERR(iommu_group);
-		goto err_put;
-	}
+	mdev->dev.bus = &iommufd_mock_bus_type.bus;

 	rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
-			  iommu_group_id(iommu_group));
+			  atomic_inc_return(&mock_dev_num));
 	if (rc)
-		goto err_group;
-
-	/*
-	 * The iommu core has no way to associate a single device with an iommu
-	 * driver (heck currently it can't even support two iommu_drivers
-	 * registering). Hack it together with an open coded dev_iommu_get().
-	 * Notice that the normal notifier triggered iommu release process also
-	 * does not work here because this bus is not in iommu_buses.
-	 */
-	mdev->dev.iommu = kzalloc(sizeof(*dev_iommu), GFP_KERNEL);
-	if (!mdev->dev.iommu) {
-		rc = -ENOMEM;
-		goto err_group;
-	}
-	mutex_init(&mdev->dev.iommu->lock);
-	mdev->dev.iommu->iommu_dev = &mock_iommu_device;
+		goto err_put;

 	rc = device_add(&mdev->dev);
 	if (rc)
-		goto err_dev_iommu;
-
-	rc = iommu_group_add_device(iommu_group, &mdev->dev);
-	if (rc)
-		goto err_del;
-	iommu_group_put(iommu_group);
+		goto err_put;
 	return mdev;

-err_del:
-	device_del(&mdev->dev);
-err_dev_iommu:
-	kfree(mdev->dev.iommu);
-	mdev->dev.iommu = NULL;
-err_group:
-	iommu_group_put(iommu_group);
 err_put:
 	put_device(&mdev->dev);
 	return ERR_PTR(rc);
@@ -393,11 +392,7 @@ static struct mock_dev *mock_dev_create(void)

 static void mock_dev_destroy(struct mock_dev *mdev)
 {
-	iommu_group_remove_device(&mdev->dev);
-	device_del(&mdev->dev);
-	kfree(mdev->dev.iommu);
-	mdev->dev.iommu = NULL;
-	put_device(&mdev->dev);
+	device_unregister(&mdev->dev);
 }

 bool iommufd_selftest_is_mock_dev(struct device *dev)
@@ -443,9 +438,15 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
 	/* Userspace must destroy the device_id to destroy the object */
 	cmd->mock_domain.out_hwpt_id = pt_id;
 	cmd->mock_domain.out_stdev_id = sobj->obj.id;
+	cmd->mock_domain.out_idev_id = idev_id;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+	if (rc)
+		goto out_detach;
 	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
-	return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+	return 0;

+out_detach:
+	iommufd_device_detach(idev);
 out_unbind:
 	iommufd_device_unbind(idev);
 out_mdev:
@@ -455,6 +456,42 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
 	return rc;
 }

+/* Replace the mock domain with a manually allocated hw_pagetable */
+static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
+					    unsigned int device_id, u32 pt_id,
+					    struct iommu_test_cmd *cmd)
+{
+	struct iommufd_object *dev_obj;
+	struct selftest_obj *sobj;
+	int rc;
+
+	/*
+	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
+	 * it doesn't race with detach, which is not allowed.
+	 */
+	dev_obj =
+		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
+	if (IS_ERR(dev_obj))
+		return PTR_ERR(dev_obj);
+
+	sobj = container_of(dev_obj, struct selftest_obj, obj);
+	if (sobj->type != TYPE_IDEV) {
+		rc = -EINVAL;
+		goto out_dev_obj;
+	}
+
+	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
+	if (rc)
+		goto out_dev_obj;
+
+	cmd->mock_domain_replace.pt_id = pt_id;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+
+out_dev_obj:
+	iommufd_put_object(dev_obj);
+	return rc;
+}
+
 /* Add an additional reserved IOVA to the IOAS */
 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
@@ -748,6 +785,22 @@ static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
 	return rc;
 }

+static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
+					    unsigned int access_id,
+					    unsigned int ioas_id)
+{
+	struct selftest_access *staccess;
+	int rc;
+
+	staccess = iommufd_access_get(access_id);
+	if (IS_ERR(staccess))
+		return PTR_ERR(staccess);
+
+	rc = iommufd_access_replace(staccess->access, ioas_id);
+	fput(staccess->file);
+	return rc;
+}
+
 /* Check that the pages in a page array match the pages in the user VA */
 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
@@ -948,6 +1001,9 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
					      cmd->add_reserved.length);
 	case IOMMU_TEST_OP_MOCK_DOMAIN:
 		return iommufd_test_mock_domain(ucmd, cmd);
+	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
+		return iommufd_test_mock_domain_replace(
+			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
 	case IOMMU_TEST_OP_MD_CHECK_MAP:
 		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
@@ -960,6 +1016,9 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
 	case IOMMU_TEST_OP_CREATE_ACCESS:
 		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
+	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
+		return iommufd_test_access_replace_ioas(
+			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
 	case IOMMU_TEST_OP_ACCESS_PAGES:
 		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
@@ -992,15 +1051,57 @@ bool iommufd_should_fail(void)
 	return should_fail(&fail_iommufd, 1);
 }

-void __init iommufd_test_init(void)
+int __init iommufd_test_init(void)
 {
+	struct platform_device_info pdevinfo = {
+		.name = "iommufd_selftest_iommu",
+	};
+	int rc;
+
 	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
-	WARN_ON(bus_register(&iommufd_mock_bus_type));
+
+	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
+	if (IS_ERR(selftest_iommu_dev)) {
+		rc = PTR_ERR(selftest_iommu_dev);
+		goto err_dbgfs;
+	}
+
+	rc = bus_register(&iommufd_mock_bus_type.bus);
+	if (rc)
+		goto err_platform;
+
+	rc = iommu_device_sysfs_add(&mock_iommu_device,
+				    &selftest_iommu_dev->dev, NULL, "%s",
+				    dev_name(&selftest_iommu_dev->dev));
+	if (rc)
+		goto err_bus;
+
+	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
+				       &iommufd_mock_bus_type.bus,
+				       &iommufd_mock_bus_type.nb);
+	if (rc)
+		goto err_sysfs;
+	return 0;
+
+err_sysfs:
+	iommu_device_sysfs_remove(&mock_iommu_device);
+err_bus:
+	bus_unregister(&iommufd_mock_bus_type.bus);
+err_platform:
+	platform_device_unregister(selftest_iommu_dev);
+err_dbgfs:
+	debugfs_remove_recursive(dbgfs_root);
+	return rc;
 }

 void iommufd_test_exit(void)
 {
+	iommu_device_sysfs_remove(&mock_iommu_device);
+	iommu_device_unregister_bus(&mock_iommu_device,
+				    &iommufd_mock_bus_type.bus,
+				    &iommufd_mock_bus_type.nb);
+	bus_unregister(&iommufd_mock_bus_type.bus);
+	platform_device_unregister(selftest_iommu_dev);
 	debugfs_remove_recursive(dbgfs_root);
-	bus_unregister(&iommufd_mock_bus_type);
 }
drivers/vfio/iommufd.c
@@ -146,8 +146,8 @@ int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
 		return -EINVAL;

 	if (vdev->iommufd_attached)
-		return -EBUSY;
-
-	rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
+		rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
+	else
+		rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
 	if (rc)
 		return rc;
@@ -223,7 +223,8 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
 	lockdep_assert_held(&vdev->dev_set->lock);

 	if (vdev->iommufd_attached)
-		return -EBUSY;
-	rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
+		rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
+	else
+		rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
 	if (rc)
 		return rc;
drivers/vfio/vfio_main.c
@@ -1536,6 +1536,8 @@ int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
 	/* group->container cannot change while a vfio device is open */
 	if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
 		return -EINVAL;
+	if (!device->ops->dma_unmap)
+		return -EINVAL;
 	if (vfio_device_has_container(device))
 		return vfio_device_container_pin_pages(device, iova,
						       npage, prot, pages);
@@ -1573,6 +1575,8 @@ void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
 {
 	if (WARN_ON(!vfio_assert_device_open(device)))
 		return;
+	if (WARN_ON(!device->ops->dma_unmap))
+		return;

 	if (vfio_device_has_container(device)) {
 		vfio_device_container_unpin_pages(device, iova, npage);
include/linux/iommu.h
@@ -228,6 +228,10 @@ struct iommu_iotlb_gather {
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
+ * @hw_info: report iommu hardware information. The data buffer returned by this
+ *           op is allocated in the iommu driver and freed by the caller after
+ *           use. The information type is one of enum iommu_hw_info_type defined
+ *           in include/uapi/linux/iommufd.h.
  * @domain_alloc: allocate iommu domain
  * @probe_device: Add device to iommu driver handling
  * @release_device: Remove device from iommu driver handling
@@ -257,6 +261,7 @@ struct iommu_iotlb_gather {
  */
 struct iommu_ops {
 	bool (*capable)(struct device *dev, enum iommu_cap);
+	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

 	/* Domain allocation and freeing by the iommu driver */
 	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
@@ -450,17 +455,6 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
 	};
 }

-static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
-{
-	/*
-	 * Assume that valid ops must be installed if iommu_probe_device()
-	 * has succeeded. The device ops are essentially for internal use
-	 * within the IOMMU subsystem itself, so we should be able to trust
-	 * ourselves not to misuse the helper.
-	 */
-	return dev->iommu->iommu_dev->ops;
-}
-
 extern int bus_iommu_probe(const struct bus_type *bus);
 extern bool iommu_present(const struct bus_type *bus);
 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
include/linux/iommufd.h
@@ -23,6 +23,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
 void iommufd_device_unbind(struct iommufd_device *idev);

 int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
 void iommufd_device_detach(struct iommufd_device *idev);

 struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
@@ -48,6 +49,7 @@ iommufd_access_create(struct iommufd_ctx *ictx,
		      const struct iommufd_access_ops *ops, void *data, u32 *id);
 void iommufd_access_destroy(struct iommufd_access *access);
 int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
 void iommufd_access_detach(struct iommufd_access *access);

 void iommufd_ctx_get(struct iommufd_ctx *ictx);
...@@ -45,6 +45,8 @@ enum { ...@@ -45,6 +45,8 @@ enum {
IOMMUFD_CMD_IOAS_UNMAP, IOMMUFD_CMD_IOAS_UNMAP,
IOMMUFD_CMD_OPTION, IOMMUFD_CMD_OPTION,
IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_VFIO_IOAS,
IOMMUFD_CMD_HWPT_ALLOC,
IOMMUFD_CMD_GET_HW_INFO,
}; };
/** /**
...@@ -344,4 +346,99 @@ struct iommu_vfio_ioas { ...@@ -344,4 +346,99 @@ struct iommu_vfio_ioas {
__u16 __reserved; __u16 __reserved;
}; };
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
/**
* struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
* @size: sizeof(struct iommu_hwpt_alloc)
* @flags: Must be 0
* @dev_id: The device to allocate this HWPT for
* @pt_id: The IOAS to connect this HWPT to
* @out_hwpt_id: The ID of the new HWPT
* @__reserved: Must be 0
*
* Explicitly allocate a hardware page table object. This is the same object
* type that is returned by iommufd_device_attach() and represents the
* underlying iommu driver's iommu_domain kernel object.
*
* A HWPT will be created with the IOVA mappings from the given IOAS.
*/
struct iommu_hwpt_alloc {
__u32 size;
__u32 flags;
__u32 dev_id;
__u32 pt_id;
__u32 out_hwpt_id;
__u32 __reserved;
};
#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
/**
* struct iommu_hw_info_vtd - Intel VT-d hardware information
*
* @flags: Must be 0
* @__reserved: Must be 0
*
* @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
* section 11.4.2 Capability Register.
* @ecap_reg: Value of Intel VT-d capability register defined in VT-d spec
* section 11.4.3 Extended Capability Register.
*
* User needs to understand the Intel VT-d specification to decode the
* register value.
*/
struct iommu_hw_info_vtd {
__u32 flags;
__u32 __reserved;
__aligned_u64 cap_reg;
__aligned_u64 ecap_reg;
};
/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
* @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
* info
* @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
*/
enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_NONE,
IOMMU_HW_INFO_TYPE_INTEL_VTD,
};
/**
* struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
* @size: sizeof(struct iommu_hw_info)
* @flags: Must be 0
* @dev_id: The device bound to the iommufd
* @data_len: Input the length of a user buffer in bytes. Output the length of
* data that kernel supports
* @data_uptr: User pointer to a user-space buffer used by the kernel to fill
* the iommu type specific hardware information data
* @out_data_type: Output the iommu hardware info type as defined in the enum
* iommu_hw_info_type.
* @__reserved: Must be 0
*
* Query iommu-type-specific hardware information for the iommu behind a given
* device that has been bound to iommufd. This hardware info is used to sync
* capabilities between the virtual iommu and the physical iommu; e.g. a
* nested translation setup must check the hardware info so that a guest
* stage-1 page table is compatible with the physical iommu.
*
* To capture the iommu-type-specific hardware information, @data_uptr and its
* length @data_len must be provided. If the user buffer is larger than the
* data the kernel has, the trailing bytes are zeroed; otherwise the kernel
* fills only up to the given @data_len. If the ioctl succeeds, @data_len is
* updated to the length the kernel actually supports, and @out_data_type
* identifies how to decode the data written to the buffer pointed to by
* @data_uptr. An input @data_len of zero is allowed.
*/
struct iommu_hw_info {
__u32 size;
__u32 flags;
__u32 dev_id;
__u32 data_len;
__aligned_u64 data_uptr;
__u32 out_data_type;
__u32 __reserved;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
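A sketch of the two-step query described above: call once with @data_len == 0 to learn the supported length, then fetch the type-specific data. The helper name is hypothetical and it assumes the device sits behind an Intel VT-d iommu:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int get_vtd_hw_info(int iommufd, __u32 dev_id,
			   struct iommu_hw_info_vtd *vtd)
{
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
	};

	/* data_len == 0 is allowed; the kernel reports the supported length */
	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;

	cmd.data_len = sizeof(*vtd);
	cmd.data_uptr = (uintptr_t)vtd;
	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;

	/* @out_data_type says how to decode the filled buffer */
	return cmd.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD ? 0 : -1;
}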
#endif
@@ -940,6 +940,12 @@ struct vfio_device_bind_iommufd {
* Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
* allowed on cdev fds.
*
* If a vfio device is currently attached to a valid hw_pagetable, a second
* VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl passing in another hw_pagetable (hwpt)
* id is allowed without an intervening VFIO_DEVICE_DETACH_IOMMUFD_PT. This
* action, known as a hw_pagetable replacement, replaces the device's
* currently attached hw_pagetable with the hw_pagetable corresponding to the
* given pt_id.
*
* Return: 0 on success, -errno on failure.
*/
struct vfio_device_attach_iommufd_pt {
...
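Sketched usage on a cdev fd; hwpt_a and hwpt_b are assumed to be HWPT IDs previously obtained from IOMMU_HWPT_ALLOC, and error handling is trimmed:

#include <sys/ioctl.h>
#include <linux/vfio.h>

struct vfio_device_attach_iommufd_pt attach = {
	.argsz = sizeof(attach),
	.flags = 0,
	.pt_id = hwpt_a,
};

if (ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach))
	return -1;

/* A second ATTACH with a new pt_id: hitless replacement, no DETACH needed */
attach.pt_id = hwpt_b;
if (ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach))
	return -1;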
@@ -9,9 +9,6 @@
#include "iommufd_utils.h"
static void *buffer;
static unsigned long PAGE_SIZE;
static unsigned long HUGEPAGE_SIZE;
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
@@ -116,6 +113,7 @@ TEST_F(iommufd, cmd_length)
}
TEST_LENGTH(iommu_destroy, IOMMU_DESTROY);
TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO);
TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC);
TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES);
TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS);
@@ -188,6 +186,7 @@ FIXTURE(iommufd_ioas)
uint32_t ioas_id;
uint32_t stdev_id;
uint32_t hwpt_id;
uint32_t device_id;
uint64_t base_iova;
};
@@ -214,7 +213,7 @@ FIXTURE_SETUP(iommufd_ioas)
for (i = 0; i != variant->mock_domains; i++) {
test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
&self->hwpt_id, &self->device_id);
self->base_iova = MOCK_APERTURE_START;
}
}
@@ -265,7 +264,7 @@ TEST_F(iommufd_ioas, hwpt_attach)
{
/* Create a device attached directly to a hwpt */
if (self->stdev_id) {
test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
} else {
test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
}
@@ -293,6 +292,40 @@ TEST_F(iommufd_ioas, ioas_area_auto_destroy)
}
}
TEST_F(iommufd_ioas, get_hw_info)
{
struct iommu_test_hw_info buffer_exact;
struct iommu_test_hw_info_buffer_larger {
struct iommu_test_hw_info info;
uint64_t trailing_bytes;
} buffer_larger;
struct iommu_test_hw_info_buffer_smaller {
__u32 flags;
} buffer_smaller;
if (self->device_id) {
/* Provide a zero-size user_buffer */
test_cmd_get_hw_info(self->device_id, NULL, 0);
/* Provide a user_buffer with exact size */
test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
/*
* Provide a user_buffer with size larger than the exact size to check
* whether the kernel zeroes the trailing bytes.
*/
test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
/*
* Provide a user_buffer with size smaller than the exact size to check
* whether the fields within the size range still get updated.
*/
test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
} else {
test_err_get_hw_info(ENOENT, self->device_id,
&buffer_exact, sizeof(buffer_exact));
test_err_get_hw_info(ENOENT, self->device_id,
&buffer_larger, sizeof(buffer_larger));
}
}
TEST_F(iommufd_ioas, area)
{
int i;
@@ -684,7 +717,7 @@ TEST_F(iommufd_ioas, access_pin)
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
&mock_hwpt_id, NULL);
check_map_cmd.id = mock_hwpt_id;
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
@@ -839,7 +872,7 @@ TEST_F(iommufd_ioas, fork_gone)
* If a domain already existed then everything was pinned within
* the fork, so this copies from one domain to another.
*/
test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
check_access_rw(_metadata, self->fd, access_id,
MOCK_APERTURE_START, 0);
@@ -888,7 +921,7 @@ TEST_F(iommufd_ioas, fork_present)
ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
/* Read pages from the remote process */
test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
ASSERT_EQ(0, close(pipefds[1]));
@@ -1035,6 +1068,8 @@ FIXTURE(iommufd_mock_domain)
uint32_t ioas_id;
uint32_t hwpt_id;
uint32_t hwpt_ids[2];
uint32_t stdev_ids[2];
uint32_t idev_ids[2];
int mmap_flags;
size_t mmap_buf_size;
};
@@ -1056,7 +1091,8 @@ FIXTURE_SETUP(iommufd_mock_domain)
ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
for (i = 0; i != variant->mock_domains; i++)
test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
&self->hwpt_ids[i], &self->idev_ids[i]);
self->hwpt_id = self->hwpt_ids[0];
self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
@@ -1250,7 +1286,7 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
/* Add and destroy a domain while the area exists */
old_id = self->hwpt_ids[1];
test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
&self->hwpt_ids[1], NULL);
check_mock_iova(buf + start, iova, length);
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
@@ -1283,7 +1319,13 @@ TEST_F(iommufd_mock_domain, user_copy)
.dst_iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
};
struct iommu_ioas_unmap unmap_cmd = {
.size = sizeof(unmap_cmd),
.ioas_id = self->ioas_id,
.iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
};
unsigned int new_ioas_id, ioas_id;
/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
test_ioctl_ioas_alloc(&ioas_id);
@@ -1301,13 +1343,77 @@ TEST_F(iommufd_mock_domain, user_copy)
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
/* Now replace the ioas with a new one */
test_ioctl_ioas_alloc(&new_ioas_id);
test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
&copy_cmd.src_iova);
test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
/* Destroy the old ioas and cleanup copied mapping */
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
test_ioctl_destroy(ioas_id);
/* Then run the same test again with the new ioas */
access_cmd.access_pages.iova = copy_cmd.src_iova;
ASSERT_EQ(0,
ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
copy_cmd.src_ioas_id = new_ioas_id;
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
test_cmd_destroy_access_pages(
access_cmd.id, access_cmd.access_pages.out_access_pages_id);
test_cmd_destroy_access(access_cmd.id);
test_ioctl_destroy(new_ioas_id);
}
TEST_F(iommufd_mock_domain, replace)
{
uint32_t ioas_id;
test_ioctl_ioas_alloc(&ioas_id);
test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
/*
* Replacing the IOAS causes the prior HWPT to be deallocated, so we
* should get ENOENT when we try to use it.
*/
if (variant->mock_domains == 1)
test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
self->hwpt_ids[0]);
test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
if (variant->mock_domains >= 2) {
test_cmd_mock_domain_replace(self->stdev_ids[0],
self->hwpt_ids[1]);
test_cmd_mock_domain_replace(self->stdev_ids[0],
self->hwpt_ids[1]);
test_cmd_mock_domain_replace(self->stdev_ids[0],
self->hwpt_ids[0]);
}
test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
test_ioctl_destroy(ioas_id);
}
TEST_F(iommufd_mock_domain, alloc_hwpt)
{
int i;
for (i = 0; i != variant->mock_domains; i++) {
uint32_t stdev_id;
uint32_t hwpt_id;
test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id);
test_cmd_mock_domain(hwpt_id, &stdev_id, NULL, NULL);
test_ioctl_destroy(stdev_id);
test_ioctl_destroy(hwpt_id);
}
}
/* VFIO compatibility IOCTLs */
TEST_F(iommufd, simple_ioctls)
@@ -1429,7 +1535,7 @@ FIXTURE_SETUP(vfio_compat_mock_domain)
/* Create what VFIO would consider a group */
test_ioctl_ioas_alloc(&self->ioas_id);
test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
/* Attach it to the vfio compat */
vfio_ioas_cmd.ioas_id = self->ioas_id;
...
@@ -41,6 +41,8 @@ static int writeat(int dfd, const char *fn, const char *val)
static __attribute__((constructor)) void setup_buffer(void)
{
PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
BUFFER_SIZE = 2*1024*1024;
buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
@@ -313,7 +315,7 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
fail_nth_enable();
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
@@ -324,7 +326,7 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
return 0;
}
@@ -348,12 +350,13 @@ TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
fail_nth_enable();
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
NULL))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
@@ -367,9 +370,10 @@ TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
if (_test_ioctl_destroy(self->fd, stdev_id2))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
NULL))
return -1;
return 0;
}
@@ -526,7 +530,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
@@ -569,4 +573,57 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
return 0;
}
/* device.c */
TEST_FAIL_NTH(basic_fail_nth, device)
{
struct iommu_test_hw_info info;
uint32_t ioas_id;
uint32_t ioas_id2;
uint32_t stdev_id;
uint32_t idev_id;
uint32_t hwpt_id;
__u64 iova;
self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;
if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
return -1;
iova = MOCK_APERTURE_START;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
IOMMU_IOAS_MAP_FIXED_IOVA |
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
IOMMU_IOAS_MAP_FIXED_IOVA |
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
fail_nth_enable();
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
&idev_id))
return -1;
if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info)))
return -1;
if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, &hwpt_id))
return -1;
if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
return -1;
if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
return -1;
return 0;
}
TEST_HARNESS_MAIN
@@ -19,6 +19,12 @@
static void *buffer;
static unsigned long BUFFER_SIZE;
static unsigned long PAGE_SIZE;
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
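A quick worked instance of these helpers, assuming the mock info layout that the get_hw_info test above relies on (flags as the first member):

/*
 * Assuming struct iommu_test_hw_info { __u32 flags; __u32 test_reg; }:
 * offsetofend(struct iommu_test_hw_info, flags) == 4 and
 * offsetofend(struct iommu_test_hw_info, test_reg) == 8, so a 4-byte
 * user buffer covers @flags but not @test_reg, which is what the
 * partial-update checks in _test_cmd_get_hw_info() below depend on.
 */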
/*
* Have the kernel check the refcount on pages. I don't know why a freshly
* mmap'd anon non-compound page starts out with a ref of 3
@@ -39,7 +45,7 @@ static unsigned long BUFFER_SIZE;
})
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
__u32 *hwpt_id, __u32 *idev_id)
{
struct iommu_test_cmd cmd = {
.size = sizeof(cmd),
@@ -57,14 +63,84 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
assert(cmd.id != 0);
if (hwpt_id)
*hwpt_id = cmd.mock_domain.out_hwpt_id;
if (idev_id)
*idev_id = cmd.mock_domain.out_idev_id;
return 0;
}
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id) \
ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
stdev_id, hwpt_id, NULL))
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
__u32 *hwpt_id)
{
struct iommu_test_cmd cmd = {
.size = sizeof(cmd),
.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
.id = stdev_id,
.mock_domain_replace = {
.pt_id = pt_id,
},
};
int ret;
ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
if (ret)
return ret;
if (hwpt_id)
*hwpt_id = cmd.mock_domain_replace.pt_id;
return 0;
}
#define test_cmd_mock_domain_replace(stdev_id, pt_id) \
ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id) \
EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
pt_id, NULL))
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
__u32 *hwpt_id)
{
struct iommu_hwpt_alloc cmd = {
.size = sizeof(cmd),
.dev_id = device_id,
.pt_id = pt_id,
};
int ret;
ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
if (ret)
return ret;
if (hwpt_id)
*hwpt_id = cmd.out_hwpt_id;
return 0;
}
#define test_cmd_hwpt_alloc(device_id, pt_id, hwpt_id) \
ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, hwpt_id))
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
unsigned int ioas_id)
{
struct iommu_test_cmd cmd = {
.size = sizeof(cmd),
.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
.id = access_id,
.access_replace_ioas = { .ioas_id = ioas_id },
};
int ret;
ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
if (ret)
return ret;
return 0;
}
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
__u32 *access_id, unsigned int flags)
@@ -276,3 +352,61 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
})
#endif
/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id,
void *data, size_t data_len)
{
struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
struct iommu_hw_info cmd = {
.size = sizeof(cmd),
.dev_id = device_id,
.data_len = data_len,
.data_uptr = (uint64_t)data,
};
int ret;
ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
if (ret)
return ret;
assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);
/*
* The struct iommu_test_hw_info should be the one defined
* by the current kernel.
*/
assert(cmd.data_len == sizeof(struct iommu_test_hw_info));
/*
* Trailing bytes should be 0 if the user buffer is larger
* than the data the kernel reports.
*/
if (data_len > cmd.data_len) {
char *ptr = (char *)(data + cmd.data_len);
int idx = 0;
while (idx < data_len - cmd.data_len) {
assert(!*(ptr + idx));
idx++;
}
}
if (info) {
if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
assert(!info->flags);
}
return 0;
}
#define test_cmd_get_hw_info(device_id, data, data_len) \
ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \
data, data_len))
#define test_err_get_hw_info(_errno, device_id, data, data_len) \
EXPECT_ERRNO(_errno, \
_test_cmd_get_hw_info(self->fd, device_id, \
data, data_len))