Commit c9b258c6 authored by Suravee Suthikulpanit, committed by Joerg Roedel

iommu/amd: Prepare for generic IO page table framework

Add initial hook-up code to implement the generic IO page table framework.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-3-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 8c112a6b
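
For orientation, here is a minimal sketch of how a follow-up patch could hand a protection domain's page table over to the framework this patch registers. It is not part of this commit; the helper name example_attach_v1_pgtable and the error handling are hypothetical, while alloc_io_pgtable_ops() and the domain->iop.pgtbl_cfg embedding come from the hunks below.

/* Hypothetical sketch: attach the v1 format to a protection domain. */
static int example_attach_v1_pgtable(struct protection_domain *domain)
{
	struct io_pgtable_ops *pgtbl_ops;

	/*
	 * The core looks up io_pgtable_amd_iommu_v1_init_fns for
	 * AMD_IOMMU_V1 and calls v1_alloc_pgtable() with the cfg
	 * embedded in the domain; "domain" becomes the cookie.
	 * A real patch would stash pgtbl_ops in the domain.
	 */
	pgtbl_ops = alloc_io_pgtable_ops(AMD_IOMMU_V1,
					 &domain->iop.pgtbl_cfg, domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	return 0;
}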
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,6 +10,7 @@ config AMD_IOMMU
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMU_DMA
+	select IOMMU_IO_PGTABLE
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help
 	  With this option you can enable support for AMD IOMMU hardware in
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/irqreturn.h>
+#include <linux/io-pgtable.h>
 
 /*
  * Maximum number of IOMMUs supported
@@ -252,6 +253,19 @@
 
 #define GA_GUEST_NR		0x1
 
+#define IOMMU_IN_ADDR_BIT_SIZE  52
+#define IOMMU_OUT_ADDR_BIT_SIZE 52
+
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * 512GB Pages are not supported due to a hardware bug
+ */
+#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
+
 /* Bit value definition for dte irq remapping fields*/
 #define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
 #define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
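
Decoding that bitmap may help. The snippet below is illustrative only (the _Static_assert lines are not part of the patch; SZ_* constants come from <linux/sizes.h>): ~0xFFFUL clears bits 0-11, advertising every power-of-two size from 4K up, and 2ULL << 38 equals 1ULL << 39, the 512GB page size bit that is masked out. The define itself is simply moved here from iommu.c, as a later hunk shows.

/* Illustrative decode of AMD_IOMMU_PGSIZES, not part of the patch. */
#include <linux/sizes.h>

_Static_assert(AMD_IOMMU_PGSIZES & SZ_4K, "4K pages advertised");
_Static_assert(AMD_IOMMU_PGSIZES & SZ_2M, "2M pages advertised");
_Static_assert(AMD_IOMMU_PGSIZES & SZ_1G, "1G pages advertised");
_Static_assert(!(AMD_IOMMU_PGSIZES & (1ULL << 39)),
	       "512GB pages masked out (hardware bug)");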
@@ -466,6 +480,26 @@ struct amd_irte_ops;
 
 #define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)
 
+#define io_pgtable_to_data(x) \
+	container_of((x), struct amd_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define io_pgtable_ops_to_domain(x) \
+	container_of(io_pgtable_ops_to_data(x), \
+		     struct protection_domain, iop)
+
+#define io_pgtable_cfg_to_data(x) \
+	container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+
+struct amd_io_pgtable {
+	struct io_pgtable_cfg	pgtbl_cfg;
+	struct io_pgtable	iop;
+	int			mode;
+	u64			*root;
+};
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
@@ -474,6 +508,7 @@ struct protection_domain {
 	struct list_head dev_list; /* List of all devices in this domain */
 	struct iommu_domain domain; /* generic domain handle used by
 				       iommu core code */
+	struct amd_io_pgtable iop;
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	u16 id;			/* the domain id written to the device table */
 	atomic64_t pt_root;	/* pgtable root and pgtable mode */
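
These two hunks establish the embedding the macros above rely on: struct io_pgtable_cfg and struct io_pgtable sit inside struct amd_io_pgtable, which in turn sits inside struct protection_domain. A minimal sketch of the recovery chain follows (the function example_ops_to_domain is hypothetical; io_pgtable_ops_to_pgtable() is the existing helper from include/linux/io-pgtable.h):

/* Hypothetical sketch: walking back from ops to the owning domain. */
static struct protection_domain *example_ops_to_domain(struct io_pgtable_ops *ops)
{
	/*
	 * io_pgtable_ops_to_pgtable(ops): container_of -> struct io_pgtable
	 * io_pgtable_to_data(...):        container_of -> struct amd_io_pgtable
	 * io_pgtable_ops_to_domain(...):  container_of -> struct protection_domain
	 * No back-pointer is stored anywhere; it is all offset arithmetic.
	 */
	return io_pgtable_ops_to_domain(ops);
}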
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table allocator.
+ *
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ */
+
+#define pr_fmt(fmt)	"AMD-Vi: " fmt
+#define dev_fmt(fmt)	pr_fmt(fmt)
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+static void v1_tlb_flush_all(void *cookie)
+{
+}
+
+static void v1_tlb_flush_walk(unsigned long iova, size_t size,
+			      size_t granule, void *cookie)
+{
+}
+
+static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
+			    unsigned long iova, size_t granule,
+			    void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v1_flush_ops = {
+	.tlb_flush_all	= v1_tlb_flush_all,
+	.tlb_flush_walk	= v1_tlb_flush_walk,
+	.tlb_add_page	= v1_tlb_add_page,
+};
+
+/*
+ * ----------------------------------------------------
+ */
+static void v1_free_pgtable(struct io_pgtable *iop)
+{
+}
+
+static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+
+	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
+	cfg->ias           = IOMMU_IN_ADDR_BIT_SIZE;
+	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
+	cfg->tlb           = &v1_flush_ops;
+
+	return &pgtable->iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
+	.alloc	= v1_alloc_pgtable,
+	.free	= v1_free_pgtable,
+};
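
Worth noting about the new file: v1_alloc_pgtable() allocates nothing. The io_pgtable_cfg it receives is the one embedded in struct amd_io_pgtable (recovered via io_pgtable_cfg_to_data()), which itself lives inside struct protection_domain, so "allocation" reduces to filling in the config and returning the embedded struct io_pgtable. The v1_tlb_* callbacks and v1_free_pgtable() are deliberate stubs; presumably later patches in this series move the existing map/unmap and flush logic behind them.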
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -57,16 +57,6 @@
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)
 
-/*
- * This bitmap is used to advertise the page sizes our hardware support
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * 512GB Pages are not supported due to a hardware bug
- */
-#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
-
 #define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL
 
 static DEFINE_SPINLOCK(pd_bitmap_lock);
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -24,6 +24,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
 #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
 	[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
 #endif
+#ifdef CONFIG_AMD_IOMMU
+	[AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
+#endif
 };
 
 struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
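
For context, alloc_io_pgtable_ops(), whose signature the hunk shows, dispatches through the table above. The sketch below is a rough paraphrase of that dispatch, not the verbatim kernel source; see drivers/iommu/io-pgtable.c for the real implementation.

/* Paraphrased core dispatch (illustrative). */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie)
{
	const struct io_pgtable_init_fns *fns;
	struct io_pgtable *iop;

	if (fmt >= IO_PGTABLE_NUM_FMTS)
		return NULL;

	fns = io_pgtable_init_table[fmt];
	if (!fns)
		return NULL;

	/* For AMD_IOMMU_V1 this lands in v1_alloc_pgtable() above. */
	iop = fns->alloc(cfg, cookie);
	if (!iop)
		return NULL;

	iop->fmt    = fmt;
	iop->cookie = cookie;
	iop->cfg    = *cfg;

	return &iop->ops;
}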
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -15,6 +15,7 @@ enum io_pgtable_fmt {
 	ARM_64_LPAE_S2,
 	ARM_V7S,
 	ARM_MALI_LPAE,
+	AMD_IOMMU_V1,
 	IO_PGTABLE_NUM_FMTS,
 };
 
@@ -251,5 +252,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
 
 #endif /* __IO_PGTABLE_H */