Commit efe15df0 authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu-v3: Move the STE generation for S1 and S2 domains into functions

This is preparation to move the STE calculation higher up into the call
chain and remove arm_smmu_write_strtab_ent(). These new functions will be
called directly from attach_dev.
Reviewed-by: Moritz Fischer <mdf@kernel.org>
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Moritz Fischer <moritzf@google.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v6-96275f25c39d+2d4-smmuv3_newapi_p1_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 7686aa5f
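As context for the diff below: the commit message says the new STE builders
will eventually be called straight from attach_dev. A minimal sketch of that
eventual shape, not code from this commit; arm_smmu_get_step_for_sid() is
assumed here as the lookup for a stream ID's STE slot:

/*
 * Illustrative sketch only: once the STE builders exist, an attach
 * path can compute the full STE into a local and hand it to the
 * writer in one step, bypassing arm_smmu_write_strtab_ent().
 */
static void example_attach_install_ste(struct arm_smmu_master *master,
				       struct arm_smmu_domain *smmu_domain,
				       u32 sid)
{
	struct arm_smmu_ste *step = arm_smmu_get_step_for_sid(master->smmu, sid);
	struct arm_smmu_ste target;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		arm_smmu_make_cdtable_ste(&target, master);
	else
		arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);

	arm_smmu_write_ste(master, sid, step, &target);
}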
@@ -1465,13 +1465,89 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
 		FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
 }
 
+static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+				      struct arm_smmu_master *master)
+{
+	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+	struct arm_smmu_device *smmu = master->smmu;
+
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+		FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
+		(cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+		FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
+
+	target->data[1] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+		FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+		FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+		FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+		((smmu->features & ARM_SMMU_FEAT_STALLS &&
+		  !master->stall_enabled) ?
+			 STRTAB_STE_1_S1STALLD :
+			 0) |
+		FIELD_PREP(STRTAB_STE_1_EATS,
+			   master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+	if (smmu->features & ARM_SMMU_FEAT_E2H) {
+		/*
+		 * To support BTM the streamworld needs to match the
+		 * configuration of the CPU so that the ASID broadcasts are
+		 * properly matched. This means either S/NS-EL2-E2H (hypervisor)
+		 * or NS-EL1 (guest). Since an SVA domain can be installed in a
+		 * PASID this should always use a BTM compatible configuration
+		 * if the HW supports it.
+		 */
+		target->data[1] |= cpu_to_le64(
+			FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
+	} else {
+		target->data[1] |= cpu_to_le64(
+			FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
+
+		/*
+		 * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
+		 * arm_smmu_domain_alloc_id()
+		 */
+		target->data[2] =
+			cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
+	}
+}
+
+static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+					struct arm_smmu_master *master,
+					struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
+
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
+
+	target->data[1] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_1_EATS,
+			   master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
+		FIELD_PREP(STRTAB_STE_1_SHCFG,
+			   STRTAB_STE_1_SHCFG_INCOMING));
+
+	target->data[2] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+		FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+		STRTAB_STE_2_S2AA64 |
+#ifdef __BIG_ENDIAN
+		STRTAB_STE_2_S2ENDI |
+#endif
+		STRTAB_STE_2_S2PTW |
+		STRTAB_STE_2_S2R);
+
+	target->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+}
+
 static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 				      struct arm_smmu_ste *dst)
 {
-	u64 val;
-	struct arm_smmu_device *smmu = master->smmu;
-	struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
-	struct arm_smmu_s2_cfg *s2_cfg = NULL;
 	struct arm_smmu_domain *smmu_domain = master->domain;
 	struct arm_smmu_ste target = {};
 
@@ -1486,63 +1562,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 	switch (smmu_domain->stage) {
 	case ARM_SMMU_DOMAIN_S1:
-		cd_table = &master->cd_table;
+		arm_smmu_make_cdtable_ste(&target, master);
 		break;
 	case ARM_SMMU_DOMAIN_S2:
-		s2_cfg = &smmu_domain->s2_cfg;
+		arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
 		break;
 	case ARM_SMMU_DOMAIN_BYPASS:
 		arm_smmu_make_bypass_ste(&target);
-		arm_smmu_write_ste(master, sid, dst, &target);
-		return;
-	}
-
-	/* Nuke the existing STE_0 value, as we're going to rewrite it */
-	val = STRTAB_STE_0_V;
-
-	if (cd_table) {
-		u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
-			STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
-
-		target.data[1] = cpu_to_le64(
-			FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
-			FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
-			FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
-			FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
-			FIELD_PREP(STRTAB_STE_1_STRW, strw));
-
-		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
-		    !master->stall_enabled)
-			target.data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
-
-		val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
-			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
-			FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
-			FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
-	}
-
-	if (s2_cfg) {
-		target.data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
-							STRTAB_STE_1_SHCFG_INCOMING));
-		target.data[2] = cpu_to_le64(
-			FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
-			FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
-#ifdef __BIG_ENDIAN
-			STRTAB_STE_2_S2ENDI |
-#endif
-			STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
-			STRTAB_STE_2_S2R);
-
-		target.data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
-
-		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
+		break;
 	}
-
-	if (master->ats_enabled)
-		target.data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
-							 STRTAB_STE_1_EATS_TRANS));
-
-	target.data[0] = cpu_to_le64(val);
 
 	arm_smmu_write_ste(master, sid, dst, &target);
 }
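Aside on the pattern used throughout these builders: each 64-bit STE word is
assembled by OR-ing FIELD_PREP() terms, where FIELD_PREP() (from
<linux/bitfield.h>) shifts a value into the bit span named by a GENMASK()
constant. A minimal standalone sketch with made-up EXAMPLE_* masks mirroring
the STE word-0 layout (V is bit 0, Config occupies bits [3:1], and config
value 0b101 selects stage-1 translate):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical EXAMPLE_* names; the driver's real masks live in arm-smmu-v3.h. */
#define EXAMPLE_STE_0_V			(1ULL << 0)
#define EXAMPLE_STE_0_CFG		GENMASK_ULL(3, 1)
#define EXAMPLE_STE_0_CFG_S1_TRANS	5

static u64 example_pack_ste_word0(void)
{
	/* V=1 and Config=0b101: FIELD_PREP() shifts 5 into bits [3:1]. */
	return EXAMPLE_STE_0_V |
	       FIELD_PREP(EXAMPLE_STE_0_CFG, EXAMPLE_STE_0_CFG_S1_TRANS);
}

Because every field is packed independently, a complete STE can be computed
into a local struct arm_smmu_ste and only then passed to arm_smmu_write_ste(),
which is exactly the shape the new builder functions give the driver.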