Commit 7686aa5f authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu-v3: Consolidate the STE generation for abort/bypass

This allows writing the flow of arm_smmu_write_strtab_ent() around abort
and bypass domains more naturally.

Note that the core code no longer supplies NULL domains, though there is
still a flow in the driver that ends up in arm_smmu_write_strtab_ent() with
a NULL domain. A later patch will remove it.

Remove the duplicate calculation of the STE in arm_smmu_init_bypass_stes()
and remove the force parameter. arm_smmu_rmr_install_bypass_ste() can now
simply invoke arm_smmu_make_bypass_ste() directly.

Rename arm_smmu_init_bypass_stes() to arm_smmu_init_initial_stes() to
better reflect its purpose.
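
In rough outline, the rewritten entry point reads as the sketch below. It is
a condensed, compilable stand-in: the types, helper bodies, and constants are
hypothetical, and only the control-flow shape is taken from the diff that
follows.

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-ins for the driver's types. */
enum stage { DOMAIN_S1, DOMAIN_S2, DOMAIN_BYPASS };
struct ste { unsigned long long data[4]; };
struct domain { enum stage stage; };

static bool disable_bypass = true;	/* mirrors the driver's module parameter */

static void make_abort_ste(struct ste *t)
{
	memset(t, 0, sizeof(*t));
	t->data[0] = 0x1;		/* V=1, CFG=ABORT */
}

static void make_bypass_ste(struct ste *t)
{
	memset(t, 0, sizeof(*t));
	t->data[0] = 0x9;		/* V=1, CFG=BYPASS */
}

static void write_ste(struct ste *dst, const struct ste *src)
{
	*dst = *src;	/* the real driver uses an ordered, synced sequence */
}

/* The consolidated shape: every early exit goes through a "make" helper. */
static void write_strtab_ent(const struct domain *smmu_domain, struct ste *dst)
{
	struct ste target;

	if (!smmu_domain) {		/* no domain: abort or bypass, by policy */
		if (disable_bypass)
			make_abort_ste(&target);
		else
			make_bypass_ste(&target);
		write_ste(dst, &target);
		return;
	}

	switch (smmu_domain->stage) {
	case DOMAIN_BYPASS:
		make_bypass_ste(&target);
		write_ste(dst, &target);
		return;
	case DOMAIN_S1:
	case DOMAIN_S2:
		break;
	}
	/* S1/S2: fill target with translation config, then write_ste() */
}

int main(void)
{
	struct ste e = { { 0 } };

	write_strtab_ent(NULL, &e);	/* no domain + disable_bypass -> abort */
	return e.data[0] == 0x1 ? 0 : 1;
}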
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Moritz Fischer <moritzf@google.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v6-96275f25c39d+2d4-smmuv3_newapi_p1_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 7da51af9
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1447,6 +1447,24 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
 	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 }
 
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
+{
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
+
+static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+{
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
+	target->data[1] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+}
+
 static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 				      struct arm_smmu_ste *dst)
 {
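
The helpers pack each field with FIELD_PREP() from <linux/bitfield.h>, which
shifts a value into the bit position implied by its mask. A standalone sketch
of the resulting STE words follows, assuming the field encodings from
arm-smmu-v3.h (V at bit 0, CFG at bits [3:1] with ABORT=0 and BYPASS=4, SHCFG
at bits [45:44] with INCOMING=1); the simplified FIELD_PREP() below mimics
only the behavior used here.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP(). */
#define GENMASK_ULL(h, l) \
	((~0ULL >> (63 - (h))) & (~0ULL << (l)))
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Field encodings as assumed from arm-smmu-v3.h. */
#define STRTAB_STE_0_V			(1ULL << 0)
#define STRTAB_STE_0_CFG		GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT		0
#define STRTAB_STE_0_CFG_BYPASS		4
#define STRTAB_STE_1_SHCFG		GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING	1

int main(void)
{
	uint64_t abort0 = STRTAB_STE_0_V |
			  FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
	uint64_t bypass0 = STRTAB_STE_0_V |
			   FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
	uint64_t bypass1 = FIELD_PREP(STRTAB_STE_1_SHCFG,
				      STRTAB_STE_1_SHCFG_INCOMING);

	printf("abort  STE word 0: %#llx\n", (unsigned long long)abort0);  /* 0x1 */
	printf("bypass STE word 0: %#llx\n", (unsigned long long)bypass0); /* 0x9 */
	printf("bypass STE word 1: %#llx\n", (unsigned long long)bypass1); /* 1ULL << 44 */
	return 0;
}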
@@ -1457,7 +1475,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 	struct arm_smmu_domain *smmu_domain = master->domain;
 	struct arm_smmu_ste target = {};
 
-	if (smmu_domain) {
+	if (!smmu_domain) {
+		if (disable_bypass)
+			arm_smmu_make_abort_ste(&target);
+		else
+			arm_smmu_make_bypass_ste(&target);
+		arm_smmu_write_ste(master, sid, dst, &target);
+		return;
+	}
+
 	switch (smmu_domain->stage) {
 	case ARM_SMMU_DOMAIN_S1:
 		cd_table = &master->cd_table;
@@ -1465,29 +1491,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 	case ARM_SMMU_DOMAIN_S2:
 		s2_cfg = &smmu_domain->s2_cfg;
 		break;
-	default:
-		break;
-	}
+	case ARM_SMMU_DOMAIN_BYPASS:
+		arm_smmu_make_bypass_ste(&target);
+		arm_smmu_write_ste(master, sid, dst, &target);
+		return;
 	}
 
 	/* Nuke the existing STE_0 value, as we're going to rewrite it */
 	val = STRTAB_STE_0_V;
 
-	/* Bypass/fault */
-	if (!smmu_domain || !(cd_table || s2_cfg)) {
-		if (!smmu_domain && disable_bypass)
-			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
-		else
-			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
-
-		target.data[0] = cpu_to_le64(val);
-		target.data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
-						STRTAB_STE_1_SHCFG_INCOMING));
-		target.data[2] = 0; /* Nuke the VMID */
-		arm_smmu_write_ste(master, sid, dst, &target);
-		return;
-	}
-
 	if (cd_table) {
 		u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
 			STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
@@ -1534,22 +1546,20 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 	arm_smmu_write_ste(master, sid, dst, &target);
 }
 
-static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
-				      unsigned int nent, bool force)
+/*
+ * This can safely directly manipulate the STE memory without a sync sequence
+ * because the STE table has not been installed in the SMMU yet.
+ */
+static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
+				       unsigned int nent)
 {
 	unsigned int i;
-	u64 val = STRTAB_STE_0_V;
-
-	if (disable_bypass && !force)
-		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
-	else
-		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
 
 	for (i = 0; i < nent; ++i) {
-		strtab->data[0] = cpu_to_le64(val);
-		strtab->data[1] = cpu_to_le64(FIELD_PREP(
-			STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
-		strtab->data[2] = 0;
+		if (disable_bypass)
+			arm_smmu_make_abort_ste(strtab);
+		else
+			arm_smmu_make_bypass_ste(strtab);
 		strtab++;
 	}
 }
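
The comment added above captures the key invariant: plain stores are safe
only while the STE table is not yet visible to the SMMU. Below is a toy,
self-contained illustration of that pre-install regime (all names and values
are illustrative, derived from the word encodings computed earlier). Once the
table is live, updates must instead go through arm_smmu_write_ste(), which
sequences the word updates with syncs so the hardware never observes a
half-formed entry.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct ste { uint64_t data[4]; };

static void make_bypass_ste(struct ste *t)
{
	memset(t, 0, sizeof(*t));
	t->data[0] = 0x9;		/* V | CFG=BYPASS, as derived above */
	t->data[1] = 1ULL << 44;	/* SHCFG=INCOMING */
}

/*
 * Pre-install: the SMMU cannot walk the table yet, so batch-fill it with
 * plain stores; this mirrors the shape of arm_smmu_init_initial_stes().
 */
static void init_initial_stes(struct ste *strtab, unsigned int nent,
			      int disable_bypass)
{
	for (unsigned int i = 0; i < nent; i++) {
		if (disable_bypass) {
			memset(&strtab[i], 0, sizeof(strtab[i]));
			strtab[i].data[0] = 0x1;	/* V | CFG=ABORT */
		} else {
			make_bypass_ste(&strtab[i]);
		}
	}
}

int main(void)
{
	struct ste tab[4];

	init_initial_stes(tab, 4, 0);
	printf("STE[0] word 0: %#llx\n", (unsigned long long)tab[0].data[0]);
	return 0;
}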
@@ -1577,7 +1587,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 		return -ENOMEM;
 	}
 
-	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+	arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
 	arm_smmu_write_strtab_l1_desc(strtab, desc);
 	return 0;
 }
@@ -3196,7 +3206,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
 	cfg->strtab_base_cfg = reg;
 
-	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+	arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
 	return 0;
 }
@@ -3907,7 +3917,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
 	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
 
 	list_for_each_entry(e, &rmr_list, list) {
-		struct arm_smmu_ste *step;
 		struct iommu_iort_rmr_data *rmr;
 		int ret, i;
 
@@ -3920,8 +3929,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
 				continue;
 			}
 
-			step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
-			arm_smmu_init_bypass_stes(step, 1, true);
+			/*
+			 * STE table is not programmed to HW, see
+			 * arm_smmu_init_initial_stes()
+			 */
+			arm_smmu_make_bypass_ste(
+				arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
 		}
 	}
 