Commit e9d1e4ff authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu-v3: Move the CD generation for S1 domains into a function

Introduce arm_smmu_make_s1_cd() to build the CD from the paging S1 domain,
and reorganize all the places programming S1 domain CD table entries to
call it.

Split arm_smmu_update_s1_domain_cd_entry() from
arm_smmu_update_ctx_desc_devices() so that the S1 path has its own call
chain separate from the unrelated SVA path.

arm_smmu_update_s1_domain_cd_entry() only works on S1 domains attached to
RIDs and refreshes all their CDs. Remove case (3) from
arm_smmu_write_ctx_desc() as it is now handled by directly calling
arm_smmu_write_cd_entry().

Remove the forced clear of the CD during S1 domain attach;
arm_smmu_write_cd_entry() will do this automatically if necessary.
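
A minimal sketch of the resulting call pattern for programming a RID CD
(illustration only, not part of this patch; the wrapper refresh_rid_cd() is a
hypothetical name that simply strings together the helpers touched here):

	/*
	 * Hypothetical helper (illustration only): rebuild and install the
	 * RID (SSID 0) CD for one master attached to a paging S1 domain.
	 */
	static int refresh_rid_cd(struct arm_smmu_master *master,
				  struct arm_smmu_domain *smmu_domain)
	{
		struct arm_smmu_cd target_cd;
		struct arm_smmu_cd *cdptr;

		/* Get the CD table entry for the RID (SSID 0); NULL if unavailable */
		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
		if (!cdptr)
			return -ENOMEM;

		/* Build the CD value from the domain's stage-1 context descriptor */
		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);

		/* Program the entry; clearing or updating a live CD is handled internally */
		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, &target_cd);
		return 0;
	}
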
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v9-5040dc602008+177d7-smmuv3_newapi_p2_jgg@nvidia.com
[will: Drop unused arm_smmu_clean_cd_entry() function]
Signed-off-by: Will Deacon <will@kernel.org>
parent 78a5fbe8
@@ -53,6 +53,29 @@ static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 }
 
+static void
+arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_master *master;
+	struct arm_smmu_cd target_cd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		struct arm_smmu_cd *cdptr;
+
+		/* S1 domains only support RID attachment right now */
+		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		if (WARN_ON(!cdptr))
+			continue;
+
+		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+					&target_cd);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+}
+
 /*
  * Check if the CPU ASID is available on the SMMU side. If a private context
  * descriptor is using it, try to replace it.
@@ -96,7 +119,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
+	arm_smmu_update_s1_domain_cd_entry(smmu_domain);
 
 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
@@ -1203,7 +1203,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
 	WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 
-static struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
-					       u32 ssid)
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+					u32 ssid)
 {
 	__le64 *l1ptr;
@@ -1269,7 +1269,7 @@ static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = {
 	.get_used = arm_smmu_get_cd_used,
 };
 
-static void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
-				    struct arm_smmu_cd *cdptr,
-				    const struct arm_smmu_cd *target)
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+			     struct arm_smmu_cd *cdptr,
+			     const struct arm_smmu_cd *target)
 {
@@ -1284,6 +1284,32 @@ static void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
 	arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
 }
 
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+			 struct arm_smmu_master *master,
+			 struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+
+	memset(target, 0, sizeof(*target));
+
+	target->data[0] = cpu_to_le64(
+		cd->tcr |
+#ifdef __BIG_ENDIAN
+		CTXDESC_CD_0_ENDI |
+#endif
+		CTXDESC_CD_0_V |
+		CTXDESC_CD_0_AA64 |
+		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+		CTXDESC_CD_0_R |
+		CTXDESC_CD_0_A |
+		CTXDESC_CD_0_ASET |
+		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
+		);
+	target->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+
+	target->data[3] = cpu_to_le64(cd->mair);
+}
+
 int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
 			    struct arm_smmu_ctx_desc *cd)
 {
@@ -1292,14 +1318,11 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
 	 *
 	 * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
 	 * (2) Install a secondary CD, for SID+SSID traffic.
-	 * (3) Update ASID of a CD. Atomically write the first 64 bits of the
-	 *     CD, then invalidate the old entry and mappings.
 	 * (4) Quiesce the context without clearing the valid bit. Disable
 	 *     translation, and ignore any translation fault.
 	 * (5) Remove a secondary CD.
 	 */
 	u64 val;
-	bool cd_live;
 	struct arm_smmu_cd target;
 	struct arm_smmu_cd *cdptr = &target;
 	struct arm_smmu_cd *cd_table_entry;
@@ -1315,7 +1338,6 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
 
 	target = *cd_table_entry;
 	val = le64_to_cpu(cdptr->data[0]);
-	cd_live = !!(val & CTXDESC_CD_0_V);
 
 	if (!cd) { /* (5) */
 		memset(cdptr, 0, sizeof(*cdptr));
@@ -1328,13 +1350,6 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
 		val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
 		val |= CTXDESC_CD_0_TCR_EPD0;
 		cdptr->data[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
-	} else if (cd_live) { /* (3) */
-		val &= ~CTXDESC_CD_0_ASID;
-		val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
-		/*
-		 * Until CD+TLB invalidation, both ASIDs may be used for tagging
-		 * this substream's traffic
-		 */
 	} else { /* (1) and (2) */
 		cdptr->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
 		cdptr->data[2] = 0;
@@ -2633,29 +2648,29 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	switch (smmu_domain->stage) {
-	case ARM_SMMU_DOMAIN_S1:
+	case ARM_SMMU_DOMAIN_S1: {
+		struct arm_smmu_cd target_cd;
+		struct arm_smmu_cd *cdptr;
+
 		if (!master->cd_table.cdtab) {
 			ret = arm_smmu_alloc_cd_tables(master);
 			if (ret)
 				goto out_list_del;
-		} else {
-			/*
-			 * arm_smmu_write_ctx_desc() relies on the entry being
-			 * invalid to work, clear any existing entry.
-			 */
-			ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
-						      NULL);
-			if (ret)
-				goto out_list_del;
 		}
 
-		ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
-		if (ret)
+		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		if (!cdptr) {
+			ret = -ENOMEM;
 			goto out_list_del;
+		}
+
+		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+					&target_cd);
 		arm_smmu_make_cdtable_ste(&target, master);
 		arm_smmu_install_ste_for_dev(master, &target);
 		break;
+	}
 	case ARM_SMMU_DOMAIN_S2:
 		arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
 		arm_smmu_install_ste_for_dev(master, &target);
@@ -751,6 +751,15 @@ extern struct xarray arm_smmu_asid_xa;
 extern struct mutex arm_smmu_asid_lock;
 extern struct arm_smmu_ctx_desc quiet_cd;
 
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+					u32 ssid);
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+			 struct arm_smmu_master *master,
+			 struct arm_smmu_domain *smmu_domain);
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+			     struct arm_smmu_cd *cdptr,
+			     const struct arm_smmu_cd *target);
+
 int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
 			    struct arm_smmu_ctx_desc *cd);
 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);