Merge tag 'arm-smmu-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu

Arm SMMU updates for 6.7

- Device-tree binding update:
  * Add qcom,sm7150-smmu-v2 for Adreno on SM7150 SoC

- SMMUv2:
  * Support for Qualcomm SDM670 (MDSS) and SM7150 SoCs

- SMMUv3:
  * Large refactoring of the context descriptor code to
    move the CD table into the master, paving the way
    for '->set_dev_pasid()' support on non-SVA domains

  * Minor cleanups to the SVA code
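
A condensed sketch of the call-shape change described in the SMMUv3 bullet above, using only names that appear in the diff below; this is an illustration of the pattern, not a copy of the final kernel code:

	/* Before the refactoring: the CD table is owned by the domain,
	 * so a context descriptor is written once per domain.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, ssid, cd);

	/* After: each master owns its CD table, so a domain-wide update
	 * walks the attached masters under devices_lock (see the new
	 * arm_smmu_update_ctx_desc_devices() helper in the diff).
	 */
	list_for_each_entry(master, &smmu_domain->devices, domain_head)
		arm_smmu_write_ctx_desc(master, ssid, cd);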
joergroedel committed Oct 16, 2023
2 parents 6465e26 + 5486509 commit aa5cabc
Showing 5 changed files with 204 additions and 174 deletions.
4 changes: 3 additions & 1 deletion Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -110,6 +110,7 @@ properties:
- qcom,sdm630-smmu-v2
- qcom,sdm845-smmu-v2
- qcom,sm6350-smmu-v2
- qcom,sm7150-smmu-v2
- const: qcom,adreno-smmu
- const: qcom,smmu-v2
- description: Qcom Adreno GPUs on Google Cheza platform
@@ -270,6 +271,7 @@ allOf:
contains:
enum:
- qcom,msm8998-smmu-v2
- qcom,sdm630-smmu-v2
then:
anyOf:
- properties:
@@ -311,7 +313,6 @@ allOf:
compatible:
contains:
enum:
- qcom,sdm630-smmu-v2
- qcom,sm6375-smmu-v2
then:
anyOf:
@@ -409,6 +410,7 @@ allOf:
contains:
enum:
- qcom,sm6350-smmu-v2
- qcom,sm7150-smmu-v2
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
then:
89 changes: 59 additions & 30 deletions drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -25,18 +25,35 @@ struct arm_smmu_mmu_notifier {
#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
struct iommu_sva sva;
struct mm_struct *mm;
struct arm_smmu_mmu_notifier *smmu_mn;
struct list_head list;
refcount_t refs;
};

#define sva_to_bond(handle) \
container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
* Write the CD to the CD tables for all masters that this domain is attached
* to. Note that this is only used to update existing CD entries in the target
* CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
*/
static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
int ssid,
struct arm_smmu_ctx_desc *cd)
{
struct arm_smmu_master *master;
unsigned long flags;

spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
arm_smmu_write_ctx_desc(master, ssid, cd);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}

/*
* Check if the CPU ASID is available on the SMMU side. If a private context
* descriptor is using it, try to replace it.
@@ -62,7 +79,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return cd;
}

smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
smmu = smmu_domain->smmu;

ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
@@ -80,7 +97,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);

/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
@@ -186,6 +203,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
}
}

/*
* Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
* is used as a threshold to replace per-page TLBI commands to issue in the
* command queue with an address-space TLBI command, when SMMU w/o a range
* invalidation feature handles too many per-page TLBI commands, which will
* otherwise result in a soft lockup.
*/
#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3))

static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
@@ -201,8 +227,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
* range. So do a simple translation here by calculating size correctly.
*/
size = end - start;
if (size == ULONG_MAX)
size = 0;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
size = 0;
} else {
if (size == ULONG_MAX)
size = 0;
}

if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
if (!size)
@@ -233,7 +264,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);

arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
@@ -259,8 +290,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
struct arm_smmu_master *master;

list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -290,7 +323,16 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}

ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
if (ret) {
list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
break;
}
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (ret)
goto err_put_notifier;

@@ -315,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
return;

list_del(&smmu_mn->list);
arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);

/*
* If we went through clear(), we've already invalidated, and no
@@ -331,8 +374,7 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}

static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
@@ -341,23 +383,13 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

if (!master || !master->sva_enabled)
return ERR_PTR(-ENODEV);

/* If bind() was already called for this {dev, mm} pair, reuse it. */
list_for_each_entry(bond, &master->bonds, list) {
if (bond->mm == mm) {
refcount_inc(&bond->refs);
return &bond->sva;
}
}
return -ENODEV;

bond = kzalloc(sizeof(*bond), GFP_KERNEL);
if (!bond)
return ERR_PTR(-ENOMEM);
return -ENOMEM;

bond->mm = mm;
bond->sva.dev = dev;
refcount_set(&bond->refs, 1);

bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
@@ -366,11 +398,11 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
}

list_add(&bond->list, &master->bonds);
return &bond->sva;
return 0;

err_free_bond:
kfree(bond);
return ERR_PTR(ret);
return ret;
}

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
@@ -536,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
}
}

if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
if (!WARN_ON(!bond)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
kfree(bond);
@@ -548,13 +580,10 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id)
{
int ret = 0;
struct iommu_sva *handle;
struct mm_struct *mm = domain->mm;

mutex_lock(&sva_lock);
handle = __arm_smmu_sva_bind(dev, mm);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
ret = __arm_smmu_sva_bind(dev, mm);
mutex_unlock(&sva_lock);

return ret;
(Diffs for the remaining three changed files were not loaded in this view.)
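
One detail in arm-smmu-v3-sva.c worth checking by hand is the new CMDQ_MAX_TLBI_OPS threshold. A minimal standalone sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the kernel macro scales with whatever page size is configured:

	#include <stdio.h>

	/* Mirrors the kernel definitions for illustration only. */
	#define PAGE_SHIFT 12                              /* assumed: 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3))  /* 1 << 9 = 512 commands */

	int main(void)
	{
		/* An SMMU without the range-invalidation feature issues one
		 * TLBI command per page. Beyond this many pages, the new code
		 * sets size to 0 and falls back to a single ASID-wide
		 * invalidation instead of flooding the command queue.
		 */
		unsigned long threshold = CMDQ_MAX_TLBI_OPS * PAGE_SIZE;

		printf("threshold: %lu bytes (%lu MiB)\n",
		       threshold, threshold >> 20); /* 2097152 bytes = 2 MiB */
		return 0;
	}

With 64 KiB pages the same formula gives 8192 commands and a 512 MiB threshold, so larger granules tolerate proportionally larger ranges before the fallback kicks in.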
