@@ -25,18 +25,35 @@ struct arm_smmu_mmu_notifier {
 #define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
 
 struct arm_smmu_bond {
-	struct iommu_sva		sva;
 	struct mm_struct		*mm;
 	struct arm_smmu_mmu_notifier	*smmu_mn;
 	struct list_head		list;
-	refcount_t			refs;
 };
 
 #define sva_to_bond(handle) \
 	container_of(handle, struct arm_smmu_bond, sva)
 
 static DEFINE_MUTEX(sva_lock);
 
+/*
+ * Write the CD to the CD tables for all masters that this domain is attached
+ * to. Note that this is only used to update existing CD entries in the target
+ * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
+ */
+static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
+					     int ssid,
+					     struct arm_smmu_ctx_desc *cd)
+{
+	struct arm_smmu_master *master;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		arm_smmu_write_ctx_desc(master, ssid, cd);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+}
+
 /*
  * Check if the CPU ASID is available on the SMMU side. If a private context
  * descriptor is using it, try to replace it.
@@ -62,7 +79,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 		return cd;
 	}
 
-	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+	smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
 	smmu = smmu_domain->smmu;
 
 	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
@@ -80,7 +97,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
+	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
 
 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
@@ -186,6 +203,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
 	}
 }
 
+/*
+ * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
+ * is used as a threshold to replace per-page TLBI commands to issue in the
+ * command queue with an address-space TLBI command, when SMMU w/o a range
+ * invalidation feature handles too many per-page TLBI commands, which will
+ * otherwise result in a soft lockup.
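+ *
+ * For example, with 4KB pages (PAGE_SHIFT == 12) this evaluates to
+ * 1 << 9 = 512 commands, so any range of 2MB or more is invalidated by
+ * ASID instead of page by page.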
+ */
+#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
+
 static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 						struct mm_struct *mm,
 						unsigned long start,
@@ -201,8 +227,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 	 * range. So do a simple translation here by calculating size correctly.
 	 */
 	size = end - start;
-	if (size == ULONG_MAX)
-		size = 0;
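+	/*
+	 * Without ARM_SMMU_FEAT_RANGE_INV, each page in the range costs one
+	 * TLBI command, so cap the work at CMDQ_MAX_TLBI_OPS pages and fall
+	 * back to invalidating the whole ASID (size == 0) beyond that.
+	 */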
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+			size = 0;
+	} else {
+		if (size == ULONG_MAX)
+			size = 0;
+	}
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
 		if (!size)
@@ -233,7 +264,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 	 * but disable translation.
 	 */
-	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+	arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);
 
 	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
 	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
@@ -259,8 +290,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 			  struct mm_struct *mm)
 {
 	int ret;
+	unsigned long flags;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_mmu_notifier *smmu_mn;
+	struct arm_smmu_master *master;
 
 	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
 		if (smmu_mn->mn.mm == mm) {
@@ -290,7 +323,16 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 		goto err_free_cd;
 	}
 
-	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
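+	/* Install the new CD on every master currently attached to the domain */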
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
+		if (ret) {
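+			/* Unwind: clear the CD from this and every master already written */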
+			list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
+				arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 	if (ret)
 		goto err_put_notifier;
 
@@ -315,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 		return;
 
 	list_del(&smmu_mn->list);
-	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+	arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);
 
 	/*
 	 * If we went through clear(), we've already invalidated, and no
@@ -331,8 +374,7 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 	arm_smmu_free_shared_cd(cd);
 }
 
-static struct iommu_sva *
-__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 {
 	int ret;
 	struct arm_smmu_bond *bond;
@@ -341,23 +383,13 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	if (!master || !master->sva_enabled)
-		return ERR_PTR(-ENODEV);
-
-	/* If bind() was already called for this {dev, mm} pair, reuse it. */
-	list_for_each_entry(bond, &master->bonds, list) {
-		if (bond->mm == mm) {
-			refcount_inc(&bond->refs);
-			return &bond->sva;
-		}
-	}
+		return -ENODEV;
 
 	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
 	if (!bond)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	bond->mm = mm;
-	bond->sva.dev = dev;
-	refcount_set(&bond->refs, 1);
 
 	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
 	if (IS_ERR(bond->smmu_mn)) {
@@ -366,11 +398,11 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 	}
 
 	list_add(&bond->list, &master->bonds);
-	return &bond->sva;
+	return 0;
 
 err_free_bond:
 	kfree(bond);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
@@ -536,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 		}
 	}
 
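+	/*
+	 * Bonds are no longer shared or refcounted: each set_dev_pasid()
+	 * creates its own bond, so it is torn down exactly once here.
+	 */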
-	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+	if (!WARN_ON(!bond)) {
 		list_del(&bond->list);
 		arm_smmu_mmu_notifier_put(bond->smmu_mn);
 		kfree(bond);
@@ -548,13 +580,10 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 				      struct device *dev, ioasid_t id)
 {
 	int ret = 0;
-	struct iommu_sva *handle;
 	struct mm_struct *mm = domain->mm;
 
 	mutex_lock(&sva_lock);
-	handle = __arm_smmu_sva_bind(dev, mm);
-	if (IS_ERR(handle))
-		ret = PTR_ERR(handle);
+	ret = __arm_smmu_sva_bind(dev, mm);
 	mutex_unlock(&sva_lock);
 
 	return ret;