@@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
-	u32 ndomains;
-
-	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("%s: Number of Domains supported <%d>\n",
-		 iommu->name, ndomains);
-
-	spin_lock_init(&iommu->lock);
-
-	iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
-	if (!iommu->domain_ids)
-		return -ENOMEM;
-
-	/*
-	 * If Caching mode is set, then invalid translations are tagged
-	 * with domain-id 0, hence we need to pre-allocate it. We also
-	 * use domain-id 0 as a marker for non-allocated domain-id, so
-	 * make sure it is not used for a real domain.
-	 */
-	set_bit(0, iommu->domain_ids);
-
-	/*
-	 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
-	 * entry for first-level or pass-through translation modes should
-	 * be programmed with a domain id different from those used for
-	 * second-level or nested translation. We reserve a domain id for
-	 * this purpose. This domain id is also used for identity domain
-	 * in legacy mode.
-	 */
-	set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
-
-	return 0;
-}
-
 static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
-	if (!iommu->domain_ids)
-		return;
-
 	/*
 	 * All iommu domains must have been detached from the devices,
 	 * hence there should be no domain IDs in use.
 	 */
-	if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
-		    > NUM_RESERVED_DID))
+	if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
 		return;
 
 	if (iommu->gcmd & DMA_GCMD_TE)
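The deleted iommu_init_domains() reserved domain-id 0 (used to tag invalid translations when caching mode is set, and as the not-allocated marker) and FLPT_DEFAULT_DID (required by the VT-d spec for first-level and pass-through PASID entries) by pre-setting their bitmap bits. With an IDA, the same effect comes from the allocation floor: domain_attach_iommu() further down allocates starting at IDA_START_DID, so the reserved IDs are simply never handed out. A minimal sketch of how the per-IOMMU IDA could be set up; the helper name is illustrative, and the real init site and the IDA_START_DID definition are outside this diff:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include "iommu.h"	/* struct intel_iommu; ->domain_ida is the field this series adds */

/* Illustrative only, not part of the patch: an empty IDA needs no pre-set
 * bits, because IDs below IDA_START_DID stay reserved simply by never
 * being requested in domain_attach_iommu(). */
static void example_init_domain_ida(struct intel_iommu *iommu)
{
	spin_lock_init(&iommu->lock);
	ida_init(&iommu->domain_ida);
}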
@@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
-	if (iommu->domain_ids) {
-		bitmap_free(iommu->domain_ids);
-		iommu->domain_ids = NULL;
-	}
-
 	if (iommu->copied_tables) {
 		bitmap_free(iommu->copied_tables);
 		iommu->copied_tables = NULL;
@@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu)
 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 {
 	struct iommu_domain_info *info, *curr;
-	unsigned long ndomains;
 	int num, ret = -ENOSPC;
 
 	if (domain->domain.type == IOMMU_DOMAIN_SVA)
@@ -1399,14 +1354,13 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 		return 0;
 	}
 
-	ndomains = cap_ndoms(iommu->cap);
-	num = find_first_zero_bit(iommu->domain_ids, ndomains);
-	if (num >= ndomains) {
+	num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
+			      cap_ndoms(iommu->cap) - 1, GFP_ATOMIC);
+	if (num < 0) {
 		pr_err("%s: No free domain ids\n", iommu->name);
 		goto err_unlock;
 	}
 
-	set_bit(num, iommu->domain_ids);
 	info->refcnt	= 1;
 	info->did	= num;
 	info->iommu	= iommu;
@@ -1421,7 +1375,7 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	return 0;
 
 err_clear:
-	clear_bit(info->did, iommu->domain_ids);
+	ida_free(&iommu->domain_ida, info->did);
err_unlock:
 	spin_unlock(&iommu->lock);
 	kfree(info);
@@ -1438,7 +1392,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	spin_lock(&iommu->lock);
 	info = xa_load(&domain->iommu_array, iommu->seq_id);
 	if (--info->refcnt == 0) {
-		clear_bit(info->did, iommu->domain_ids);
+		ida_free(&iommu->domain_ida, info->did);
 		xa_erase(&domain->iommu_array, iommu->seq_id);
 		domain->nid = NUMA_NO_NODE;
 		kfree(info);
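Both paths above run under iommu->lock, a spinlock, which is why the attach side passes GFP_ATOMIC to ida_alloc_range(); the call returns the lowest free ID in [IDA_START_DID, cap_ndoms(iommu->cap) - 1] or a negative errno, and ida_free() returns the ID on detach and on the attach error path. A stand-alone sketch of that allocate/release pattern against a generic IDA (example_ida and the helper are illustrative, not driver code):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Allocate the lowest free ID in [start, end], use it, then return it.
 * ida_alloc_range() yields -ENOSPC when the range is exhausted and
 * -ENOMEM on allocation failure. */
static int example_id_cycle(unsigned int start, unsigned int end)
{
	int id = ida_alloc_range(&example_ida, start, end, GFP_KERNEL);

	if (id < 0)
		return id;

	/* ... id plays the role of a domain id here ... */

	ida_free(&example_ida, id);
	return 0;
}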
@@ -2041,7 +1995,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 
 		did = context_domain_id(&ce);
 		if (did >= 0 && did < cap_ndoms(iommu->cap))
-			set_bit(did, iommu->domain_ids);
+			ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
 
 		set_context_copied(iommu, bus, devfn);
 		new_ce[idx] = ce;
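Here the domain ID was already chosen by the previous kernel and only needs to be marked busy so that later ida_alloc_range() calls skip it; passing the same value as min and max pins the allocation to exactly that ID. A small illustration of the idiom (names are illustrative, not from the patch):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Reserve one specific ID: with min == max, ida_alloc_range() either
 * returns 'id' or fails with -ENOSPC if it is already allocated. */
static int example_reserve_exact(unsigned int id)
{
	return ida_alloc_range(&example_ida, id, id, GFP_KERNEL);
}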
@@ -2168,11 +2122,6 @@ static int __init init_dmars(void)
 		}
 
 		intel_iommu_init_qi(iommu);
-
-		ret = iommu_init_domains(iommu);
-		if (ret)
-			goto free_iommu;
-
 		init_translation_status(iommu);
 
 		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
@@ -2650,9 +2599,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
 
-	ret = iommu_init_domains(iommu);
-	if (ret == 0)
-		ret = iommu_alloc_root_entry(iommu);
+	ret = iommu_alloc_root_entry(iommu);
 	if (ret)
 		goto out;
 
@@ -2971,9 +2918,14 @@ static ssize_t domains_used_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
 	struct intel_iommu *iommu = dev_to_intel_iommu(dev);
-	return sysfs_emit(buf, "%d\n",
-			  bitmap_weight(iommu->domain_ids,
-					cap_ndoms(iommu->cap)));
+	unsigned int count = 0;
+	int id;
+
+	for (id = 0; id < cap_ndoms(iommu->cap); id++)
+		if (ida_exists(&iommu->domain_ida, id))
+			count++;
+
+	return sysfs_emit(buf, "%d\n", count);
 }
 static DEVICE_ATTR_RO(domains_used);
 
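With the bitmap gone, the sysfs attribute counts used domain IDs by probing the IDA for every possible ID below cap_ndoms(); ida_exists() reports whether a given ID is currently allocated, so the result matches what bitmap_weight() used to return. A sketch of the same counting loop over a generic IDA, assuming ida_exists() behaves as used in the hunk above:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Count allocated IDs in [0, max), mirroring domains_used_show(). */
static unsigned int example_count_used(unsigned int max)
{
	unsigned int id, count = 0;

	for (id = 0; id < max; id++)
		if (ida_exists(&example_ida, id))
			count++;

	return count;
}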