/*
 * Indexes into the "ibm,ddw-extensions" device-tree property array:
 * [0] holds the number of extensions present, the following entries
 * hold the per-extension values in this fixed order.
 */
enum {
	DDW_EXT_SIZE = 0,
	DDW_EXT_RESET_DMA_WIN = 1,
	DDW_EXT_QUERY_OUT_SIZE = 2,
	DDW_EXT_LIMITED_ADDR_MODE = 3
};
5859
5960static struct iommu_table * iommu_pseries_alloc_table (int node )
@@ -1336,6 +1337,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
13361337 ret );
13371338}
13381339
1340+ /*
1341+ * Platforms support placing PHB in limited address mode starting with LoPAR
1342+ * level 2.13 implement. In this mode, the DMA address returned by DDW is over
1343+ * 4GB but, less than 64-bits. This benefits IO adapters that don't support
1344+ * 64-bits for DMA addresses.
1345+ */
1346+ static int limited_dma_window (struct pci_dev * dev , struct device_node * par_dn )
1347+ {
1348+ int ret ;
1349+ u32 cfg_addr , reset_dma_win , las_supported ;
1350+ u64 buid ;
1351+ struct device_node * dn ;
1352+ struct pci_dn * pdn ;
1353+
1354+ ret = ddw_read_ext (par_dn , DDW_EXT_RESET_DMA_WIN , & reset_dma_win );
1355+ if (ret )
1356+ goto out ;
1357+
1358+ ret = ddw_read_ext (par_dn , DDW_EXT_LIMITED_ADDR_MODE , & las_supported );
1359+
1360+ /* Limited Address Space extension available on the platform but DDW in
1361+ * limited addressing mode not supported
1362+ */
1363+ if (!ret && !las_supported )
1364+ ret = - EPROTO ;
1365+
1366+ if (ret ) {
1367+ dev_info (& dev -> dev , "Limited Address Space for DDW not Supported, err: %d" , ret );
1368+ goto out ;
1369+ }
1370+
1371+ dn = pci_device_to_OF_node (dev );
1372+ pdn = PCI_DN (dn );
1373+ buid = pdn -> phb -> buid ;
1374+ cfg_addr = (pdn -> busno << 16 ) | (pdn -> devfn << 8 );
1375+
1376+ ret = rtas_call (reset_dma_win , 4 , 1 , NULL , cfg_addr , BUID_HI (buid ),
1377+ BUID_LO (buid ), 1 );
1378+ if (ret )
1379+ dev_info (& dev -> dev ,
1380+ "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d " ,
1381+ reset_dma_win , cfg_addr , BUID_HI (buid ), BUID_LO (buid ),
1382+ ret );
1383+
1384+ out :
1385+ return ret ;
1386+ }
1387+
13391388/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
13401389static int iommu_get_page_shift (u32 query_page_size )
13411390{
@@ -1403,7 +1452,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
14031452 *
14041453 * returns true if can map all pages (direct mapping), false otherwise..
14051454 */
1406- static bool enable_ddw (struct pci_dev * dev , struct device_node * pdn )
1455+ static bool enable_ddw (struct pci_dev * dev , struct device_node * pdn , u64 dma_mask )
14071456{
14081457 int len = 0 , ret ;
14091458 int max_ram_len = order_base_2 (ddw_memory_hotplug_max ());
@@ -1422,6 +1471,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
14221471 bool pmem_present ;
14231472 struct pci_dn * pci = PCI_DN (pdn );
14241473 struct property * default_win = NULL ;
1474+ bool limited_addr_req = false, limited_addr_enabled = false;
1475+ int dev_max_ddw ;
1476+ int ddw_sz ;
14251477
14261478 dn = of_find_node_by_type (NULL , "ibm,pmemory" );
14271479 pmem_present = dn != NULL ;
@@ -1448,7 +1500,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
14481500 * the ibm,ddw-applicable property holds the tokens for:
14491501 * ibm,query-pe-dma-window
14501502 * ibm,create-pe-dma-window
1451- * ibm,remove-pe-dma-window
14521503 * for the given node in that order.
14531504 * the property is actually in the parent, not the PE
14541505 */
@@ -1468,6 +1519,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
14681519 if (ret != 0 )
14691520 goto out_failed ;
14701521
1522+ /* Is DMA Limited Addressing required? This is the case when the driver
1523+ * has requested a DDW but supports a DMA mask which is less than 64 bits.
1524+ */
1525+ limited_addr_req = (dma_mask != DMA_BIT_MASK (64 ));
1526+
1527+ /* place the PHB in Limited Addressing mode */
1528+ if (limited_addr_req ) {
1529+ if (limited_dma_window (dev , pdn ))
1530+ goto out_failed ;
1531+
1532+ /* PHB is in Limited address mode */
1533+ limited_addr_enabled = true;
1534+ }
1535+
14711536 /*
14721537 * If there is no window available, remove the default DMA window,
14731538 * if it's present. This will make all the resources available to the
@@ -1514,6 +1579,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
15141579 goto out_failed ;
15151580 }
15161581
1582+ /* Maximum DMA window size that the device can address (in log2) */
1583+ dev_max_ddw = fls64 (dma_mask );
1584+
1585+ /* If the device DMA mask is less than 64-bits, make sure the DMA window
1586+ * size is not bigger than what the device can access
1587+ */
1588+ ddw_sz = min (order_base_2 (query .largest_available_block << page_shift ),
1589+ dev_max_ddw );
1590+
15171591 /*
15181592 * The "ibm,pmemory" can appear anywhere in the address space.
15191593 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
@@ -1522,23 +1596,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
15221596 */
15231597 len = max_ram_len ;
15241598 if (pmem_present ) {
1525- if (query .largest_available_block >=
1526- (1ULL << (MAX_PHYSMEM_BITS - page_shift )))
1599+ if (ddw_sz >= MAX_PHYSMEM_BITS )
15271600 len = MAX_PHYSMEM_BITS ;
15281601 else
15291602 dev_info (& dev -> dev , "Skipping ibm,pmemory" );
15301603 }
15311604
15321605 /* check if the available block * number of ptes will map everything */
1533- if (query . largest_available_block < ( 1ULL << ( len - page_shift )) ) {
1606+ if (ddw_sz < len ) {
15341607 dev_dbg (& dev -> dev ,
15351608 "can't map partition max 0x%llx with %llu %llu-sized pages\n" ,
15361609 1ULL << len ,
15371610 query .largest_available_block ,
15381611 1ULL << page_shift );
15391612
1540- len = order_base_2 (query .largest_available_block << page_shift );
1541-
1613+ len = ddw_sz ;
15421614 dynamic_mapping = true;
15431615 } else {
15441616 direct_mapping = !default_win_removed ||
@@ -1552,8 +1624,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
15521624 */
15531625 if (default_win_removed && pmem_present && !direct_mapping ) {
15541626 /* DDW is big enough to be split */
1555- if ((query .largest_available_block << page_shift ) >=
1556- MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len )) {
1627+ if ((1ULL << ddw_sz ) >=
1628+ MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len )) {
1629+
15571630 direct_mapping = true;
15581631
15591632 /* offset of the Dynamic part of DDW */
@@ -1564,8 +1637,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
15641637 dynamic_mapping = true;
15651638
15661639 /* create max size DDW possible */
1567- len = order_base_2 (query .largest_available_block
1568- << page_shift );
1640+ len = ddw_sz ;
15691641 }
15701642 }
15711643
@@ -1694,7 +1766,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
16941766 __remove_dma_window (pdn , ddw_avail , create .liobn );
16951767
16961768out_failed :
1697- if (default_win_removed )
1769+ if (default_win_removed || limited_addr_enabled )
16981770 reset_dma_window (dev , pdn );
16991771
17001772 fpdn = kzalloc (sizeof (* fpdn ), GFP_KERNEL );
@@ -1713,6 +1785,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
17131785 dev -> dev .bus_dma_limit = dev -> dev .archdata .dma_offset +
17141786 (1ULL << max_ram_len );
17151787
1788+ dev_info (& dev -> dev , "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n" ,
1789+ limited_addr_req , limited_addr_enabled , direct_mapping );
1790+
17161791 return direct_mapping ;
17171792}
17181793
@@ -1838,8 +1913,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
18381913{
18391914 struct device_node * dn = pci_device_to_OF_node (pdev ), * pdn ;
18401915
1841- /* only attempt to use a new window if 64-bit DMA is requested */
1842- if (dma_mask < DMA_BIT_MASK (64 ))
1916+ /* For DDW, the DMA mask should be more than 32 bits. For a mask more
1917+ * than 32 bits but less than 64 bits, DMA addressing is supported in
1918+ * Limited Addressing mode.
1919+ */
1920+ if (dma_mask <= DMA_BIT_MASK (32 ))
18431921 return false;
18441922
18451923 dev_dbg (& pdev -> dev , "node is %pOF\n" , dn );
@@ -1852,7 +1930,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
18521930 */
18531931 pdn = pci_dma_find (dn , NULL );
18541932 if (pdn && PCI_DN (pdn ))
1855- return enable_ddw (pdev , pdn );
1933+ return enable_ddw (pdev , pdn , dma_mask );
18561934
18571935 return false;
18581936}
0 commit comments