@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
+#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -1652,45 +1653,70 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
 	return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
 }
 
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+					   unsigned int order)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
 	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
-	unsigned long addr = vma->vm_start;
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 
+	if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+		      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+		ret = VM_FAULT_FALLBACK;
+		goto out;
+	}
+
 	pfn = vma_to_pfn(vma);
 
 	down_read(&vdev->memory_lock);
 
 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
 		goto out_unlock;
 
-	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
-	if (ret & VM_FAULT_ERROR)
-		goto out_unlock;
-
-	/*
-	 * Pre-fault the remainder of the vma, abort further insertions and
-	 * supress error if fault is encountered during pre-fault.
-	 */
-	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
-		if (addr == vmf->address)
-			continue;
-
-		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
-			break;
+	switch (order) {
+	case 0:
+		ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+		break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+	case PMD_ORDER:
+		ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+	case PUD_ORDER:
+		ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+	default:
+		ret = VM_FAULT_FALLBACK;
 	}
 
 out_unlock:
 	up_read(&vdev->memory_lock);
+out:
+	dev_dbg_ratelimited(&vdev->pdev->dev,
+			    "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+			    __func__, order,
+			    vma->vm_pgoff >>
+				(VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+			    pgoff, (unsigned int)ret);
 
 	return ret;
 }
 
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+	return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
 static const struct vm_operations_struct vfio_pci_mmap_ops = {
-	.fault = vfio_pci_mmap_fault,
+	.fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+	.huge_fault = vfio_pci_mmap_huge_fault,
+#endif
 };
 
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
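The gate at the top of vfio_pci_mmap_huge_fault() is the load-bearing piece: a huge mapping is only attempted when the faulting address is aligned to the requested order and the resulting mapping would not extend past the end of the VMA; otherwise the handler returns VM_FAULT_FALLBACK and the core MM retries at a smaller size, ultimately landing in the ordinary .fault path (order 0). Below is a minimal standalone sketch of that predicate for illustration; it is not code from the patch, PAGE_SIZE is hardcoded to 4 KiB, and huge_fault_fits() is a hypothetical helper name:

/*
 * Standalone sketch (not from the patch) mirroring the alignment/size
 * check vfio_pci_mmap_huge_fault() performs before attempting a
 * PMD/PUD insertion. huge_fault_fits() is a hypothetical name and
 * PAGE_SIZE is hardcoded for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool huge_fault_fits(unsigned long address, unsigned int order,
			    unsigned long vm_end)
{
	unsigned long size = PAGE_SIZE << order;

	/*
	 * Mirrors the kernel test: a misaligned fault address or a
	 * mapping that would run past vm_end means VM_FAULT_FALLBACK.
	 */
	return !(address & (size - 1)) && address + size <= vm_end;
}

int main(void)
{
	/* order 9 == PMD_ORDER on x86-64, i.e. a 2 MiB mapping */
	printf("%d\n", huge_fault_fits(0x200000, 9, 0x400000)); /* 1: fits */
	printf("%d\n", huge_fault_fits(0x201000, 9, 0x400000)); /* 0: misaligned */
	printf("%d\n", huge_fault_fits(0x200000, 9, 0x300000)); /* 0: past vm_end */
	return 0;
}

On x86-64, PMD_ORDER is 9 (2 MiB) and PUD_ORDER is 18 (1 GiB), so only BAR mappings whose size and alignment reach those thresholds can take the huge paths; everything else transparently degrades to 4 KiB mappings, which is why the default case and the misalignment check both return VM_FAULT_FALLBACK rather than an error.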