20 | 20 | #include <linux/pgtable.h> |
21 | 21 | #include <asm/page-states.h> |
22 | 22 | #include <asm/pgalloc.h> |
| 23 | +#include <asm/gmap_helpers.h> |
23 | 24 | #include <asm/gmap.h> |
24 | 25 | #include <asm/page.h> |
25 | 26 |
@@ -617,63 +618,20 @@ EXPORT_SYMBOL(__gmap_link); |
617 | 618 | */ |
618 | 619 | void __gmap_zap(struct gmap *gmap, unsigned long gaddr) |
619 | 620 | { |
620 | | - struct vm_area_struct *vma; |
621 | 621 | unsigned long vmaddr; |
622 | | - spinlock_t *ptl; |
623 | | - pte_t *ptep; |
| 622 | + |
| 623 | + mmap_assert_locked(gmap->mm); |
624 | 624 |
625 | 625 | /* Find the vm address for the guest address */ |
626 | 626 | vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host, |
627 | 627 | gaddr >> PMD_SHIFT); |
628 | 628 | if (vmaddr) { |
629 | 629 | vmaddr |= gaddr & ~PMD_MASK; |
630 | | - |
631 | | - vma = vma_lookup(gmap->mm, vmaddr); |
632 | | - if (!vma || is_vm_hugetlb_page(vma)) |
633 | | - return; |
634 | | - |
635 | | - /* Get pointer to the page table entry */ |
636 | | - ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); |
637 | | - if (likely(ptep)) { |
638 | | - ptep_zap_unused(gmap->mm, vmaddr, ptep, 0); |
639 | | - pte_unmap_unlock(ptep, ptl); |
640 | | - } |
| 630 | + gmap_helper_zap_one_page(gmap->mm, vmaddr); |
641 | 631 | } |
642 | 632 | } |
643 | 633 | EXPORT_SYMBOL_GPL(__gmap_zap); |
644 | 634 |
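The open-coded PTE zap above now hides behind gmap_helper_zap_one_page(). The helper's body is not part of this diff; below is a minimal sketch of what it presumably looks like on the gmap_helpers side, reconstructed from the deleted lines (only the function name and the new <asm/gmap_helpers.h> include are confirmed by the hunks):

void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Hugetlb mappings are never zapped this way */
	vma = vma_lookup(mm, vmaddr);
	if (!vma || is_vm_hugetlb_page(vma))
		return;

	/* Get a pointer to the locked page table entry */
	ptep = get_locked_pte(mm, vmaddr, &ptl);
	if (likely(ptep)) {
		ptep_zap_unused(mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}

Note the mmap_assert_locked() added to __gmap_zap(): the locking contract stays with the caller, which is what makes the vma_lookup() and get_locked_pte() calls in the helper safe.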
645 | | -void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) |
646 | | -{ |
647 | | - unsigned long gaddr, vmaddr, size; |
648 | | - struct vm_area_struct *vma; |
649 | | - |
650 | | - mmap_read_lock(gmap->mm); |
651 | | - for (gaddr = from; gaddr < to; |
652 | | - gaddr = (gaddr + PMD_SIZE) & PMD_MASK) { |
653 | | - /* Find the vm address for the guest address */ |
654 | | - vmaddr = (unsigned long) |
655 | | - radix_tree_lookup(&gmap->guest_to_host, |
656 | | - gaddr >> PMD_SHIFT); |
657 | | - if (!vmaddr) |
658 | | - continue; |
659 | | - vmaddr |= gaddr & ~PMD_MASK; |
660 | | - /* Find vma in the parent mm */ |
661 | | - vma = find_vma(gmap->mm, vmaddr); |
662 | | - if (!vma) |
663 | | - continue; |
664 | | - /* |
665 | | - * We do not discard pages that are backed by |
666 | | - * hugetlbfs, so we don't have to refault them. |
667 | | - */ |
668 | | - if (is_vm_hugetlb_page(vma)) |
669 | | - continue; |
670 | | - size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); |
671 | | - zap_page_range_single(vma, vmaddr, size, NULL); |
672 | | - } |
673 | | - mmap_read_unlock(gmap->mm); |
674 | | -} |
675 | | -EXPORT_SYMBOL_GPL(gmap_discard); |
676 | | - |
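gmap_discard() leaves gmap.c entirely. The destination is not shown in this diff; judging by the other helpers, it presumably reappears under a gmap_helper_*() name (an assumption). A sketch mirroring the deleted body, with the subtle PMD-granular stepping spelled out in comments:

/*
 * Sketch only: zap the host mappings backing the guest range [from, to).
 * The guest_to_host radix tree is indexed by guest PMD, so the walk
 * advances one (possibly partial) PMD-sized slice per iteration.
 */
static void gmap_discard_sketch(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	mmap_read_lock(gmap->mm);
	for (gaddr = from; gaddr < to; gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the host address for the guest PMD... */
		vmaddr = (unsigned long)radix_tree_lookup(&gmap->guest_to_host,
							  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		/* ...and re-add the offset within the PMD */
		vmaddr |= gaddr & ~PMD_MASK;
		/* Skip hugetlbfs-backed pages so they don't have to be refaulted */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma || is_vm_hugetlb_page(vma))
			continue;
		/* Clamp the slice to the range end and to the PMD boundary */
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range_single(vma, vmaddr, size, NULL);
	}
	mmap_read_unlock(gmap->mm);
}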
677 | 635 | static LIST_HEAD(gmap_notifier_list); |
678 | 636 | static DEFINE_SPINLOCK(gmap_notifier_lock); |
679 | 637 |
@@ -2266,138 +2224,6 @@ int s390_enable_sie(void) |
2266 | 2224 | } |
2267 | 2225 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
2268 | 2226 |
2269 | | -static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr, |
2270 | | - unsigned long end, struct mm_walk *walk) |
2271 | | -{ |
2272 | | - unsigned long *found_addr = walk->private; |
2273 | | - |
2274 |      | -	/* Return 1 if the page is a zeropage. */ |
2275 | | - if (is_zero_pfn(pte_pfn(*pte))) { |
2276 | | - /* |
2277 | | - * Shared zeropage in e.g., a FS DAX mapping? We cannot do the |
2278 | | - * right thing and likely don't care: FAULT_FLAG_UNSHARE |
2279 | | - * currently only works in COW mappings, which is also where |
2280 | | - * mm_forbids_zeropage() is checked. |
2281 | | - */ |
2282 | | - if (!is_cow_mapping(walk->vma->vm_flags)) |
2283 | | - return -EFAULT; |
2284 | | - |
2285 | | - *found_addr = addr; |
2286 | | - return 1; |
2287 | | - } |
2288 | | - return 0; |
2289 | | -} |
2290 | | - |
2291 | | -static const struct mm_walk_ops find_zeropage_ops = { |
2292 | | - .pte_entry = find_zeropage_pte_entry, |
2293 | | - .walk_lock = PGWALK_WRLOCK, |
2294 | | -}; |
2295 | | - |
2296 | | -/* |
2297 | | - * Unshare all shared zeropages, replacing them by anonymous pages. Note that |
2298 | | - * we cannot simply zap all shared zeropages, because this could later |
2299 | | - * trigger unexpected userfaultfd missing events. |
2300 | | - * |
2301 | | - * This must be called after mm->context.allow_cow_sharing was |
2302 | | - * set to 0, to avoid future mappings of shared zeropages. |
2303 | | - * |
2304 |      | - * The core mm code contracts with s390: even if mm were to remove a |
2305 |      | - * page table while racing with walk_page_range_vma() (so that |
2306 |      | - * pte_offset_map_lock() would fail), it will never insert a page |
2307 |      | - * table containing empty zero pages once mm_forbids_zeropage(mm), |
2308 |      | - * i.e. mm->context.allow_cow_sharing, is set to 0. |
2309 | | - */ |
2310 | | -static int __s390_unshare_zeropages(struct mm_struct *mm) |
2311 | | -{ |
2312 | | - struct vm_area_struct *vma; |
2313 | | - VMA_ITERATOR(vmi, mm, 0); |
2314 | | - unsigned long addr; |
2315 | | - vm_fault_t fault; |
2316 | | - int rc; |
2317 | | - |
2318 | | - for_each_vma(vmi, vma) { |
2319 | | - /* |
2320 |      | -		 * We could restrict this to COW mappings, but it is more |
2321 |      | -		 * future-proof to catch unexpected zeropages in other mappings and |
2322 | | - * fail. |
2323 | | - */ |
2324 | | - if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma)) |
2325 | | - continue; |
2326 | | - addr = vma->vm_start; |
2327 | | - |
2328 | | -retry: |
2329 | | - rc = walk_page_range_vma(vma, addr, vma->vm_end, |
2330 | | - &find_zeropage_ops, &addr); |
2331 | | - if (rc < 0) |
2332 | | - return rc; |
2333 | | - else if (!rc) |
2334 | | - continue; |
2335 | | - |
2336 | | - /* addr was updated by find_zeropage_pte_entry() */ |
2337 | | - fault = handle_mm_fault(vma, addr, |
2338 | | - FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, |
2339 | | - NULL); |
2340 | | - if (fault & VM_FAULT_OOM) |
2341 | | - return -ENOMEM; |
2342 | | - /* |
2343 | | - * See break_ksm(): even after handle_mm_fault() returned 0, we |
2344 | | - * must start the lookup from the current address, because |
2345 | | - * handle_mm_fault() may back out if there's any difficulty. |
2346 | | - * |
2347 | | - * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but |
2348 | | - * maybe they could trigger in the future on concurrent |
2349 | | - * truncation. In that case, the shared zeropage would be gone |
2350 | | - * and we can simply retry and make progress. |
2351 | | - */ |
2352 | | - cond_resched(); |
2353 | | - goto retry; |
2354 | | - } |
2355 | | - |
2356 | | - return 0; |
2357 | | -} |
2358 | | - |
2359 | | -static int __s390_disable_cow_sharing(struct mm_struct *mm) |
2360 | | -{ |
2361 | | - int rc; |
2362 | | - |
2363 | | - if (!mm->context.allow_cow_sharing) |
2364 | | - return 0; |
2365 | | - |
2366 | | - mm->context.allow_cow_sharing = 0; |
2367 | | - |
2368 | | - /* Replace all shared zeropages by anonymous pages. */ |
2369 | | - rc = __s390_unshare_zeropages(mm); |
2370 | | - /* |
2371 | | - * Make sure to disable KSM (if enabled for the whole process or |
2372 | | - * individual VMAs). Note that nothing currently hinders user space |
2373 | | - * from re-enabling it. |
2374 | | - */ |
2375 | | - if (!rc) |
2376 | | - rc = ksm_disable(mm); |
2377 | | - if (rc) |
2378 | | - mm->context.allow_cow_sharing = 1; |
2379 | | - return rc; |
2380 | | -} |
2381 | | - |
2382 | | -/* |
2383 | | - * Disable most COW-sharing of memory pages for the whole process: |
2384 | | - * (1) Disable KSM and unmerge/unshare any KSM pages. |
2385 |      | - * (2) Disallow shared zeropages and unshare any zeropages that are mapped. |
2386 | | - * |
2387 |      | - * Note that we currently don't bother with COW-shared pages that are shared |
2388 | | - * with parent/child processes due to fork(). |
2389 | | - */ |
2390 | | -int s390_disable_cow_sharing(void) |
2391 | | -{ |
2392 | | - int rc; |
2393 | | - |
2394 | | - mmap_write_lock(current->mm); |
2395 | | - rc = __s390_disable_cow_sharing(current->mm); |
2396 | | - mmap_write_unlock(current->mm); |
2397 | | - return rc; |
2398 | | -} |
2399 | | -EXPORT_SYMBOL_GPL(s390_disable_cow_sharing); |
2400 | | - |
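All of the zeropage-unsharing machinery above moves out of gmap.c as one block. The call-site hunk below confirms the new entry point, gmap_helper_disable_cow_sharing(), and that it no longer takes an mm argument. A plausible sketch of the relocated wrapper, assuming it acts on current->mm and otherwise preserves the deleted __s390_disable_cow_sharing() logic (unshare_zeropages_sketch() is a hypothetical stand-in for the deleted zeropage page-walk code):

int gmap_helper_disable_cow_sharing(void)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/* Assumed: callers such as s390_enable_skey() hold the mmap write lock */
	mmap_assert_write_locked(mm);

	if (!mm->context.allow_cow_sharing)
		return 0;
	/* Forbid new shared zeropages before hunting down the existing ones */
	mm->context.allow_cow_sharing = 0;

	/* Replace all shared zeropages by anonymous pages */
	rc = unshare_zeropages_sketch(mm);
	/* KSM must be disabled as well; roll back on failure */
	if (!rc)
		rc = ksm_disable(mm);
	if (rc)
		mm->context.allow_cow_sharing = 1;
	return rc;
}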
2401 | 2227 | /* |
2402 | 2228 | * Enable storage key handling from now on and initialize the storage |
2403 | 2229 | * keys with the default key. |
@@ -2465,7 +2291,7 @@ int s390_enable_skey(void) |
2465 | 2291 | goto out_up; |
2466 | 2292 |
2467 | 2293 | mm->context.uses_skeys = 1; |
2468 | | - rc = __s390_disable_cow_sharing(mm); |
| 2294 | + rc = gmap_helper_disable_cow_sharing(); |
2469 | 2295 | if (rc) { |
2470 | 2296 | mm->context.uses_skeys = 0; |
2471 | 2297 | goto out_up; |
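For callers that do not already hold the lock, the deleted standalone wrapper s390_disable_cow_sharing() suggests the expected usage pattern of the new helper (a sketch, assuming the helper asserts rather than takes the mmap lock itself):

	mmap_write_lock(current->mm);
	rc = gmap_helper_disable_cow_sharing();
	mmap_write_unlock(current->mm);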