|
18 | 18 | #include <linux/memblock.h> |
19 | 19 | #include <linux/notifier.h> |
20 | 20 | #include <linux/page-isolation.h> |
| 21 | +#include <linux/vmalloc.h> |
21 | 22 |
|
22 | 23 | #include <asm/early_ioremap.h> |
23 | 24 |
|
@@ -274,6 +275,37 @@ struct folio *kho_restore_folio(phys_addr_t phys) |
274 | 275 | } |
275 | 276 | EXPORT_SYMBOL_GPL(kho_restore_folio); |
276 | 277 |
|
| 278 | +/** |
| 279 | + * kho_restore_pages - restore a list of contiguous order 0 pages. |
| 280 | + * @phys: physical address of the first page. |
| 281 | + * @nr_pages: number of pages. |
| 282 | + * |
| 283 | + * Restore a contiguous list of order 0 pages that was preserved with |
| 284 | + * kho_preserve_pages(). |
| 285 | + * |
| 286 | + * Return: pointer to the first struct page on success, NULL on failure |
| 287 | + */ |
| 288 | +struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages) |
| 289 | +{ |
| 290 | + const unsigned long start_pfn = PHYS_PFN(phys); |
| 291 | + const unsigned long end_pfn = start_pfn + nr_pages; |
| 292 | + unsigned long pfn = start_pfn; |
| 293 | + |
| 294 | + while (pfn < end_pfn) { |
| 295 | + const unsigned int order = |
| 296 | + min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)); |
| 297 | + struct page *page = kho_restore_page(PFN_PHYS(pfn)); |
| 298 | + |
| 299 | + if (!page) |
| 300 | + return NULL; |
| 301 | + split_page(page, order); |
| 302 | + pfn += 1 << order; |
| 303 | + } |
| 304 | + |
| 305 | + return pfn_to_page(start_pfn); |
| 306 | +} |
| 307 | +EXPORT_SYMBOL_GPL(kho_restore_pages); |
| 308 | + |
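The restore loop above re-derives the same power-of-two blocks that preservation recorded: at each step it takes the largest order allowed by both the pfn alignment (count_trailing_zeros) and the number of pages still left (ilog2). A minimal userspace sketch of that decomposition, with an illustrative starting pfn that is not from the patch:

```c
#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x1234, end_pfn = pfn + 7;

	while (pfn < end_pfn) {
		/* largest order allowed by the alignment of pfn ... */
		unsigned int align = __builtin_ctzl(pfn);
		/* ... and by the number of pages remaining (floor(log2)) */
		unsigned int rest = 63 - __builtin_clzl(end_pfn - pfn);
		unsigned int order = align < rest ? align : rest;

		/* prints pfn 0x1234 order 2, pfn 0x1238 order 1, pfn 0x123a order 0 */
		printf("pfn %#lx order %u\n", pfn, order);
		pfn += 1UL << order;
	}
	return 0;
}
```

Because kho_preserve_pages() records blocks using the same alignment rule, the split_page() calls in kho_restore_pages() land on exactly the block boundaries that were preserved.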
277 | 309 | /* Serialize and deserialize struct kho_mem_phys across kexec |
278 | 310 | * |
279 | 311 | * Record all the bitmaps in a linked list of pages for the next kernel to |
@@ -763,6 +795,255 @@ int kho_preserve_pages(struct page *page, unsigned int nr_pages) |
763 | 795 | } |
764 | 796 | EXPORT_SYMBOL_GPL(kho_preserve_pages); |
765 | 797 |
|
| 798 | +struct kho_vmalloc_hdr { |
| 799 | + DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *); |
| 800 | +}; |
| 801 | + |
| 802 | +#define KHO_VMALLOC_SIZE \ |
| 803 | + ((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \ |
| 804 | + sizeof(phys_addr_t)) |
| 805 | + |
| 806 | +struct kho_vmalloc_chunk { |
| 807 | + struct kho_vmalloc_hdr hdr; |
| 808 | + phys_addr_t phys[KHO_VMALLOC_SIZE]; |
| 809 | +}; |
| 810 | + |
| 811 | +static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE); |
| 812 | + |
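As a quick sanity check on the chunk layout (assuming 4 KiB pages and an 8-byte phys_addr_t / KHOSER pointer, which is what the static_assert above implies on common 64-bit configs), each chunk page holds 511 physical addresses plus the link to the next chunk:

```c
#include <assert.h>
#include <stdint.h>

/* Userspace sketch of the arithmetic only; names mirror the kernel macros. */
#define PAGE_SIZE		4096u			/* assumed page size */
#define HDR_SIZE		sizeof(uint64_t)	/* one serialized next pointer */
#define KHO_VMALLOC_SIZE	((PAGE_SIZE - HDR_SIZE) / sizeof(uint64_t))

/* (4096 - 8) / 8 = 511 entries, and 511 * 8 + 8 fills the page exactly */
static_assert(KHO_VMALLOC_SIZE == 511, "511 phys_addr_t entries per chunk");
```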
| 813 | +/* vmalloc flags KHO supports */ |
| 814 | +#define KHO_VMALLOC_SUPPORTED_FLAGS (VM_ALLOC | VM_ALLOW_HUGE_VMAP) |
| 815 | + |
| 816 | +/* KHO internal flags for vmalloc preservations */ |
| 817 | +#define KHO_VMALLOC_ALLOC 0x0001 |
| 818 | +#define KHO_VMALLOC_HUGE_VMAP 0x0002 |
| 819 | + |
| 820 | +static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags) |
| 821 | +{ |
| 822 | + unsigned short kho_flags = 0; |
| 823 | + |
| 824 | + if (vm_flags & VM_ALLOC) |
| 825 | + kho_flags |= KHO_VMALLOC_ALLOC; |
| 826 | + if (vm_flags & VM_ALLOW_HUGE_VMAP) |
| 827 | + kho_flags |= KHO_VMALLOC_HUGE_VMAP; |
| 828 | + |
| 829 | + return kho_flags; |
| 830 | +} |
| 831 | + |
| 832 | +static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags) |
| 833 | +{ |
| 834 | + unsigned int vm_flags = 0; |
| 835 | + |
| 836 | + if (kho_flags & KHO_VMALLOC_ALLOC) |
| 837 | + vm_flags |= VM_ALLOC; |
| 838 | + if (kho_flags & KHO_VMALLOC_HUGE_VMAP) |
| 839 | + vm_flags |= VM_ALLOW_HUGE_VMAP; |
| 840 | + |
| 841 | + return vm_flags; |
| 842 | +} |
| 843 | + |
| 844 | +static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur) |
| 845 | +{ |
| 846 | + struct kho_vmalloc_chunk *chunk; |
| 847 | + int err; |
| 848 | + |
| 849 | + chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL); |
| 850 | + if (!chunk) |
| 851 | + return NULL; |
| 852 | + |
| 853 | + err = kho_preserve_pages(virt_to_page(chunk), 1); |
| 854 | + if (err) |
| 855 | + goto err_free; |
| 856 | + if (cur) |
| 857 | + KHOSER_STORE_PTR(cur->hdr.next, chunk); |
| 858 | + return chunk; |
| 859 | + |
| 860 | +err_free: |
| 861 | + free_page((unsigned long)chunk); |
| 862 | + return NULL; |
| 863 | +} |
| 864 | + |
| 865 | +static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk) |
| 866 | +{ |
| 867 | + struct kho_mem_track *track = &kho_out.ser.track; |
| 868 | + unsigned long pfn = PHYS_PFN(virt_to_phys(chunk)); |
| 869 | + |
| 870 | + __kho_unpreserve(track, pfn, pfn + 1); |
| 871 | + |
| 872 | + for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) { |
| 873 | + pfn = PHYS_PFN(chunk->phys[i]); |
| 874 | + __kho_unpreserve(track, pfn, pfn + 1); |
| 875 | + } |
| 876 | +} |
| 877 | + |
| 878 | +static void kho_vmalloc_free_chunks(struct kho_vmalloc *kho_vmalloc) |
| 879 | +{ |
| 880 | + struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first); |
| 881 | + |
| 882 | + while (chunk) { |
| 883 | + struct kho_vmalloc_chunk *tmp = chunk; |
| 884 | + |
| 885 | + kho_vmalloc_unpreserve_chunk(chunk); |
| 886 | + |
| 887 | + chunk = KHOSER_LOAD_PTR(chunk->hdr.next); |
| 888 | + free_page((unsigned long)tmp); |
| 889 | + } |
| 890 | +} |
| 891 | + |
| 892 | +/** |
| 893 | + * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec |
| 894 | + * @ptr: pointer to the area in vmalloc address space |
| 895 | + * @preservation: placeholder for preservation metadata |
| 896 | + * |
| 897 | + * Instructs KHO to preserve the area in vmalloc address space at @ptr. The |
| 898 | + * physical pages mapped at @ptr will be preserved and on successful return |
| 899 | + * @preservation will hold the physical address of a structure that describes |
| 900 | + * the preservation. |
| 901 | + * |
| 902 | + * NOTE: Memory allocated with the vmalloc_node() family of helpers cannot be |
| 903 | + * reliably restored on the same NUMA node. |
| 904 | + * |
| 905 | + * Return: 0 on success, error code on failure |
| 906 | + */ |
| 907 | +int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation) |
| 908 | +{ |
| 909 | + struct kho_vmalloc_chunk *chunk; |
| 910 | + struct vm_struct *vm = find_vm_area(ptr); |
| 911 | + unsigned int order, flags, nr_contig_pages; |
| 912 | + unsigned int idx = 0; |
| 913 | + int err; |
| 914 | + |
| 915 | + if (!vm) |
| 916 | + return -EINVAL; |
| 917 | + |
| 918 | + if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS) |
| 919 | + return -EOPNOTSUPP; |
| 920 | + |
| 921 | + flags = vmalloc_flags_to_kho(vm->flags); |
| 922 | + order = get_vm_area_page_order(vm); |
| 923 | + |
| 924 | + chunk = new_vmalloc_chunk(NULL); |
| 925 | + if (!chunk) |
| 926 | + return -ENOMEM; |
| 927 | + KHOSER_STORE_PTR(preservation->first, chunk); |
| 928 | + |
| 929 | + nr_contig_pages = (1 << order); |
| 930 | + for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) { |
| 931 | + phys_addr_t phys = page_to_phys(vm->pages[i]); |
| 932 | + |
| 933 | + err = kho_preserve_pages(vm->pages[i], nr_contig_pages); |
| 934 | + if (err) |
| 935 | + goto err_free; |
| 936 | + |
| 937 | + chunk->phys[idx++] = phys; |
| 938 | + if (idx == ARRAY_SIZE(chunk->phys)) { |
| 939 | + chunk = new_vmalloc_chunk(chunk); |
| 940 | + if (!chunk) |
| 941 | + goto err_free; |
| 942 | + idx = 0; |
| 943 | + } |
| 944 | + } |
| 945 | + |
| 946 | + preservation->total_pages = vm->nr_pages; |
| 947 | + preservation->flags = flags; |
| 948 | + preservation->order = order; |
| 949 | + |
| 950 | + return 0; |
| 951 | + |
| 952 | +err_free: |
| 953 | + kho_vmalloc_free_chunks(preservation); |
| 954 | + return err; |
| 955 | +} |
| 956 | +EXPORT_SYMBOL_GPL(kho_preserve_vmalloc); |
| 957 | + |
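A hedged sketch of the preserve-side usage (the my_state struct and function below are hypothetical, not part of this patch): a caller preserves a vmalloc'ed buffer and stashes the returned struct kho_vmalloc descriptor in whatever state it hands to the next kernel:

```c
/* Hypothetical caller; only kho_preserve_vmalloc() comes from this patch. */
struct my_state {
	struct kho_vmalloc buf_preservation;
	size_t buf_size;
};

static int my_preserve_buf(struct my_state *state, void *buf, size_t size)
{
	int err;

	/* buf must be a vmalloc() address; VM_ALLOW_HUGE_VMAP areas work too */
	err = kho_preserve_vmalloc(buf, &state->buf_preservation);
	if (err)
		return err;

	state->buf_size = size;
	/* state itself is then serialized through the usual KHO mechanisms */
	return 0;
}
```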
| 958 | +/** |
| 959 | + * kho_restore_vmalloc - recreates and populates an area in vmalloc address |
| 960 | + * space from the preserved memory. |
| 961 | + * @preservation: preservation metadata. |
| 962 | + * |
| 963 | + * Recreates an area in vmalloc address space and populates it with memory that |
| 964 | + * was preserved using kho_preserve_vmalloc(). |
| 965 | + * |
| 966 | + * Return: pointer to the area in the vmalloc address space, NULL on failure. |
| 967 | + */ |
| 968 | +void *kho_restore_vmalloc(const struct kho_vmalloc *preservation) |
| 969 | +{ |
| 970 | + struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first); |
| 971 | + unsigned int align, order, shift, vm_flags; |
| 972 | + unsigned long total_pages, contig_pages; |
| 973 | + unsigned long addr, size; |
| 974 | + struct vm_struct *area; |
| 975 | + struct page **pages; |
| 976 | + unsigned int idx = 0; |
| 977 | + int err; |
| 978 | + |
| 979 | + vm_flags = kho_flags_to_vmalloc(preservation->flags); |
| 980 | + if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS) |
| 981 | + return NULL; |
| 982 | + |
| 983 | + total_pages = preservation->total_pages; |
| 984 | + pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL); |
| 985 | + if (!pages) |
| 986 | + return NULL; |
| 987 | + order = preservation->order; |
| 988 | + contig_pages = (1 << order); |
| 989 | + shift = PAGE_SHIFT + order; |
| 990 | + align = 1 << shift; |
| 991 | + |
| 992 | + while (chunk) { |
| 993 | + struct page *page; |
| 994 | + |
| 995 | + for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) { |
| 996 | + phys_addr_t phys = chunk->phys[i]; |
| 997 | + |
| 998 | + if (idx + contig_pages > total_pages) |
| 999 | + goto err_free_pages_array; |
| 1000 | + |
| 1001 | + page = kho_restore_pages(phys, contig_pages); |
| 1002 | + if (!page) |
| 1003 | + goto err_free_pages_array; |
| 1004 | + |
| 1005 | + for (int j = 0; j < contig_pages; j++) |
| 1006 | + pages[idx++] = page + j; |
| 1007 | + |
| 1009 | + } |
| 1010 | + |
| 1011 | + page = kho_restore_pages(virt_to_phys(chunk), 1); |
| 1012 | + if (!page) |
| 1013 | + goto err_free_pages_array; |
| 1014 | + chunk = KHOSER_LOAD_PTR(chunk->hdr.next); |
| 1015 | + __free_page(page); |
| 1016 | + } |
| 1017 | + |
| 1018 | + if (idx != total_pages) |
| 1019 | + goto err_free_pages_array; |
| 1020 | + |
| 1021 | + area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift, |
| 1022 | + vm_flags, VMALLOC_START, VMALLOC_END, |
| 1023 | + NUMA_NO_NODE, GFP_KERNEL, |
| 1024 | + __builtin_return_address(0)); |
| 1025 | + if (!area) |
| 1026 | + goto err_free_pages_array; |
| 1027 | + |
| 1028 | + addr = (unsigned long)area->addr; |
| 1029 | + size = get_vm_area_size(area); |
| 1030 | + err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift); |
| 1031 | + if (err) |
| 1032 | + goto err_free_vm_area; |
| 1033 | + |
| 1034 | + area->nr_pages = total_pages; |
| 1035 | + area->pages = pages; |
| 1036 | + |
| 1037 | + return area->addr; |
| 1038 | + |
| 1039 | +err_free_vm_area: |
| 1040 | + free_vm_area(area); |
| 1041 | +err_free_pages_array: |
| 1042 | + kvfree(pages); |
| 1043 | + return NULL; |
| 1044 | +} |
| 1045 | +EXPORT_SYMBOL_GPL(kho_restore_vmalloc); |
| 1046 | + |
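And the matching restore side after kexec, again purely illustrative: the new kernel reads back the descriptor it was handed and gets a fresh vmalloc mapping populated with the preserved pages, which it owns from then on and can vfree() like any other vmalloc allocation:

```c
/* Hypothetical counterpart to my_preserve_buf() above. */
static void *my_restore_buf(const struct my_state *state)
{
	void *buf = kho_restore_vmalloc(&state->buf_preservation);

	if (!buf)
		return NULL;

	/* contents are exactly what the previous kernel wrote into the area */
	return buf;
}
```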
766 | 1047 | /* Handling for debug/kho/out */ |
767 | 1048 |
|
768 | 1049 | static struct dentry *debugfs_root; |
|