Lines matching search query: +full:smem +full:- +full:part

1 // SPDX-License-Identifier: GPL-2.0-only
68 for (; size; addr += pagesize, size -= pagesize) in range_is_swapped()
81 if (pipe(comm_pipes->child_ready) < 0) in setup_comm_pipes()
82 return -errno; in setup_comm_pipes()
83 if (pipe(comm_pipes->parent_ready) < 0) { in setup_comm_pipes()
84 close(comm_pipes->child_ready[0]); in setup_comm_pipes()
85 close(comm_pipes->child_ready[1]); in setup_comm_pipes()
86 return -errno; in setup_comm_pipes()
94 close(comm_pipes->child_ready[0]); in close_comm_pipes()
95 close(comm_pipes->child_ready[1]); in close_comm_pipes()
96 close(comm_pipes->parent_ready[0]); in close_comm_pipes()
97 close(comm_pipes->parent_ready[1]); in close_comm_pipes()
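
The setup_comm_pipes()/close_comm_pipes() fragments above appear to come from the kernel COW selftest (tools/testing/selftests/mm/cow.c): two pipes let a forked child signal that it is ready and then block until the parent has done its part of the test. A minimal, self-contained sketch of that handshake, assuming the same struct and field names as the fragments (the fork/wait scaffolding and the "0" token are illustrative):

#include <errno.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

struct comm_pipes {
	int child_ready[2];
	int parent_ready[2];
};

static int setup_comm_pipes(struct comm_pipes *comm_pipes)
{
	if (pipe(comm_pipes->child_ready) < 0)
		return -errno;
	if (pipe(comm_pipes->parent_ready) < 0) {
		close(comm_pipes->child_ready[0]);
		close(comm_pipes->child_ready[1]);
		return -errno;
	}
	return 0;
}

static void close_comm_pipes(struct comm_pipes *comm_pipes)
{
	close(comm_pipes->child_ready[0]);
	close(comm_pipes->child_ready[1]);
	close(comm_pipes->parent_ready[0]);
	close(comm_pipes->parent_ready[1]);
}

int main(void)
{
	struct comm_pipes comm_pipes;
	char buf;

	if (setup_comm_pipes(&comm_pipes))
		return EXIT_FAILURE;

	if (fork() == 0) {
		/* Child: announce readiness, then block until the parent is done. */
		write(comm_pipes.child_ready[1], "0", 1);
		while (read(comm_pipes.parent_ready[0], &buf, 1) != 1)
			;
		_exit(0);
	}

	/* Parent: wait for the child, do the interesting work, release the child. */
	while (read(comm_pipes.child_ready[0], &buf, 1) != 1)
		;
	write(comm_pipes.parent_ready[1], "0", 1);
	wait(NULL);

	close_comm_pipes(&comm_pipes);
	return EXIT_SUCCESS;
}

Returning -errno on failure mirrors the fragments, so the caller can report exactly why a pipe could not be created.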
110 write(comm_pipes->child_ready[1], "0", 1); in child_memcmp_fn()
111 while (read(comm_pipes->parent_ready[0], &buf, 1) != 1) in child_memcmp_fn()
137 return -errno; in child_vmsplice_memcmp_fn()
139 /* Trigger a read-only pin. */ in child_vmsplice_memcmp_fn()
142 return -errno; in child_vmsplice_memcmp_fn()
144 return -EINVAL; in child_vmsplice_memcmp_fn()
148 return -errno; in child_vmsplice_memcmp_fn()
151 write(comm_pipes->child_ready[1], "0", 1); in child_vmsplice_memcmp_fn()
152 while (read(comm_pipes->parent_ready[0], &buf, 1) != 1) in child_vmsplice_memcmp_fn()
157 cur = read(fds[0], new + total, transferred - total); in child_vmsplice_memcmp_fn()
159 return -errno; in child_vmsplice_memcmp_fn()
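
child_vmsplice_memcmp_fn() takes a read-only pin on a COW-shared anonymous page by splicing it into a pipe, unmaps the page, waits for the parent to modify its copy, and then checks that the pipe still delivers the old content (the CVE-2020-29374 scenario referenced further down). A standalone sketch of that flow, assuming a single page and simple pipe-based synchronization; variable names and the 0xcf/0xff patterns are illustrative:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t size = getpagesize();
	int ready[2], go[2], fds[2], status;
	struct iovec iov;
	char *mem, *old, *new;
	ssize_t cur, total;
	char buf;

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED || pipe(ready) < 0 || pipe(go) < 0)
		return EXIT_FAILURE;
	memset(mem, 0xcf, size);

	if (fork() == 0) {
		old = malloc(size);
		new = malloc(size);
		memcpy(old, mem, size);		/* backup the original content */

		if (pipe(fds) < 0)
			_exit(1);

		/* Trigger a read-only pin on the COW-shared page. */
		iov.iov_base = mem;
		iov.iov_len = size;
		if (vmsplice(fds[1], &iov, 1, 0) != (ssize_t)size)
			_exit(1);

		/* Unmap it; the pin via the pipe keeps the page alive. */
		munmap(mem, size);

		/* Tell the parent to modify the page, then wait. */
		write(ready[1], "0", 1);
		while (read(go[0], &buf, 1) != 1)
			;

		/* The pipe must still deliver the old content. */
		for (total = 0; total < (ssize_t)size; total += cur) {
			cur = read(fds[0], new + total, size - total);
			if (cur < 0)
				_exit(1);
		}
		_exit(memcmp(old, new, size) ? 1 : 0);
	}

	/* Parent: wait for the pin, then write through COW. */
	while (read(ready[0], &buf, 1) != 1)
		;
	memset(mem, 0xff, size);
	write(go[1], "0", 1);

	wait(&status);
	printf("child still reads the old content: %s\n",
	       WEXITSTATUS(status) == 0 ? "yes (ok)" : "no (COW bug)");
	return EXIT_SUCCESS;
}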
194 * write-faults by directly mapping pages writable. in do_test_cow_in_parent()
214 ret = -EINVAL; in do_test_cow_in_parent()
317 cur = read(fds[0], new + total, transferred - total); in do_test_vmsplice_in_parent()
460 if (cqe->res != size) { in do_test_iouring()
469 cur = pread(fd, tmp + total, size - total, total); in do_test_iouring()
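
do_test_iouring() registers the test memory as an io_uring fixed buffer (which takes a longterm pin on the pages), writes that buffer to a file, and reads the file back with pread() to compare. A minimal sketch using liburing, assuming liburing is installed (link with -luring); the temporary file path is a placeholder:

#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t size = getpagesize();
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	char *mem, *tmp;
	ssize_t cur, total;
	int fd;

	mem = malloc(size);
	tmp = malloc(size);
	if (!mem || !tmp)
		return EXIT_FAILURE;
	memset(mem, 0xcf, size);

	fd = open("/tmp/iouring-fixed-buf-test", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || io_uring_queue_init(1, &ring, 0) < 0)
		return EXIT_FAILURE;

	/* Registering the buffer takes a longterm pin on its pages. */
	iov.iov_base = mem;
	iov.iov_len = size;
	if (io_uring_register_buffers(&ring, &iov, 1) < 0)
		return EXIT_FAILURE;

	/* Write the fixed buffer to the file and wait for the completion. */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return EXIT_FAILURE;
	io_uring_prep_write_fixed(sqe, fd, mem, size, 0, 0);
	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) < 0 || cqe->res != (int)size)
		return EXIT_FAILURE;
	io_uring_cqe_seen(&ring, cqe);

	/* Read the file back and compare it against the buffer. */
	for (total = 0; total < (ssize_t)size; total += cur) {
		cur = pread(fd, tmp + total, size - total, total);
		if (cur < 0)
			return EXIT_FAILURE;
	}
	printf("file matches buffer: %s\n", memcmp(mem, tmp, size) ? "no" : "yes");

	io_uring_unregister_buffers(&ring);
	io_uring_queue_exit(&ring);
	close(fd);
	return EXIT_SUCCESS;
}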
685 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); in do_run_with_base_page()
746 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); in do_run_with_thp()
752 /* We need a THP-aligned memory area. */ in do_run_with_thp()
753 mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1)); in do_run_with_thp()
762 * Try to populate a THP. Touch the first sub-page and test if we get in do_run_with_thp()
763 * another sub-page populated automatically. in do_run_with_thp()
780 * Trigger PTE-mapping the THP by temporarily mapping a single in do_run_with_thp()
797 * Discard all but a single subpage of that PTE-mapped THP. What in do_run_with_thp()
800 ret = madvise(mem + pagesize, thpsize - pagesize, MADV_DONTNEED); in do_run_with_thp()
814 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); in do_run_with_thp()
833 ret = madvise(mem + pagesize, thpsize - pagesize, MADV_DONTFORK); in do_run_with_thp()
847 ret = madvise(mem + pagesize, thpsize - pagesize, MADV_DOFORK); in do_run_with_thp()
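
do_run_with_thp() over-allocates anonymous memory, carves out a THP-aligned area, populates a THP via MADV_HUGEPAGE plus a single touch, and then uses MADV_DONTNEED (and MADV_DONTFORK/MADV_DOFORK) on all but one subpage to build the different PTE-mapped configurations. A sketch of the alignment and partial-discard steps, assuming THP is enabled; the PMD size is read from sysfs with a 2 MiB fallback:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Read the PMD THP size, e.g. 2 MiB on x86-64. */
static size_t read_thpsize(void)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
	unsigned long val = 0;

	if (!f || fscanf(f, "%lu", &val) != 1)
		val = 2 * 1024 * 1024;		/* fall back to a common default */
	if (f)
		fclose(f);
	return val;
}

int main(void)
{
	size_t pagesize = getpagesize();
	size_t thpsize = read_thpsize();
	char *mmap_mem, *mem;

	/* Over-allocate so we can carve out a THP-aligned area. */
	mmap_mem = mmap(NULL, thpsize * 2, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mmap_mem == MAP_FAILED)
		return EXIT_FAILURE;

	/* We need a THP-aligned memory area. */
	mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));

	/* Ask for a THP and populate it by touching the first subpage. */
	if (madvise(mem, thpsize, MADV_HUGEPAGE))
		return EXIT_FAILURE;
	mem[0] = 1;

	/*
	 * Discard all but the first subpage. On a PMD-mapped THP this
	 * splits the PMD, leaving a single PTE-mapped subpage behind.
	 */
	if (madvise(mem + pagesize, thpsize - pagesize, MADV_DONTNEED))
		return EXIT_FAILURE;

	printf("kept one %zu-byte subpage of a %zu-byte THP area\n",
	       pagesize, thpsize);
	munmap(mmap_mem, thpsize * 2);
	return EXIT_SUCCESS;
}

The alignment expression is the one visible in the fragments: rounding the over-allocated start up to the next multiple of thpsize guarantees that a whole PMD range lies inside the mapping.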
886 ksft_print_msg("[RUN] %s ... with swapped-out THP\n", desc); in run_with_thp_swap()
892 ksft_print_msg("[RUN] %s ... with PTE-mapped THP\n", desc); in run_with_pte_mapped_thp()
898 ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP\n", desc); in run_with_pte_mapped_thp_swap()
910 ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP\n", desc); in run_with_single_pte_of_thp_swap()
936 mem = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, flags, -1, 0); in run_with_hugetlb()
949 dummy = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, flags, -1, 0); in run_with_hugetlb()
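
run_with_hugetlb() maps anonymous hugetlb memory directly with MAP_HUGETLB. A minimal sketch, assuming a 2 MiB hugetlb size and that huge pages have been reserved via /proc/sys/vm/nr_hugepages:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

int main(void)
{
	size_t hugetlbsize = 2 * 1024 * 1024;
	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    (21 << MAP_HUGE_SHIFT);	/* log2(2 MiB) = 21 */
	char *mem;

	mem = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (mem == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return EXIT_FAILURE;
	}
	mem[0] = 1;	/* fault in the huge page */
	munmap(mem, hugetlbsize);
	return EXIT_SUCCESS;
}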
991 * This is CVE-2020-29374 reported by Jann Horn.
1030 "R/O-mapping a page registered as iouring fixed buffer",
1045 * Take a R/O longterm pin on a R/O-mapped shared anonymous page.
1050 "R/O GUP pin on R/O-mapped shared page",
1053 /* Same as above, but using GUP-fast. */
1055 "R/O GUP-fast pin on R/O-mapped shared page",
1059 * Take a R/O longterm pin on a R/O-mapped exclusive anonymous page that
1064 "R/O GUP pin on R/O-mapped previously-shared page",
1067 /* Same as above, but using GUP-fast. */
1069 "R/O GUP-fast pin on R/O-mapped previously-shared page",
1073 * Take a R/O longterm pin on a R/O-mapped exclusive anonymous page.
1078 "R/O GUP pin on R/O-mapped exclusive page",
1081 /* Same as above, but using GUP-fast. */
1083 "R/O GUP-fast pin on R/O-mapped exclusive page",
1092 run_with_base_page(test_case->fn, test_case->desc); in run_anon_test_case()
1093 run_with_base_page_swap(test_case->fn, test_case->desc); in run_anon_test_case()
1095 run_with_thp(test_case->fn, test_case->desc); in run_anon_test_case()
1096 run_with_thp_swap(test_case->fn, test_case->desc); in run_anon_test_case()
1097 run_with_pte_mapped_thp(test_case->fn, test_case->desc); in run_anon_test_case()
1098 run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc); in run_anon_test_case()
1099 run_with_single_pte_of_thp(test_case->fn, test_case->desc); in run_anon_test_case()
1100 run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc); in run_anon_test_case()
1101 run_with_partial_mremap_thp(test_case->fn, test_case->desc); in run_anon_test_case()
1102 run_with_partial_shared_thp(test_case->fn, test_case->desc); in run_anon_test_case()
1105 run_with_hugetlb(test_case->fn, test_case->desc, in run_anon_test_case()
1149 * Trigger PTE-mapping the THP by temporarily mapping a single subpage in do_test_anon_thp_collapse()
1165 /* Collapse before actually COW-sharing the page. */ in do_test_anon_thp_collapse()
1174 /* COW-share the full PTE-mapped THP. */ in do_test_anon_thp_collapse()
1177 /* Don't COW-share the upper part of the THP. */ in do_test_anon_thp_collapse()
1185 /* Don't COW-share the lower part of the THP. */ in do_test_anon_thp_collapse()
1239 /* Collapse before anyone modified the COW-shared page. */ in do_test_anon_thp_collapse()
1261 ret = -EINVAL; in do_test_anon_thp_collapse()
1297 * Re-mapping a PTE-mapped anon THP using a single PMD ("in-place
1305 /* Basic COW test, but collapse after COW-sharing a full THP. */
1311 * Basic COW test, but collapse after COW-sharing the lower half of a
1319 * Basic COW test, but collapse after COW-sharing the upper half of a
1340 ksft_print_msg("[RUN] %s\n", test_case->desc); in run_anon_thp_test_cases()
1341 do_run_with_thp(test_case->fn, THP_RUN_PMD); in run_anon_thp_test_cases()
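
The anon THP collapse cases PTE-map a THP (for example by briefly mprotect()ing one subpage), optionally COW-share parts of it with a child, and then use MADV_COLLAPSE to re-establish a PMD mapping. A sketch of the split-and-collapse step on a fully populated range, assuming a 6.1+ kernel, an enabled THP configuration, and a 2 MiB PMD size:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* available since Linux 6.1 */
#endif

int main(void)
{
	size_t pagesize = getpagesize();
	size_t thpsize = 2 * 1024 * 1024;	/* assumed PMD size */
	char *mmap_mem, *mem;

	mmap_mem = mmap(NULL, thpsize * 2, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mmap_mem == MAP_FAILED)
		return EXIT_FAILURE;
	mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));

	/* Populate a THP by touching the first subpage. */
	if (madvise(mem, thpsize, MADV_HUGEPAGE))
		return EXIT_FAILURE;
	mem[0] = 1;

	/* Trigger PTE-mapping the THP by temporarily protecting one subpage. */
	if (mprotect(mem + pagesize, pagesize, PROT_READ) ||
	    mprotect(mem + pagesize, pagesize, PROT_READ | PROT_WRITE))
		return EXIT_FAILURE;

	/* Collapse the PTE-mapped range back into a PMD-mapped THP. */
	if (madvise(mem, thpsize, MADV_COLLAPSE)) {
		perror("madvise(MADV_COLLAPSE)");
		return EXIT_FAILURE;
	}
	printf("collapsed %zu bytes back into a PMD mapping\n", thpsize);
	munmap(mmap_mem, thpsize * 2);
	return EXIT_SUCCESS;
}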
1350 typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size);
1352 static void test_cow(char *mem, const char *smem, size_t size) in test_cow() argument
1357 memcpy(old, smem, size); in test_cow()
1363 ksft_test_result(!memcmp(smem, old, size), in test_cow()
1368 static void test_ro_pin(char *mem, const char *smem, size_t size) in test_ro_pin() argument
1373 static void test_ro_fast_pin(char *mem, const char *smem, size_t size) in test_ro_fast_pin() argument
1380 char *mem, *smem, tmp; in run_with_zeropage() local
1385 MAP_PRIVATE | MAP_ANON, -1, 0); in run_with_zeropage()
1391 smem = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0); in run_with_zeropage()
1398 tmp = *mem + *smem; in run_with_zeropage()
1401 fn(mem, smem, pagesize); in run_with_zeropage()
1404 if (smem != MAP_FAILED) in run_with_zeropage()
1405 munmap(smem, pagesize); in run_with_zeropage()
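
run_with_zeropage() maps anonymous memory and read-faults it so that it is backed by the shared zeropage before handing both mappings to the test function. A sketch of the read fault together with a pagemap_is_populated()-style helper (the helper name mirrors the fragments; bit 63 of a /proc/self/pagemap entry means "page present"):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Returns 1 if the page backing 'addr' is currently present in memory. */
static int pagemap_is_populated(int pagemap_fd, const void *addr)
{
	off_t offset = ((uintptr_t)addr / getpagesize()) * sizeof(uint64_t);
	uint64_t entry;

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return 0;
	return !!(entry & (1ULL << 63));	/* bit 63: page present */
}

int main(void)
{
	size_t pagesize = getpagesize();
	int pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	char *mem, tmp;

	if (pagemap_fd < 0)
		return EXIT_FAILURE;

	/* A read fault on a private anonymous page maps the shared zeropage. */
	mem = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (mem == MAP_FAILED)
		return EXIT_FAILURE;

	printf("before read: populated=%d\n", pagemap_is_populated(pagemap_fd, mem));
	tmp = *(volatile char *)mem;	/* read fault -> shared zeropage */
	(void)tmp;
	printf("after read:  populated=%d\n", pagemap_is_populated(pagemap_fd, mem));

	munmap(mem, pagesize);
	close(pagemap_fd);
	return EXIT_SUCCESS;
}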
1410 char *mem, *smem, *mmap_mem, *mmap_smem, tmp; in run_with_huge_zeropage() local
1424 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); in run_with_huge_zeropage()
1430 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); in run_with_huge_zeropage()
1436 /* We need a THP-aligned memory area. */ in run_with_huge_zeropage()
1437 mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1)); in run_with_huge_zeropage()
1438 smem = (char *)(((uintptr_t)mmap_smem + thpsize) & ~(thpsize - 1)); in run_with_huge_zeropage()
1441 ret |= madvise(smem, thpsize, MADV_HUGEPAGE); in run_with_huge_zeropage()
1449 * the first sub-page and test if we get another sub-page populated in run_with_huge_zeropage()
1452 tmp = *mem + *smem; in run_with_huge_zeropage()
1455 !pagemap_is_populated(pagemap_fd, smem + pagesize)) { in run_with_huge_zeropage()
1460 fn(mem, smem, thpsize); in run_with_huge_zeropage()
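
run_with_huge_zeropage() does the same with MADV_HUGEPAGE: a read fault on a THP-aligned area may be served by the huge zeropage, which the test detects by checking that an untouched second subpage is already populated. A sketch of that check, assuming a 2 MiB PMD size, the default use_zero_page setting, and the same pagemap helper as above:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static int pagemap_is_populated(int pagemap_fd, const void *addr)
{
	off_t offset = ((uintptr_t)addr / getpagesize()) * sizeof(uint64_t);
	uint64_t entry;

	return pread(pagemap_fd, &entry, sizeof(entry), offset) == sizeof(entry) &&
	       (entry & (1ULL << 63));
}

int main(void)
{
	size_t pagesize = getpagesize();
	size_t thpsize = 2 * 1024 * 1024;	/* assumed PMD size */
	int pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	char *mmap_mem, *mem;

	mmap_mem = mmap(NULL, thpsize * 2, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pagemap_fd < 0 || mmap_mem == MAP_FAILED)
		return EXIT_FAILURE;

	/* We need a THP-aligned memory area. */
	mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));
	if (madvise(mem, thpsize, MADV_HUGEPAGE))
		return EXIT_FAILURE;

	/*
	 * A read fault may map the huge zeropage with a PMD; if it did,
	 * the second subpage is populated although we never touched it.
	 */
	(void)*(volatile char *)mem;
	printf("huge zeropage mapped: %s\n",
	       pagemap_is_populated(pagemap_fd, mem + pagesize) ? "likely" : "no");

	munmap(mmap_mem, thpsize * 2);
	close(pagemap_fd);
	return EXIT_SUCCESS;
}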
1469 char *mem, *smem, tmp; in run_with_memfd() local
1492 smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0); in run_with_memfd()
1499 tmp = *mem + *smem; in run_with_memfd()
1502 fn(mem, smem, pagesize); in run_with_memfd()
1505 if (smem != MAP_FAILED) in run_with_memfd()
1506 munmap(smem, pagesize); in run_with_memfd()
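
run_with_memfd() creates a memfd, fills the page cache with a known pattern, and maps the file both MAP_PRIVATE and MAP_SHARED so the test can verify that writes to the private mapping trigger COW instead of reaching the file. A sketch, assuming glibc's memfd_create() wrapper (glibc 2.27+); the memfd name and the 0xcf/0xff patterns are illustrative:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pagesize = getpagesize();
	char *fill, *mem, *smem;
	int fd;

	fd = memfd_create("test", 0);
	if (fd < 0 || ftruncate(fd, pagesize))
		return EXIT_FAILURE;

	/* Fill the page cache with a known pattern via a shared mapping. */
	fill = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fill == MAP_FAILED)
		return EXIT_FAILURE;
	memset(fill, 0xcf, pagesize);
	munmap(fill, pagesize);

	/* A private (COW) mapping and a read-only shared mapping of the page. */
	mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED || smem == MAP_FAILED)
		return EXIT_FAILURE;

	/* Writing to the private mapping must COW and not modify the file. */
	memset(mem, 0xff, pagesize);
	printf("shared mapping unchanged: %s\n",
	       smem[0] == (char)0xcf ? "yes" : "no");

	munmap(mem, pagesize);
	munmap(smem, pagesize);
	close(fd);
	return EXIT_SUCCESS;
}

The run_with_tmpfile() lines below follow the same pattern, just with a file descriptor for a regular temporary file instead of a memfd.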
1513 char *mem, *smem, tmp; in run_with_tmpfile() local
1543 smem = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0); in run_with_tmpfile()
1550 tmp = *mem + *smem; in run_with_tmpfile()
1553 fn(mem, smem, pagesize); in run_with_tmpfile()
1556 if (smem != MAP_FAILED) in run_with_tmpfile()
1557 munmap(smem, pagesize); in run_with_tmpfile()
1566 char *mem, *smem, tmp; in run_with_memfd_hugetlb() local
1593 smem = mmap(NULL, hugetlbsize, PROT_READ, MAP_SHARED, fd, 0); in run_with_memfd_hugetlb()
1600 tmp = *mem + *smem; in run_with_memfd_hugetlb()
1603 fn(mem, smem, hugetlbsize); in run_with_memfd_hugetlb()
1607 munmap(smem, hugetlbsize); in run_with_memfd_hugetlb()
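
run_with_memfd_hugetlb() is the hugetlb variant: the memfd is created with MFD_HUGETLB plus a size flag and then mapped both private and shared. A sketch, assuming a 2 MiB hugetlb size and at least two reserved huge pages; the MFD_HUGE_* fallback defines are only needed on older userspace headers:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif
#ifndef MFD_HUGE_2MB
#define MFD_HUGE_2MB (21U << 26)	/* log2(2 MiB) << MFD_HUGE_SHIFT */
#endif

int main(void)
{
	size_t hugetlbsize = 2 * 1024 * 1024;
	char *mem, *smem;
	int fd;

	fd = memfd_create("test", MFD_HUGETLB | MFD_HUGE_2MB);
	if (fd < 0 || ftruncate(fd, hugetlbsize))
		return EXIT_FAILURE;

	/* A private (COW) and a read-only shared mapping of the same huge page. */
	mem = mmap(NULL, hugetlbsize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	smem = mmap(NULL, hugetlbsize, PROT_READ, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED || smem == MAP_FAILED)
		return EXIT_FAILURE;

	memset(mem, 0xcf, hugetlbsize);	/* COW-faults a private huge page */
	printf("shared mapping still zero-filled: %s\n",
	       smem[0] == 0 ? "yes" : "no");

	munmap(mem, hugetlbsize);
	munmap(smem, hugetlbsize);
	close(fd);
	return EXIT_SUCCESS;
}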
1639 /* Same as above, but using GUP-fast. */
1641 "R/O longterm GUP-fast pin",
1650 run_with_zeropage(test_case->fn, test_case->desc); in run_non_anon_test_case()
1651 run_with_memfd(test_case->fn, test_case->desc); in run_non_anon_test_case()
1652 run_with_tmpfile(test_case->fn, test_case->desc); in run_non_anon_test_case()
1654 run_with_huge_zeropage(test_case->fn, test_case->desc); in run_non_anon_test_case()
1656 run_with_memfd_hugetlb(test_case->fn, test_case->desc, in run_non_anon_test_case()
1664 ksft_print_msg("[RUN] Non-anonymous memory tests in private mappings\n"); in run_non_anon_test_cases()