// SPDX-License-Identifier: GPL-2.0-only
/* I/O iterator tests.  This can only test kernel-backed iterator types.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <kunit/test.h>

MODULE_DESCRIPTION("iov_iter testing");
MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
MODULE_LICENSE("GPL");

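/*
 * Byte ranges, [from, to), to be loaded into an iterator, expressed as
 * offsets into the test buffer.  The list is terminated by a sentinel entry
 * with a negative "from".
 */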
struct kvec_test_range {
	int	from, to;
};

static const struct kvec_test_range kvec_test_ranges[] = {
	{ 0x00002, 0x00002 },
	{ 0x00027, 0x03000 },
	{ 0x05193, 0x18794 },
	{ 0x20000, 0x20000 },
	{ 0x20000, 0x24000 },
	{ 0x24000, 0x27001 },
	{ 0x29000, 0xffffb },
	{ 0xffffd, 0xffffe },
	{ -1 }
};

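/* Generate a recognisable, repeating byte pattern from a buffer offset. */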
static inline u8 pattern(unsigned long x)
{
	return x & 0xff;
}

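/* KUnit cleanup action: unmap a buffer created by iov_kunit_create_buffer(). */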
static void iov_kunit_unmap(void *data)
{
	vunmap(data);
}

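/*
 * Allocate a buffer out of @npages pages and map it contiguously into
 * vmalloc space.  The page pointers are passed back through *ppages so that
 * callers can check which page backs which part of the buffer; cleanup is
 * left to KUnit-managed actions.
 */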
static void *__init iov_kunit_create_buffer(struct kunit *test,
					    struct page ***ppages,
					    size_t npages)
{
	struct page **pages;
	unsigned long got;
	void *buffer;

	pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
	*ppages = pages;

	got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
	if (got != npages) {
		release_pages(pages, got);
		KUNIT_ASSERT_EQ(test, got, npages);
	}

	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);

	kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
	return buffer;
}

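/*
 * Build a kvec[] from the given table of ranges, stopping at the sentinel or
 * after @kvmax entries, and attach it to the iterator.  Each range [from, to)
 * becomes one segment pointing into @buffer.
 */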
static void __init iov_kunit_load_kvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct kvec *kvec, unsigned int kvmax,
				       void *buffer, size_t bufsize,
				       const struct kvec_test_range *pr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < kvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, bufsize);
		kvec[i].iov_base = buffer + pr->from;
		kvec[i].iov_len = pr->to - pr->from;
		size += pr->to - pr->from;
	}
	KUNIT_ASSERT_LE(test, size, bufsize);

	iov_iter_kvec(iter, dir, kvec, i, size);
}

/*
 * Test copying to an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test copying from an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = min(iter.count, bufsize);

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED();
}

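/*
 * Page-relative byte ranges, [from, to) within pages[page], to be loaded into
 * a BVEC iterator.  The list is terminated by a sentinel entry.
 */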
struct bvec_test_range {
	int	page, from, to;
};

static const struct bvec_test_range bvec_test_ranges[] = {
	{ 0, 0x0002, 0x0002 },
	{ 1, 0x0027, 0x0893 },
	{ 2, 0x0193, 0x0794 },
	{ 3, 0x0000, 0x1000 },
	{ 4, 0x0000, 0x1000 },
	{ 5, 0x0000, 0x1000 },
	{ 6, 0x0000, 0x0ffb },
	{ 6, 0x0ffd, 0x0ffe },
	{ -1, -1, -1 }
};

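/*
 * Build a bio_vec[] from the given table of ranges and attach it to the
 * iterator.  Where a range carries straight on from the end of the previous
 * one in physically contiguous pages, it is coalesced into the preceding
 * bio_vec, so multipage segments get exercised as well as single-page ones.
 */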
static void __init iov_kunit_load_bvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct bio_vec *bvec, unsigned int bvmax,
				       struct page **pages, size_t npages,
				       size_t bufsize,
				       const struct bvec_test_range *pr)
{
	struct page *can_merge = NULL, *page;
	size_t size = 0;
	int i;

	for (i = 0; i < bvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_LT(test, pr->page, npages);
		KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
		KUNIT_ASSERT_GE(test, pr->from, 0);
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

		page = pages[pr->page];
		if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
			i--;
			bvec[i].bv_len += pr->to;
		} else {
			bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
		}

		size += pr->to - pr->from;
		if ((pr->to & ~PAGE_MASK) == 0)
			can_merge = page + pr->to / PAGE_SIZE;
		else
			can_merge = NULL;
	}

	iov_iter_bvec(iter, dir, bvec, i, size);
}

/*
 * Test copying to an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_to_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, b, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	b = 0;
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
		u8 *p = scratch + pr->page * PAGE_SIZE;

		for (i = pr->from; i < pr->to; i++)
			p[i] = pattern(patt++);
	}

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test copying from an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
		size_t patt = pr->page * PAGE_SIZE;

		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(patt + j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED();
}

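/* KUnit cleanup action: tear down the xarray backing an ITER_XARRAY test. */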
static void iov_kunit_destroy_xarray(void *data)
{
	struct xarray *xarray = data;

	xa_destroy(xarray);
	kfree(xarray);
}

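/*
 * Store the test pages at consecutive indices in the xarray and point the
 * iterator at the whole span.
 */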
static void __init iov_kunit_load_xarray(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct xarray *xarray,
					 struct page **pages, size_t npages)
{
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);

		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
		size += PAGE_SIZE;
	}
	iov_iter_xarray(iter, dir, xarray, 0, size);
}

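/* Allocate an xarray and arrange for it to be destroyed when the test ends. */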
static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
	struct xarray *xarray;

	xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
	xa_init(xarray);
	kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
	return xarray;
}

/*
 * Test copying to an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, READ, xarray, pr->from, size);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test copying from an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test the extraction of ITER_KVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct kvec kvec[8];
	u8 *buffer;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	pr = kvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED();
}

/*
 * Test the extraction of ITER_BVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct bio_vec bvec[8];
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	pr = bvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = pr->page + from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED();
}

/*
 * Test the extraction of ITER_XARRAY-type iterators.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, from, size);

		do {
			size_t offset0 = LONG_MAX;

			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
	}

stop:
	KUNIT_SUCCEED();
}

static struct kunit_case __refdata iov_kunit_cases[] = {
	KUNIT_CASE(iov_kunit_copy_to_kvec),
	KUNIT_CASE(iov_kunit_copy_from_kvec),
	KUNIT_CASE(iov_kunit_copy_to_bvec),
	KUNIT_CASE(iov_kunit_copy_from_bvec),
	KUNIT_CASE(iov_kunit_copy_to_xarray),
	KUNIT_CASE(iov_kunit_copy_from_xarray),
	KUNIT_CASE(iov_kunit_extract_pages_kvec),
	KUNIT_CASE(iov_kunit_extract_pages_bvec),
	KUNIT_CASE(iov_kunit_extract_pages_xarray),
	{}
};

static struct kunit_suite iov_kunit_suite = {
	.name = "iov_iter",
	.test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);

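/*
 * Usage sketch (illustrative, not part of the test): with a kunitconfig that
 * enables KUnit and this suite, the tests can typically be run through the
 * KUnit wrapper and selected by suite name, e.g.:
 *
 *	./tools/testing/kunit/kunit.py run 'iov_iter*'
 *
 * Alternatively, build the suite as a module and load it by hand; results
 * are reported in TAP form either way.
 */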