/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "../i915_selftest.h"

#define PFN_BIAS (1 << 10)

struct pfn_table {
	struct sg_table st;
	unsigned long start, end;
};

typedef unsigned int (*npages_fn_t)(unsigned long n,
				    unsigned long count,
				    struct rnd_state *rnd);

static noinline int expect_pfn_sg(struct pfn_table *pt,
				  npages_fn_t npages_fn,
				  struct rnd_state *rnd,
				  const char *who,
				  unsigned long timeout)
{
	struct scatterlist *sg;
	unsigned long pfn, n;

	pfn = pt->start;
	for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
		struct page *page = sg_page(sg);
		unsigned int npages = npages_fn(n, pt->st.nents, rnd);

		if (page_to_pfn(page) != pfn) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (sg->length != npages * PAGE_SIZE) {
			pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
			       __func__, who, npages * PAGE_SIZE, sg->length);
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn += npages;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
					    const char *who,
					    unsigned long timeout)
{
	struct sg_page_iter sgiter;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
		struct page *page = sg_page_iter_page(&sgiter);

		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
				       const char *who,
				       unsigned long timeout)
{
	struct sgt_iter sgt;
	struct page *page;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sgt_page(page, sgt, &pt->st) {
		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

static int expect_pfn_sgtable(struct pfn_table *pt,
			      npages_fn_t npages_fn,
			      struct rnd_state *rnd,
			      const char *who,
			      unsigned long timeout)
{
	int err;

	err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sg_page_iter(pt, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sgtiter(pt, who, timeout);
	if (err)
		return err;

	return 0;
}

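/*
 * The npages_fn_t callbacks below pick how many pages go into the n'th
 * scatterlist chunk. random() consumes the PRNG, which is why callers
 * reseed the same rnd_state before alloc_table() and again before
 * expect_pfn_sgtable(), so that both walks see identical chunk sizes.
 *
 * Purely as an illustration (not part of the test suite), a new policy
 * only has to honour the (n, count, rnd) signature and return at least
 * one page, e.g. a hypothetical fixed-size variant:
 *
 *	static unsigned int sixteen(unsigned long n,
 *				    unsigned long count,
 *				    struct rnd_state *rnd)
 *	{
 *		return 16;
 *	}
 *
 * which would then also need an entry in npages_funcs[] below.
 */
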
static unsigned int one(unsigned long n,
			unsigned long count,
			struct rnd_state *rnd)
{
	return 1;
}

static unsigned int grow(unsigned long n,
			 unsigned long count,
			 struct rnd_state *rnd)
{
	return n + 1;
}

static unsigned int shrink(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return count - n;
}

static unsigned int random(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return 1 + (prandom_u32_state(rnd) % 1024);
}

static inline bool page_contiguous(struct page *first,
				   struct page *last,
				   unsigned long npages)
{
	return first + npages == last;
}

static int alloc_table(struct pfn_table *pt,
		       unsigned long count, unsigned long max,
		       npages_fn_t npages_fn,
		       struct rnd_state *rnd,
		       int alloc_error)
{
	struct scatterlist *sg;
	unsigned long n, pfn;

	if (sg_alloc_table(&pt->st, max,
			   GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
		return alloc_error;

	/* count should be less than 20 to prevent overflowing sg->length */
	GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

	/* Construct a table where each scatterlist contains different number
	 * of entries. The idea is to check that we can iterate the individual
	 * pages from inside the coalesced lists.
	 */
	pt->start = PFN_BIAS;
	pfn = pt->start;
	sg = pt->st.sgl;
	for (n = 0; n < count; n++) {
		unsigned long npages = npages_fn(n, count, rnd);

		/* Nobody expects the Sparse Memmap! */
		if (!page_contiguous(pfn_to_page(pfn),
				     pfn_to_page(pfn + npages),
				     npages)) {
			sg_free_table(&pt->st);
			return -ENOSPC;
		}

		if (n)
			sg = sg_next(sg);
		sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

		GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
		GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
		GEM_BUG_ON(sg->offset != 0);

		pfn += npages;
	}
	sg_mark_end(sg);
	pt->st.nents = n;
	pt->end = pfn;

	return 0;
}

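/*
 * Note that alloc_table() never allocates or touches the pages it
 * references: it only maps a synthetic pfn range starting at PFN_BIAS
 * onto scatterlist entries, which is enough to exercise the iterators
 * above without dereferencing any page contents. As a small worked
 * example (illustrative only), count == 4 with the grow() policy yields
 * chunks of 1, 2, 3 and 4 pages, so the table covers pfns
 * [PFN_BIAS, PFN_BIAS + 10) and pt->end - pt->start == 10.
 */
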
static const npages_fn_t npages_funcs[] = {
	one,
	grow,
	shrink,
	random,
	NULL,
};

static int igt_sg_alloc(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max_order = 20; /* approximating a 4GiB object */
	struct rnd_state prng;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max_order) {
		unsigned long size = BIT(prime);
		int offset;

		for (offset = -1; offset <= 1; offset++) {
			unsigned long sz = size + offset;
			const npages_fn_t *npages;
			struct pfn_table pt;
			int err;

			for (npages = npages_funcs; *npages; npages++) {
				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = alloc_table(&pt, sz, sz, *npages, &prng,
						  alloc_error);
				if (err == -ENOSPC)
					break;
				if (err)
					return err;

				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = expect_pfn_sgtable(&pt, *npages, &prng,
							 "sg_alloc_table",
							 end_time);
				sg_free_table(&pt.st);
				if (err)
					return err;
			}
		}

		/* Test at least one continuation before accepting oom */
		if (size > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

static int igt_sg_trim(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max = PAGE_SIZE; /* not prime! */
	struct pfn_table pt;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max) {
		const npages_fn_t *npages;
		int err;

		for (npages = npages_funcs; *npages; npages++) {
			struct rnd_state prng;

			prandom_seed_state(&prng, i915_selftest.random_seed);
			err = alloc_table(&pt, prime, max, *npages, &prng,
					  alloc_error);
			if (err == -ENOSPC)
				break;
			if (err)
				return err;

			if (i915_sg_trim(&pt.st)) {
				if (pt.st.orig_nents != prime ||
				    pt.st.nents != prime) {
					pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
					       pt.st.nents, pt.st.orig_nents, prime);
					err = -EINVAL;
				} else {
					prandom_seed_state(&prng,
							   i915_selftest.random_seed);
					err = expect_pfn_sgtable(&pt,
								 *npages, &prng,
								 "i915_sg_trim",
								 end_time);
				}
			}
			sg_free_table(&pt.st);
			if (err)
				return err;
		}

		/* Test at least one continuation before accepting oom */
		if (prime > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

int scatterlist_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_sg_alloc),
		SUBTEST(igt_sg_trim),
	};

	return i915_subtests(tests, NULL);
}
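
/*
 * These subtests are registered as i915 mock selftests; assuming the usual
 * selftest plumbing, they run at module load on a CONFIG_DRM_I915_SELFTEST=y
 * build, e.g. when booting with i915.mock_selftests=-1, and report their
 * results through the i915_subtests() framework used above.
 */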