/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be
	 * able to use opportunistically when later inserting into the GTT.
	 * For example if phys=2G, then in theory we should be able to use
	 * 1G, 2M, 64K or 4K pages, although in practice this will depend on
	 * a number of other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

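/*
 * Worked example of the mask computation above (illustrative numbers
 * only, not tied to any particular platform): suppose supported =
 * I915_GTT_PAGE_SIZE_4K | I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M
 * and the backing store was built from a mix of 2M and 4K chunks, i.e.
 * phys = SZ_2M | SZ_4K (bits 21 and 12 set). The loop keeps bit i when
 * phys has any bit at or above i set:
 *
 *	i = 12 (4K):  phys & (~0u << 12) != 0 -> sg |= SZ_4K
 *	i = 16 (64K): phys & (~0u << 16) != 0 -> sg |= SZ_64K
 *	i = 21 (2M):  phys & (~0u << 21) != 0 -> sg |= SZ_2M
 *
 * so a page size is marked usable whenever at least one physically
 * contiguous chunk of that size (or larger) exists, e.g. the 2M chunks
 * here may also be mapped with 64K PTEs.
 */
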
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

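/*
 * Sketch of the typical pinning pattern built on the helpers above
 * (illustrative only; the real callers are spread across the backends
 * and the inlines in i915_gem_object.h):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... safely access obj->mm.pages / obj->mm.page_sizes ...
 *
 *	i915_gem_object_unpin_pages(obj);
 *
 * Every pin elevates mm.pages_pin_count; only once it drops back to
 * zero may __i915_gem_object_put_pages() below release the backing
 * store, which is why it starts by checking for pinned pages.
 */
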
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from
	 * gtt lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);
	return vaddr;
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return NULL;

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);
	return vaddr;
}

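/*
 * Both mapping helpers above feed i915_gem_object_pin_map() below, which
 * caches the vmapped address in obj->mm.mapping with the map type packed
 * into the low bits of the (page-aligned) pointer. A sketch of the round
 * trip, using the page_pack_bits()/page_unpack_bits() wrappers around the
 * ptr_pack_bits() family:
 *
 *	obj->mm.mapping = page_pack_bits(vaddr, I915_MAP_WC);
 *	...
 *	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 *	(now ptr == vaddr and has_type == I915_MAP_WC)
 */
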
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = NULL;
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

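/*
 * Sketch of the construction-time pattern the two helpers above enable
 * (illustrative only; loosely modelled on how ring and context state
 * objects are populated):
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, state, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	__i915_gem_object_release_map(obj);
 *
 * After __i915_gem_object_release_map() the vaddr is gone for good;
 * any later access must create a fresh mapping via pin_map().
 */
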
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset)
{
	const bool dma = iter == &obj->mm.get_dma_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

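/*
 * Illustration of the cache contents for a single 3-page sg chunk
 * covering object pages [4, 6] (hypothetical indices):
 *
 *	radix[4] -> sg                 (the scatterlist entry itself)
 *	radix[5] -> xa_mk_value(4)     (interior page, points back to base)
 *	radix[6] -> xa_mk_value(4)
 *
 * A lookup of n = 6 hits the value entry, re-looks-up index 4 to recover
 * the sg pointer and reports *offset = 2, matching the slow-path result.
 */
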
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}

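/*
 * Example use of the lookup helpers above (a sketch; the pages must stay
 * pinned for as long as the returned address is used):
 *
 *	unsigned int len;
 *	dma_addr_t addr;
 *
 *	addr = i915_gem_object_get_dma_address_len(obj, n, &len);
 *	(addr is the bus address of page n; len is the number of bytes
 *	 remaining in the same contiguous DMA chunk, handy for writing
 *	 several PTEs without repeating the lookup)
 */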