/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        bool shrinkable;
        int i;

        lockdep_assert_held(&obj->mm.lock);

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_DONTNEED;

        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                obj->write_domain = 0;
                if (i915_gem_object_has_struct_page(obj))
                        drm_clflush_sg(pages);
                obj->cache_dirty = false;
        }

        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
        obj->mm.get_dma_page.sg_pos = pages->sgl;
        obj->mm.get_dma_page.sg_idx = 0;

        obj->mm.pages = pages;

        GEM_BUG_ON(!sg_page_sizes);
        obj->mm.page_sizes.phys = sg_page_sizes;

        /*
         * Calculate the supported page-sizes which fit into the given
         * sg_page_sizes. This will give us the page-sizes which we may be
         * able to use opportunistically when later inserting into the GTT.
         * For example if phys=2G, then in theory we should be able to use
         * 1G, 2M, 64K or 4K pages, although in practice this will depend on
         * a number of other factors.
         */
        obj->mm.page_sizes.sg = 0;
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

        shrinkable = i915_gem_object_is_shrinkable(obj);

        if (i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                i915_gem_object_set_tiling_quirk(obj);
                shrinkable = false;
        }

        if (shrinkable) {
                struct list_head *list;
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);

                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                if (obj->mm.madv != I915_MADV_WILLNEED)
                        list = &i915->mm.purge_list;
                else
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);

                atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}
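
/*
 * Worked example of the page_sizes.sg calculation above (illustration only,
 * assuming the platform's supported mask includes 4K, 64K and 2M GTT pages):
 * if phys contains the 2M bit, then phys & (~0u << i) is non-zero for
 * i = 12 (4K), 16 (64K) and 21 (2M), so sg ends up advertising all three
 * sizes. If instead every chunk is 64K (phys has only the 64K bit), the same
 * test sets the 4K and 64K bits but not 2M.
 */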

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(&i915->drm,
                        "Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        err = obj->ops->get_pages(obj);
        GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

        return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
        if (err)
                return err;

        if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        goto unlock;

                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);

unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
}
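
/*
 * Typical caller pattern for the above (sketch only; the
 * i915_gem_object_pin_pages()/i915_gem_object_unpin_pages() wrappers live in
 * i915_gem_object.h):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access obj->mm.pages while the pin is held ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */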

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        drm_gem_free_mmap_offset(&obj->base);
        if (obj->ops->truncate)
                obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
        lockdep_assert_held(&obj->mm.lock);
        GEM_BUG_ON(i915_gem_object_has_pages(obj));

        if (obj->ops->writeback)
                obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
        radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
        rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
        if (is_vmalloc_addr(ptr))
                vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        pages = fetch_and_zero(&obj->mm.pages);
        if (IS_ERR_OR_NULL(pages))
                return pages;

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_WILLNEED;

        i915_gem_object_make_unshrinkable(obj);

        if (obj->mm.mapping) {
                unmap_object(obj, page_mask_bits(obj->mm.mapping));
                obj->mm.mapping = NULL;
        }

        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

        return pages;
}
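
/*
 * Release the pages backing an object: fails with -EBUSY while the pages are
 * still pinned; otherwise detaches them from the object (dropping any kernel
 * mapping and resetting the page iterators) and hands them back to the
 * backend's put_pages() hook.
 */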
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        int err;

        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        /* May be called by shrinker from within get_pages() (on another bo) */
        mutex_lock(&obj->mm.lock);
        if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
                err = -EBUSY;
                goto unlock;
        }

        i915_gem_object_release_mmap_offset(obj);

        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from
         * gtt lists early.
         */
        pages = __i915_gem_object_unset_pages(obj);

        /*
         * XXX Temporary hijinx to avoid updating all backends to handle
         * NULL pages. In the future, when we have more asynchronous
         * get_pages backends we should be better able to handle the
         * cancellation of the async task in a more uniform manner.
         */
        if (!pages && !i915_gem_object_needs_async_cancel(obj))
                pages = ERR_PTR(-EINVAL);

        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);

        err = 0;
unlock:
        mutex_unlock(&obj->mm.lock);

        return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
                                      enum i915_map_type type)
{
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
        struct page *stack[32], **pages = stack, *page;
        struct sgt_iter iter;
        pgprot_t pgprot;
        void *vaddr;

        switch (type) {
        default:
                MISSING_CASE(type);
                fallthrough;    /* to use PAGE_KERNEL anyway */
        case I915_MAP_WB:
                /*
                 * On 32b, highmem uses a finite set of indirect PTE (i.e.
                 * vmap) to provide virtual mappings of the high pages.
                 * As these are finite, map_new_virtual() must wait for some
                 * other kmap() to finish when it runs out. If we map a large
                 * number of objects, there is no method for it to tell us
                 * to release the mappings, and we deadlock.
                 *
                 * However, if we make an explicit vmap of the page, that
                 * uses a larger vmalloc arena, and also has the ability
                 * to tell us to release unwanted mappings. Most importantly,
                 * it will fail and propagate an error instead of waiting
                 * forever.
                 *
                 * So if the page is beyond the 32b boundary, make an explicit
                 * vmap.
                 */
                if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
                        return page_address(sg_page(obj->mm.pages->sgl));
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
                break;
        }

        if (n_pages > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_page(page, iter, obj->mm.pages)
                pages[i++] = page;
        vaddr = vmap(pages, n_pages, 0, pgprot);
        if (pages != stack)
                kvfree(pages);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
                                     enum i915_map_type type)
{
        resource_size_t iomap = obj->mm.region->iomap.base -
                obj->mm.region->region.start;
        unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
        unsigned long stack[32], *pfns = stack, i;
        struct sgt_iter iter;
        dma_addr_t addr;
        void *vaddr;

        if (type != I915_MAP_WC)
                return ERR_PTR(-ENODEV);

        if (n_pfn > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
                if (!pfns)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_daddr(addr, iter, obj->mm.pages)
                pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
        vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
        if (pfns != stack)
                kvfree(pfns);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                              enum i915_map_type type)
{
        enum i915_map_type has_type;
        unsigned int flags;
        bool pinned;
        void *ptr;
        int err;

        flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
        if (!i915_gem_object_type_has(obj, flags))
                return ERR_PTR(-ENXIO);

        err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
        if (err)
                return ERR_PTR(err);

        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;

        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
                if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                        err = ____i915_gem_object_get_pages(obj);
                        if (err) {
                                ptr = ERR_PTR(err);
                                goto out_unlock;
                        }

                        smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ptr = ERR_PTR(-EBUSY);
                        goto err_unpin;
                }

                unmap_object(obj, ptr);

                ptr = obj->mm.mapping = NULL;
        }

        if (!ptr) {
                if (GEM_WARN_ON(type == I915_MAP_WC &&
                                !static_cpu_has(X86_FEATURE_PAT)))
                        ptr = ERR_PTR(-ENODEV);
                else if (i915_gem_object_has_struct_page(obj))
                        ptr = i915_gem_object_map_page(obj, type);
                else
                        ptr = i915_gem_object_map_pfn(obj, type);
                if (IS_ERR(ptr))
                        goto err_unpin;

                obj->mm.mapping = page_pack_bits(ptr, type);
        }

out_unlock:
        mutex_unlock(&obj->mm.lock);
        return ptr;

err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
        goto out_unlock;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
{
        enum i915_map_type has_type;
        void *ptr;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));

        wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;

        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
                return;

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (has_type == I915_MAP_WC)
                return;

        drm_clflush_virt_range(ptr + offset, size);
        if (size == obj->base.size) {
                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
                obj->cache_dirty = false;
        }
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->mm.mapping);

        /*
         * We allow removing the mapping from underneath pinned pages!
         *
         * Furthermore, since this is an unsafe operation reserved only
         * for construction time manipulation, we ignore locking prudence.
         */
        unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

        i915_gem_object_unpin_map(obj);
}
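
/*
 * Example use of the mapping helpers above (sketch only; data/size stand in
 * for a caller-provided buffer and length):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */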

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset)
{
        const bool dma = iter == &obj->mm.get_dma_page;
        struct scatterlist *sg;
        unsigned int idx, count;

        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
         * this index previously, we will have an entry for it.
         *
         * Initial lookup is O(N), but this is amortized to O(1) for
         * sequential page access (where each new request is consecutive
         * to the previous one). Repeated lookups are O(lg(obj->base.size)),
         * i.e. O(1) with a large constant!
         */
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;

        mutex_lock(&iter->lock);

        /* We prefer to reuse the last sg so that repeated lookup of this
         * (or the subsequent) sg are fast - comparing against the last
         * sg is faster than going through the radixtree.
         */

        sg = iter->sg_pos;
        idx = iter->sg_idx;
        count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

        while (idx + count <= n) {
                void *entry;
                unsigned long i;
                int ret;

                /* If we cannot allocate and insert this entry, or the
                 * individual pages from this range, cancel updating the
                 * sg_idx so that on this lookup we are forced to linearly
                 * scan onwards, but on future lookups we will try the
                 * insertion again (in which case we need to be careful of
                 * the error return reporting that we have already inserted
                 * this index).
                 */
                ret = radix_tree_insert(&iter->radix, idx, sg);
                if (ret && ret != -EEXIST)
                        goto scan;

                entry = xa_mk_value(idx);
                for (i = 1; i < count; i++) {
                        ret = radix_tree_insert(&iter->radix, idx + i, entry);
                        if (ret && ret != -EEXIST)
                                goto scan;
                }

                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

scan:
        iter->sg_pos = sg;
        iter->sg_idx = idx;

        mutex_unlock(&iter->lock);

        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;

        /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

        *offset = n - idx;
        return sg;

lookup:
        rcu_read_lock();

        sg = radix_tree_lookup(&iter->radix, n);
        GEM_BUG_ON(!sg);

        /* If this index is in the middle of a multi-page sg entry,
         * the radix tree will contain a value entry that points
         * to the start of that range. We will return the pointer to
         * the base page and the offset of this page within the
         * sg entry's range.
         */
        *offset = 0;
        if (unlikely(xa_is_value(sg))) {
                unsigned long base = xa_to_value(sg);

                sg = radix_tree_lookup(&iter->radix, base);
                GEM_BUG_ON(!sg);

                *offset = n - base;
        }

        rcu_read_unlock();

        return sg;
}
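
/*
 * Illustration of the lookup cache above: for an object whose first sg entry
 * covers pages 0-3 and second covers pages 4-7, a lookup of n = 5 walks
 * forward, storing the first sg at radix index 0 and value entries (pointing
 * back to 0) at indices 1-3, then returns the second sg with *offset = 1.
 * A later lookup of n = 2 takes the radix path: the value entry at index 2
 * leads back to index 0, returning the first sg with *offset = 2.
 */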

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
        struct scatterlist *sg;
        unsigned int offset;

        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

        sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n)
{
        struct page *page;

        page = i915_gem_object_get_page(obj, n);
        if (!obj->mm.dirty)
                set_page_dirty(page);

        return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len)
{
        struct scatterlist *sg;
        unsigned int offset;

        sg = i915_gem_object_get_sg_dma(obj, n, &offset);

        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n)
{
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
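
/*
 * Example of walking an object's dma addresses one GTT page at a time
 * (sketch only), using the length hint to step over whole contiguous chunks:
 *
 *	unsigned long n = 0, npages = obj->base.size >> PAGE_SHIFT;
 *	unsigned int len;
 *
 *	while (n < npages) {
 *		dma_addr_t addr =
 *			i915_gem_object_get_dma_address_len(obj, n, &len);
 *
 *		... consume the contiguous range [addr, addr + len) ...
 *
 *		n += len >> PAGE_SHIFT;
 *	}
 */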