/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}
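
/*
 * Worked example of the page-size calculation above (illustrative only,
 * not part of the driver): on a platform where supported = 4K | 64K | 2M,
 * an object backed by one 2M chunk and one 64K chunk has
 * phys = SZ_2M | SZ_64K. Bit i survives the loop whenever phys has any bit
 * at or above position i set, so:
 *
 *	phys & (~0u << ilog2(SZ_4K))  != 0	-> sg |= SZ_4K
 *	phys & (~0u << ilog2(SZ_64K)) != 0	-> sg |= SZ_64K
 *	phys & (~0u << ilog2(SZ_2M))  != 0	-> sg |= SZ_2M
 *
 * giving sg = 4K | 64K | 2M, i.e. every supported page size no larger than
 * the largest chunk actually present.
 */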

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}
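
/*
 * Illustrative sketch (not part of the driver) of how a caller is expected
 * to balance the pin count managed above. i915_gem_object_pin_pages() and
 * i915_gem_object_unpin_pages(), declared in i915_gem_object.h, are the
 * usual wrappers around __i915_gem_object_get_pages():
 *
 *	int example_read_first_page(struct drm_i915_gem_object *obj)
 *	{
 *		struct page *page;
 *		int err;
 *
 *		err = i915_gem_object_pin_pages(obj);	// takes a pin ref
 *		if (err)
 *			return err;
 *
 *		page = i915_gem_object_get_page(obj, 0);
 *		// ... use the page; it cannot be reaped while pinned ...
 *
 *		i915_gem_object_unpin_pages(obj);	// drops the pin ref
 *		return 0;
 *	}
 *
 * Only once every pin has been dropped can __i915_gem_object_put_pages()
 * succeed; with a non-zero pages_pin_count it returns -EBUSY.
 */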

static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;

	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* A single page can always be kmapped */
	if (n_pte == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough - to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}
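
/*
 * Illustrative sketch (not part of the driver) of the usual
 * pin_map/flush/unpin_map pattern built on the function above, with
 * i915_gem_object_unpin_map() from i915_gem_object.h as the matching
 * release helper; 'data' and 'len' stand in for the caller's buffer:
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	__i915_gem_object_flush_map(obj, 0, len);	// defined below
 *	i915_gem_object_unpin_map(obj);
 *
 * The mapping is cached in obj->mm.mapping, so repeated pin_map() calls
 * with the same type simply re-pin the pages and return the same address.
 */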

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
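
/*
 * Worked example of the caching scheme above (illustrative only): for an
 * object whose first sg entry covers pages [0, 8) and whose second covers
 * [8, 12), a forward walk to n = 10 inserts the first sg pointer at index 0
 * and xa_mk_value(0) at indices 1..7, leaves iter->sg_pos pointing at the
 * [8, 12) entry with sg_idx = 8, and returns that entry with *offset = 2.
 * A later lookup of n = 5 (n < sg_idx) takes the radixtree path: it finds
 * the xa_mk_value(0) entry, follows it back to the base at index 0 and
 * returns the first sg with *offset = 5.
 */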

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
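
/*
 * Worked example for i915_gem_object_get_dma_address_len() (illustrative
 * only, assuming 4K pages): if page n falls inside an sg entry starting at
 * page idx, then offset = n - idx and the call returns
 * sg_dma_address(sg) + (offset << PAGE_SHIFT), with *len reporting the
 * remaining bytes of that contiguous DMA chunk. For a 2M chunk starting at
 * page 0, n = 3 yields the chunk's DMA address + 12K and *len = 2M - 12K.
 */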