/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
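	/*
	 * A worked example (illustrative values only): with phys = 2M and
	 * supported = 4K | 64K | 2M, each supported bit at or below the
	 * largest chunk survives the mask below, giving sg = 4K | 64K | 2M.
	 * With phys = 64K | 4K there is no whole 2M chunk, so 2M is filtered
	 * out and sg = 4K | 64K.
	 */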
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/*
 * Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
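/*
 * A usage sketch (illustrative): with the object lock held, pins are
 * reference counted, so nested pins stack and the pages only become
 * reapable again after the final unpin:
 *
 *	err = i915_gem_object_pin_pages(obj);	// pin_count 0 -> 1, pages acquired
 *	err = i915_gem_object_pin_pages(obj);	// pin_count 1 -> 2, cheap increment
 *	i915_gem_object_unpin_pages(obj);	// pin_count 2 -> 1
 *	i915_gem_object_unpin_pages(obj);	// pin_count 1 -> 0, shrinker may reap
 */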
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
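
/*
 * Example caller pattern for the helper above (an illustrative sketch):
 * the helper hides the ww lock/backoff dance, so short-term users can
 * simply bracket their access:
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	// ... read or write the backing pages via obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */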

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem is mapped via a finite set of indirect
		 * PTEs (i.e. vmap) to provide virtual mappings of the high
		 * pages. As these are finite, map_new_virtual() must wait
		 * for some other kmap() to finish when it runs out. If we
		 * map a large number of objects, there is no method for it
		 * to tell us to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}
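
/*
 * A typical caller pattern for i915_gem_object_pin_map() (an illustrative
 * sketch; "data" and "size" are placeholders): the mapping must be created
 * under the object lock, but may be used and unpinned after dropping it,
 * since the pages stay pinned:
 *
 *	void *vaddr;
 *
 *	i915_gem_object_lock(obj, NULL);
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	i915_gem_object_unlock(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */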

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/*
	 * As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
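	/*
	 * For example (illustrative): a forward walk over pages 0, 1, 2, ...
	 * keeps hitting the cached iter->sg_pos below and never consults the
	 * radixtree, while a later jump backwards (n < iter->sg_idx) is
	 * served from the radixtree via the lookup path at the end of this
	 * function.
	 */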
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/*
	 * We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/*
		 * If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/*
	 * In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/*
	 * If this index is in the middle of a multi-page sg entry, the
	 * radix tree will contain a value entry that points to the start
	 * of that range. We will return the pointer to the base page and
	 * the offset of this page within the sg entry's range.
	 */
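	/*
	 * For example (illustrative): an sg entry covering pages [4, 8)
	 * stores the scatterlist pointer at index 4 and xa_mk_value(4) at
	 * indices 5-7, so a lookup of n = 6 chases the value entry back to
	 * index 4 and reports *offset = 2.
	 */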
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
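	/*
	 * Once obj->mm.dirty is set, the backend marks every page dirty
	 * when the pages are finally released, so set_page_dirty() is only
	 * needed here while the object-wide flag is still clear.
	 */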
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
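
/*
 * For example (illustrative, 4K pages): if page n lands two pages into its
 * sg entry, offset = 2, the returned address is sg_dma_address(sg) + 8K and
 * *len reports the bytes remaining in that entry from this page onwards.
 */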

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}