/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

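/*
 * Install the sg_table freshly obtained from the backend as the object's
 * backing store: flush it into the GPU domain if needed, reset the cached
 * sg lookup, and derive which GTT page sizes we may use when binding.
 */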
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

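/*
 * Lowest rung of the get_pages ladder: ask the backend (obj->ops) for its
 * pages. Callers are expected to already hold obj->mm.lock (see
 * __i915_gem_object_get_pages() and i915_gem_object_pin_map() below).
 */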
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before the pages are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced,
 * they may be reaped, either as a result of memory pressure (the shrinker)
 * or when the object itself is released.
 */
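/*
 * A minimal usage sketch (illustrative, not copied from any one caller):
 * external users go through the i915_gem_object_pin_pages() wrapper and
 * balance it with i915_gem_object_unpin_pages() once done.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access obj->mm.pages / obj->mm.page_sizes ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */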
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

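		/*
		 * Order the update of obj->mm.pages and friends ahead of
		 * the pin count increment below, so that an unlocked
		 * i915_gem_object_pin_pages() fast path that observes a
		 * non-zero pin count also sees the pages.
		 */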
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

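/* Drop the cached sg lookups built up by i915_gem_object_get_sg() */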
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

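/* Undo the kernel mapping created by i915_gem_object_map() */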
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
}

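/*
 * Detach the sg_table from the object: tear down any kernel mapping and the
 * cached sg lookups, and hand the pages back to the caller, who remains
 * responsible for returning them to the backend via obj->ops->put_pages().
 */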
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

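/*
 * Build a special PTE for memory that sits behind an I/O mapping (e.g.
 * device local memory): @base is the iomap base (pre-adjusted by the caller
 * to account for the region start) and @offset the dma address within it.
 */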
static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;

	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* A single page can always be kmapped */
	if (n_pte == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough - to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
}

/* get, pin, and map the pages of the object into kernel space */
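/*
 * Illustrative usage (a sketch, assuming a WB-mappable object; 'data' and
 * 'size' are hypothetical):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *
 *	i915_gem_object_unpin_map(obj);
 */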
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

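/*
 * Flush the CPU cache for a range of a pinned kernel mapping so that the GPU
 * sees the latest data; WC mappings and objects that are already coherent
 * for CPU writes need no flushing.
 */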
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(log(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

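		/*
		 * Each page of this range beyond the first gets a value
		 * entry encoding the head index, so that a lookup landing
		 * in the middle of the range can be redirected to its
		 * start (see the lookup path below).
		 */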
		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
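	/*
	 * If the whole object is already tracked as dirty, the backend will
	 * treat every page as dirty when the pages are released, so there
	 * is no need to dirty this page individually.
	 */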
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

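/*
 * Look up the dma address of the n'th page of the object and, if @len is
 * provided, the number of bytes remaining in its sg entry. Like
 * i915_gem_object_get_page(), this requires the pages to be pinned.
 */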
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}