// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#include "gt/intel_migrate.h"
#include "gt/intel_engine_pm.h"

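/* Map i915 placements onto the underlying TTM memory types. */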
#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table *cached_st;
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

static int i915_ttm_err_to_gem(int err)
{
	/* Fastpath */
	if (likely(!err))
		return 0;

	switch (err) {
	case -EBUSY:
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	case -ENOSPC:
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, which returns -ENOMEM instead.
		 */
		return -ENXIO;
	default:
		break;
	}

	return err;
}

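/*
 * The two helpers below answer related but distinct questions: whether the
 * GPU binds the backing store through iomem, and whether CPU mappings of it
 * go through iomem. Today both boil down to "anything that is not system
 * memory", but they may diverge once GGTT-backed TTM objects are supported.
 */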
static bool gpu_binds_iomem(struct ttm_resource *mem)
{
	return mem->mem_type != TTM_PL_SYSTEM;
}

static bool cpu_maps_iomem(struct ttm_resource *mem)
{
	/* Once / if we support GGTT, this is also false for cached ttm_tts */
	return mem->mem_type != TTM_PL_SYSTEM;
}

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
		     struct ttm_tt *ttm)
{
	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
		I915_CACHE_NONE;
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
	/*
	 * Objects only allowed in system get cached cpu-mappings.
	 * Other objects get WC mapping for now. Even if in system.
	 */
	if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
	    obj->mm.n_placements <= 1)
		return ttm_cached;

	return ttm_write_combined;
}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   unsigned int flags)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = intel_region_to_ttm_type(mr);

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place->flags = TTM_PL_FLAG_CONTIGUOUS;
}

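/*
 * Build the TTM placement list for an object: the requested placement is the
 * first user-allowed region (or the object's current region if no explicit
 * placement list was given), while the busy list holds all allowed regions so
 * that validation under memory pressure can fall back to any of them.
 */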
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *requested,
			    struct ttm_place *busy,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	placement->num_placement = 1;
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, requested, flags);

	/* Cache this on object? */
	placement->num_busy_placement = num_allowed;
	for (i = 0; i < placement->num_busy_placement; ++i)
		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

	if (num_allowed == 0) {
		*busy = *requested;
		placement->num_busy_placement = 1;
	}

	placement->placement = requested;
	placement->busy_placement = busy;
}

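/*
 * Create the TTM page vector for an object. CPU caching is selected based on
 * the object's allowed placements, and zeroed pages are requested when the
 * object was allocated with I915_BO_ALLOC_CPU_CLEAR and currently resides in
 * a TT-backed memory type.
 */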
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
			  i915_ttm_select_tt_caching(obj));
	if (ret) {
		kfree(i915_tt);
		return NULL;
	}

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->cached_st) {
		dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(i915_tt->cached_st);
		kfree(i915_tt->cached_st);
		i915_tt->cached_st = NULL;
	}
	ttm_pool_free(&bdev->pool, ttm);
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	ttm_tt_destroy_common(bdev, ttm);
	ttm_tt_fini(ttm);
	kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}

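/*
 * Before TTM moves or swaps out the backing store, unbind any GPU bindings
 * and drop the GEM page reference so the pages can safely be released or
 * replaced.
 */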
static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

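/*
 * Free the cached iomem scatter-gather table and empty the radix tree used by
 * the mmap fault handler to look up io pages, since both refer to a resource
 * that is about to go away or be replaced.
 */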
static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_st)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	sg_free_table(obj->ttm.cached_io_st);
	kfree(obj->ttm.cached_io_st);
	obj->ttm.cached_io_st = NULL;
}

static void
i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->read_domains = I915_GEM_DOMAIN_WC;
	} else {
		obj->write_domain = I915_GEM_DOMAIN_CPU;
		obj->read_domains = I915_GEM_DOMAIN_CPU;
	}
}

static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
	unsigned int i;

	/*
	 * If object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];

			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
				break;
			}
		}
	}

	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

	obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
		I915_BO_FLAG_STRUCT_PAGE;

	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
}

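/*
 * Discard the backing store of a purgeable object by validating it into an
 * empty placement, then update the GEM state to reflect that the pages are
 * gone.
 */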
static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return;

	/* TTM's purge interface. Note that we might be reentering. */
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (!ret) {
		obj->write_domain = 0;
		obj->read_domains = 0;
		i915_ttm_adjust_gem_after_move(obj);
		i915_ttm_free_cached_io_st(obj);
		obj->mm.madv = __I915_MADV_PURGED;
	}
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_st);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj))
		i915_ttm_free_cached_io_st(obj);
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_st)
		return i915_tt->cached_st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table_from_pages_segment(st,
			ttm->pages, ttm->num_pages,
			0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
			i915_sg_segment_size(), GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}

	i915_tt->cached_st = st;
	return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (!gpu_binds_iomem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
	 */
	GEM_WARN_ON(!cpu_maps_iomem(res));
	return intel_region_ttm_resource_to_st(obj->mm.region, res);
}

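/*
 * Try to perform the move using the blitter migration context: clear the
 * destination when the source has no populated pages, otherwise copy. On
 * failure the caller falls back to a CPU memcpy.
 */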
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
			       struct ttm_resource *dst_mem,
			       struct sg_table *dst_st)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct sg_table *src_st;
	struct i915_request *rq;
	struct ttm_tt *ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	if (!i915->gt.migrate.context)
		return -EINVAL;

	dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
	if (!ttm || !ttm_tt_is_populated(ttm)) {
		if (bo->type == ttm_bo_type_kernel)
			return -EINVAL;

		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
			return 0;

		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
						  dst_st->sgl, dst_level,
						  gpu_binds_iomem(dst_mem),
						  0, &rq);

		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	} else {
		src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
			obj->ttm.cached_io_st;

		src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_copy(i915->gt.migrate.context,
						 NULL, src_st->sgl, src_level,
						 gpu_binds_iomem(bo->resource),
						 dst_st->sgl, dst_level,
						 gpu_binds_iomem(dst_mem),
						 &rq);
		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	}

	return ret;
}

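/*
 * The main TTM move callback: wait for idle, notify the GEM side, and either
 * purge objects that are no longer marked I915_MADV_WILLNEED, or move the
 * data to @dst_mem using the blitter with a CPU memcpy fallback. On success
 * the new iomem scatter-gather table is cached for CPU faulting.
 */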
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct intel_memory_region *dst_reg, *src_reg;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	struct sg_table *dst_st;
	int ret;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (bo->ttm && (dst_man->use_tt ||
			(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
		if (ret)
			return ret;
	}

	dst_st = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_st))
		return PTR_ERR(dst_st);

	ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
	if (ret) {
		/* If we start mapping GGTT, we can no longer use man::use_tt here. */
		dst_iter = !cpu_maps_iomem(dst_mem) ?
			ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
						 dst_st, dst_reg->region.start);

		src_iter = !cpu_maps_iomem(bo->resource) ?
			ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
						 obj->ttm.cached_io_st,
						 src_reg->region.start);

		ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
	}
	/* Below dst_mem becomes bo->resource. */
	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_st(obj);

	if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_st = dst_st;
		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	}

	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	if (!cpu_maps_iomem(mem))
		return 0;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

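/*
 * Translate a page offset within the object into a pfn in the underlying
 * iomem region, using the io scatter-gather table cached at move time.
 */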
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}

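/*
 * Validate the object into the given placement and update the GEM state to
 * match. The first validation attempt is restricted to the requested
 * placement without allowing eviction; if that fails, all allowed placements
 * are retried with eviction enabled.
 */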
static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
				struct ttm_placement *placement)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct sg_table *st;
	int real_num_busy;
	int ret;

	/* First try only the requested placement. No eviction. */
	real_num_busy = fetch_and_zero(&placement->num_busy_placement);
	ret = ttm_bo_validate(bo, placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		placement->num_busy_placement = real_num_busy;
		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	i915_ttm_adjust_lru(obj);
	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
		if (ret)
			return ret;

		i915_ttm_adjust_domains_after_move(obj);
		i915_ttm_adjust_gem_after_move(obj);
	}

	if (!i915_gem_object_has_pages(obj)) {
		/* Object either has a page vector or is an iomem object */
		st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
		if (IS_ERR(st))
			return PTR_ERR(st);

		__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
	}

	return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
	struct ttm_placement placement;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

	return __i915_ttm_get_pages(obj, &placement);
}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * GEM forced migration using the i915_ttm_migrate() op is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
			    struct intel_memory_region *mr)
{
	struct ttm_place requested;
	struct ttm_placement placement;
	int ret;

	i915_ttm_place_from_region(mr, &requested, obj->flags);
	placement.num_placement = 1;
	placement.num_busy_placement = 1;
	placement.placement = &requested;
	placement.busy_placement = &requested;

	ret = __i915_ttm_get_pages(obj, &placement);
	if (ret)
		return ret;

	/*
	 * Reinitialize the region bindings. This is primarily
	 * required for objects where the new region is not in
	 * its allowable placements.
	 */
	if (obj->mm.region != mr) {
		i915_gem_object_release_memory_region(obj);
		i915_gem_object_init_memory_region(obj, mr);
	}

	return 0;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	i915_ttm_adjust_lru(obj);
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	} else {
		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
			bo->priority = I915_TTM_PRIO_NO_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.created) {
		ttm_bo_put(i915_gem_to_ttm(obj));
	} else {
		__i915_gem_free_object(obj);
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
	}
}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_purge,
	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.migrate = i915_ttm_migrate,
	.mmap_offset = i915_ttm_mmap_offset,
	.mmap_ops = &vm_ops_ttm,
};

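/**
 * i915_ttm_bo_destroy - TTM buffer object destroy callback
 * @bo: The TTM buffer object embedded in our gem object.
 *
 * Called by TTM once the buffer object is being destroyed. Releases the gem
 * object bindings to the backend and, if the object was fully initialized,
 * frees it after an RCU grace period.
 */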
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* This releases all gem object bindings to the backend. */
	__i915_gem_free_object(obj);

	i915_gem_object_release_memory_region(obj);
	mutex_destroy(&obj->ttm.get_io_page.lock);

	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @page_size: Page size to force for the backing store, or 0 for the region
 * default. Forcing a page size is kernel internal only.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       resource_size_t page_size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	enum ttm_bo_type bo_type;
	int ret;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);
	i915_gem_object_make_unshrinkable(obj);
	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);
	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

	/* Forcing the page size is kernel internal only */
	GEM_BUG_ON(page_size && obj->mm.n_placements);

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
				   bo_type, &i915_sys_placement,
				   page_size >> PAGE_SHIFT,
				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
	if (ret)
		return i915_ttm_err_to_gem(ret);

	obj->ttm.created = true;
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_adjust_gem_after_move(obj);
	i915_gem_object_unlock(obj);

	return 0;
}

static const struct intel_memory_region_ops ttm_system_region_ops = {
	.init_object = __i915_gem_ttm_object_init,
};

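/**
 * i915_gem_ttm_system_setup - Set up a TTM-based system memory region
 * @i915: The i915 device.
 * @type: The memory region type.
 * @instance: The memory region instance.
 *
 * Create an intel_memory_region for system memory backed by TTM, sized after
 * the total amount of system RAM.
 *
 * Return: Pointer to the new region, or an ERR_PTR() value on failure.
 */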
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
			  u16 type, u16 instance)
{
	struct intel_memory_region *mr;

	mr = intel_memory_region_create(i915, 0,
					totalram_pages() << PAGE_SHIFT,
					PAGE_SIZE, 0,
					type, instance,
					&ttm_system_region_ops);
	if (IS_ERR(mr))
		return mr;

	intel_memory_region_set_name(mr, "system-ttm");
	return mr;
}