// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table *cached_st;
};

static const struct ttm_place lmem0_sys_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = I915_PL_LMEM0,
		.flags = 0,
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = I915_PL_SYSTEM,
		.flags = 0,
	}
};

static struct ttm_placement i915_lmem0_placement = {
	.num_placement = 1,
	.placement = &lmem0_sys_placement_flags[0],
	.num_busy_placement = 1,
	.busy_placement = &lmem0_sys_placement_flags[0],
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &lmem0_sys_placement_flags[1],
	.num_busy_placement = 1,
	.busy_placement = &lmem0_sys_placement_flags[1],
};

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

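/*
 * Create the struct i915_ttm_tt backing a buffer object: allocate the page
 * vector with write-combined caching, and request zeroed pages when an
 * I915_BO_ALLOC_CPU_CLEAR object is placed in a TT-backed (system) manager.
 */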
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, ttm_write_combined);
	if (ret) {
		kfree(i915_tt);
		return NULL;
	}

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;
}

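/*
 * Unpopulate the page vector. Unmap and free the cached dma-mapped
 * scatter-gather table, if any, before returning the pages to the pool.
 */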
static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->cached_st) {
		dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(i915_tt->cached_st);
		kfree(i915_tt->cached_st);
		i915_tt->cached_st = NULL;
	}
	ttm_pool_free(&bdev->pool, ttm);
}

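/* Free the struct i915_ttm_tt after the common TTM teardown. */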
static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	ttm_tt_destroy_common(bdev, ttm);
	kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	if (!i915_gem_object_evictable(obj))
		return false;

	/* This isn't valid with a buddy allocator */
	return ttm_bo_eviction_valuable(bo, place);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}

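/*
 * Notify the gem side that the backing store is about to move: unbind any
 * GPU bindings, including active ones, and release the gem-level pages so
 * nothing points at the old placement anymore.
 */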
static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

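/*
 * Drop the cached io scatter-gather table and clear the radix tree used by
 * the fault handler to look up io pages for the old placement.
 */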
static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_st)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	sg_free_table(obj->ttm.cached_io_st);
	kfree(obj->ttm.cached_io_st);
	obj->ttm.cached_io_st = NULL;
}

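/*
 * Purge the object's backing store by validating to an empty placement,
 * which makes TTM drop the pages, and then mark the object as purged.
 */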
static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return;

	/* TTM's purge interface. Note that we might be reentering. */
	ret = ttm_bo_validate(bo, &place, &ctx);

	if (!ret) {
		i915_ttm_free_cached_io_st(obj);
		obj->mm.madv = __I915_MADV_PURGED;
	}
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_st);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj)) {
		/* This releases all gem object bindings to the backend. */
		__i915_gem_free_object(obj);
	}
}

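/*
 * Map a TTM memory type back to the corresponding intel_memory_region: the
 * system placement maps to INTEL_MEMORY_SYSTEM, anything at or above
 * I915_PL_LMEM0 maps to the matching INTEL_MEMORY_LOCAL instance.
 */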
static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

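/*
 * Build, or return the cached, dma-mapped scatter-gather table for the TTM
 * page vector. The table is cached in the struct i915_ttm_tt and stays
 * mapped until the page vector is unpopulated.
 */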
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct scatterlist *sg;
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_st)
		return i915_tt->cached_st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	sg = __sg_alloc_table_from_pages
		(st, ttm->pages, ttm->num_pages, 0,
		 (unsigned long)ttm->num_pages << PAGE_SHIFT,
		 i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		kfree(st);
		return ERR_CAST(sg);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}

	i915_tt->cached_st = st;
	return st;
}

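/*
 * Return the scatter-gather table describing a TTM resource: the dma-mapped
 * page-vector table for TT-backed (system) resources, or the region node
 * translated to a table for iomem resources.
 */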
static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, res->mem_type);

	if (man->use_tt)
		return i915_ttm_tt_get_st(bo->ttm);

	return intel_region_ttm_node_to_st(obj->mm.region, res);
}

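/*
 * The move callback: wait for idle, notify the gem side, and then either
 * purge a no-longer-needed object or memcpy the contents to the new
 * placement using kmap iterators over the source and destination.
 */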
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct intel_memory_region *dst_reg, *src_reg;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	struct sg_table *dst_st;
	int ret;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (bo->ttm && (dst_man->use_tt ||
			(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
		if (ret)
			return ret;
	}

	dst_st = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_st))
		return PTR_ERR(dst_st);

	/* If we start mapping GGTT, we can no longer use man::use_tt here. */
	dst_iter = dst_man->use_tt ?
		ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
					 dst_st, dst_reg->region.start);

	src_iter = src_man->use_tt ?
		ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
					 obj->ttm.cached_io_st,
					 src_reg->region.start);

	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_free_cached_io_st(obj);

	if (!dst_man->use_tt) {
		obj->ttm.cached_io_st = dst_st;
		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	}

	return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	if (mem->mem_type < I915_PL_LMEM0)
		return 0;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

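/*
 * Convert a page offset within the object into a pfn inside the region
 * iomap, using the cached io scatter-gather table. Only meaningful for
 * iomem objects; objects with a page vector are not expected here.
 */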
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}

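/*
 * The gem get_pages() backend: validate the buffer object into the lmem0
 * placement and publish the resulting scatter-gather table to the gem
 * object. -ENOSPC from TTM is translated to the -ENXIO i915 expects.
 */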
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct sg_table *st;
	int ret;

	/* Move to the requested placement. */
	ret = ttm_bo_validate(bo, &i915_lmem0_placement, &ctx);
	if (ret)
		return ret == -ENOSPC ? -ENXIO : ret;

	/* Object either has a page vector or is an iomem object */
	st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
	if (IS_ERR(st))
		return PTR_ERR(st);

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	i915_ttm_adjust_lru(obj);

	return ret;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	i915_ttm_adjust_lru(obj);
}

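/*
 * Adjust the TTM LRU priority of the buffer object according to its madvise
 * state and whether the gem object currently has pages, then bump it to the
 * tail of its LRU list.
 */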
static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	} else {
		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
			bo->priority = I915_TTM_PRIO_NO_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.created) {
		ttm_bo_put(i915_gem_to_ttm(obj));
	} else {
		__i915_gem_free_object(obj);
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
	}
}

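/*
 * The mmap fault handler: reject writes to read-only objects, otherwise
 * hand the fault to the generic TTM vm fault handler.
 */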
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}

const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_purge,
	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.mmap_offset = i915_ttm_mmap_offset,
	.mmap_ops = &vm_ops_ttm,
};

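/*
 * The TTM buffer object destroy callback: release the memory region
 * reference and the io-page lock and, for objects that completed init,
 * free the remaining gem parts after an RCU grace period.
 */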
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	i915_gem_object_release_memory_region(obj);
	mutex_destroy(&obj->ttm.get_io_page.lock);
	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	enum ttm_bo_type bo_type;
	size_t alignment = 0;
	int ret;

	/* Adjust alignment to GPU- and CPU huge page sizes. */

	if (mem->is_range_manager) {
		if (size >= SZ_1G)
			alignment = SZ_1G >> PAGE_SHIFT;
		else if (size >= SZ_2M)
			alignment = SZ_2M >> PAGE_SHIFT;
		else if (size >= SZ_64K)
			alignment = SZ_64K >> PAGE_SHIFT;
	}

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);
	i915_gem_object_make_unshrinkable(obj);
	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);

	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
	ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
			  bo_type, &i915_sys_placement, alignment,
			  true, NULL, NULL, i915_ttm_bo_destroy);

	if (!ret)
		obj->ttm.created = true;

	/* i915 wants -ENXIO when out of memory region space. */
	return (ret == -ENOSPC) ? -ENXIO : ret;
}