xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_ggtt.c (revision 75016ca3)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/agp_backend.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_lmem.h"

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active(i915))
		return false;

	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
		return true;

	if (GRAPHICS_VER(i915) == 12)
		return true; /* XXX DMAR fault reason 7 */

	return false;
}

/**
 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
 * @vm: The VM to suspend the mappings for
 *
 * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 */
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;
	int open;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

	mutex_lock(&vm->mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&vm->open, 0);

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	vm->clear_range(vm, 0, vm->total);

	atomic_set(&vm->open, open);

	mutex_unlock(&vm->mutex);
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	i915_ggtt_suspend_vm(&ggtt->vm);
	ggtt->invalidate(ggtt);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

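/*
 * With GuC enabled, a GGTT update must also invalidate the GuC's own
 * TLB. The invalidation register moved in gen12, hence the version
 * check; the plain gen8 invalidate is still issued first.
 */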
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (GRAPHICS_VER(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

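/*
 * The GGTT PTE used here carries only the address, the present bit and,
 * for local-memory objects, the LM bit. Note that the cache-level
 * argument is not encoded at all; it is only kept so the prototype
 * matches the older pte_encode hooks.
 */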
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
			 enum i915_cache_level level,
			 u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

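/*
 * The *__BKL variants below wrap the plain gen8 PTE writers in
 * stop_machine() so that nothing else can touch the aperture while the
 * GGTT is being rewritten, followed by the GAM fifo drain above. They
 * are installed by gen8_gmch_probe() when concurrent GGTT updates and
 * aperture accesses must be serialized (VT-d on BXT, always on CHV).
 */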
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

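/*
 * Translate the object's properties (read-only, local memory) into PTE
 * flags and then populate the GGTT range backing the vma in one pass.
 */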
static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

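/*
 * GuC cannot address the GGTT range above GUC_GGTT_TOP, so pin down the
 * top of the GGTT with an unevictable reservation and keep ordinary
 * allocations out of it.
 */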
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * 0 remains reserved always.
		 *
		 * If we fail to reserve 0, and then fail to find any space
		 * for an error-capture, remain silent. We can afford not
		 * to reserve an error_capture node as we have fallback
		 * paths, and we trust that 0 will remain reserved. However,
		 * the only likely reason for failure to insert is a driver
		 * bug, which we expect to cause other failures...
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

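/*
 * The aliasing PPGTT is a single page table covering the entire GGTT
 * range: vmas bound with I915_VMA_LOCAL_BIND are written into it at the
 * same offset as their GGTT node (see aliasing_gtt_bind_vma), providing
 * PPGTT-style addressing without separate per-client address spaces.
 */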
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}

static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	atomic_set(&ggtt->vm.open, 0);

	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/**
 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 * all free objects have been drained.
 * @i915: i915 device
 */
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
	dma_resv_fini(&ggtt->vm._resv);
}

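/*
 * The GGMS field of the GMCH control word tells us how much stolen
 * memory was set aside for GGTT PTEs. Decoding examples:
 *   gen6/7: value in MiB, e.g. GGMS=2 -> 2 MiB of 4-byte PTEs
 *           -> 512Ki entries -> a 2 GiB GGTT.
 *   gen8+ and CHV: value is log2(MiB), e.g. GGMS=3 -> 8 MiB of 8-byte
 *           PTEs -> 1Mi entries -> a 4 GiB GGTT (capped below to 4 MiB,
 *           i.e. a 2 GiB GGTT, on 32-bit builds).
 */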
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
	/*
	 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
	 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
	 */
	GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
	return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}

static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
	return gen6_gttmmadr_size(i915) / 2;
}

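/*
 * Common tail of the gen6+ probes: map the GTT page table (the "GSM")
 * sitting in the upper half of the GTTMMADR BAR, then set up the scratch
 * page and precompute its PTE encoding for the clear_range paths.
 */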
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
	phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);

	/*
	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	kref_init(&ggtt->vm.resv_ref);
	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

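/*
 * Gen8+ probe: size the GGTT from the GMCH control word, pick the PTE
 * update paths (switching to the stop_machine() variants where required)
 * and program the private PAT before the common mapping of the GSM.
 */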
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
	ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

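/*
 * Gen6/7 GGTT PTE encoders. They differ only in which cache-control
 * bits the platform understands: plain LLC on SNB, an additional L3+LLC
 * mode on IVB, a snoop plus writeable bit on VLV, and the LLC/eLLC age
 * hints on HSW and eDRAM (Iris) parts.
 */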
static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (GRAPHICS_VER(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

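/*
 * Pre-gen6 path: the GGTT is owned by the intel-gtt/GMCH library, so all
 * PTE updates go through the intel_gtt_* helpers rather than direct
 * writes to a mapped page table.
 */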
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

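/*
 * Probe the GGTT for the given GT: pick the per-generation probe routine
 * and then sanity-check the result; the GGTT must fit within 32 bits of
 * address space and the mappable aperture must not extend past it.
 */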
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm._resv);

	if (GRAPHICS_VER(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (GRAPHICS_VER(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret) {
		dma_resv_fini(&ggtt->vm._resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
	if (ret)
		return ret;

	if (intel_vtd_active(i915))
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/**
 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
 * @vm: The VM to restore the mappings for
 *
 * Restore the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 *
 * Returns %true if restoring the mapping for any object that was in a write
 * domain before suspend.
 */
bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
	struct i915_vma *vma;
	bool write_domain_objs = false;
	int open;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

	/* First fill our portion of the GTT with scratch pages */
	vm->clear_range(vm, 0, vm->total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&vm->open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			write_domain_objs |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&vm->open, open);

	return write_domain_objs;
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	bool flush;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	flush = i915_ggtt_resume_vm(&ggtt->vm);

	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}