// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>

#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"

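/* Initialise an empty page stash; its spinlock guards the embedded pagevec. */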
void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

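/* Take a single page from the stash, or return NULL if it is empty. */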
static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

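/*
 * Transfer as many pages as fit from the tail of @pvec into @stash; any
 * pages that do not fit are left in @pvec for the caller. The nested lock
 * annotation is needed because the global WC stash may be pushed to while
 * a VM's local stash lock is already held (see vm_free_pages_release()).
 */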
static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	unsigned int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

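/*
 * Allocate a single page for page-table use. Try the VM's local stash
 * first; for write-back page tables fall back to a fresh page. For WC
 * page tables, consult the global WC stash and, failing that, batch
 * allocate a pagevec so the cost of set_pages_array_wc() is amortised,
 * returning one page and stashing the rest.
 */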
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* Look in our global stash of WC pages... */
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
	 *
	 * We have to be careful as page allocation may trigger the shrinker
	 * (via direct reclaim) which will fill up the WC stash underneath us.
	 * So we add our WB pages into a temporary pvec on the stack and merge
	 * them into the WC stash after all the allocations are complete.
	 */
	pagevec_init(&stack);
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		stack.pages[stack.nr++] = page;
	} while (pagevec_space(&stack));

	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
		page = stack.pages[--stack.nr];

		/* Merge spare WC pages to the global stash */
		if (stack.nr)
			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

		/* Push any surplus WC pages onto the local VM stash */
		if (stack.nr)
			stash_push_pagevec(&vm->free_pages, &stack);
	}

	/* Return unwanted leftovers */
	if (unlikely(stack.nr)) {
		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
		__pagevec_release(&stack);
	}

	return page;
}

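/*
 * Drain the VM's local stash of free pages, with free_pages.lock held by
 * the caller. For WC page tables the pages are offered to the global WC
 * stash first; whatever remains is converted back to WB (which may sleep,
 * hence the lock dance) and released to the system.
 */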
static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages.pvec;
	struct pagevec stack;

	lockdep_assert_held(&vm->free_pages.lock);
	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		/*
		 * When using WC, first top up the global stash and only
		 * free the overflow immediately if the global stash is
		 * already full.
		 */
		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);

		/*
		 * As we have made some room in the VM's free_pages,
		 * we can wait for it to fill again. Unless we are
		 * inside i915_address_space_fini() and must
		 * immediately release the pages!
		 */
		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
			return;

		/*
		 * We have to drop the lock to allow ourselves to sleep,
		 * so take a copy of the pvec and clear the stash so
		 * others can use it while we sleep.
		 */
		stack = *pvec;
		pagevec_reinit(pvec);
		spin_unlock(&vm->free_pages.lock);

		pvec = &stack;
		set_pages_array_wb(pvec->pages, pvec->nr);

		spin_lock(&vm->free_pages.lock);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	/*
	 * On !llc, we need to change the pages back to WB. We only do so
	 * in bulk, so we rarely need to change the page attributes here,
	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
	 * To make detection of the possible sleep more likely, use an
	 * unconditional might_sleep() for everybody.
	 */
	might_sleep();
	spin_lock(&vm->free_pages.lock);
	while (!pagevec_space(&vm->free_pages.pvec))
		vm_free_pages_release(vm, false);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
	pagevec_add(&vm->free_pages.pvec, page);
	spin_unlock(&vm->free_pages.lock);
}

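/*
 * Called when the last "open" reference to the address space is dropped:
 * with the vm mutex held, unbind and release every vma still on the
 * bound_list, pinning each object so the vma stays alive while we unbind it.
 */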
void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
		return;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));

	mutex_unlock(&vm->mutex);
}

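/*
 * Final teardown of the common address-space state: flush any pages still
 * held in the local free-page stash, then destroy the drm_mm range manager
 * and the vm mutex.
 */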
void i915_address_space_fini(struct i915_address_space *vm)
{
	spin_lock(&vm->free_pages.lock);
	if (pagevec_count(&vm->free_pages.pvec))
		vm_free_pages_release(vm, true);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
	spin_unlock(&vm->free_pages.lock);

	drm_mm_takedown(&vm->mm);

	mutex_destroy(&vm->mutex);
}

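/*
 * Deferred release: the final kref_put() in i915_vm_release() queues this
 * worker via queue_rcu_work(), so the vm is torn down and freed only after
 * an RCU grace period and outside the context that dropped the reference.
 */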
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, rcu.work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	kfree(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_rcu_work(vm->i915->wq, &vm->rcu);
}

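/*
 * Initialise the state common to all address spaces: refcount and deferred
 * release worker, open count, reclaim-safe mutex, drm_mm range manager and
 * the local free-page stash.
 */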
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);
	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	stash_init(&vm->free_pages);

	INIT_LIST_HEAD(&vm->bound_list);
}

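/*
 * Drop the vma's view of its backing pages: free the sg_table if it was
 * built separately from the object's own page list, and forget the cached
 * page sizes.
 */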
void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

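/*
 * Allocate a single backing page for a page-table level and map it for
 * bidirectional DMA; returns -ENOMEM if either step fails.
 */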
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page_attrs(vm->dma,
				      p->page, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL,
				      DMA_ATTR_SKIP_CPU_SYNC |
				      DMA_ATTR_NO_WARN);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}

void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

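/* Fill a page-table page with @count copies of the 64-bit value @val. */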
void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
{
	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}

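/*
 * Under CONFIG_DRM_I915_DEBUG_GEM, fill the (possibly multi-page) scratch
 * allocation with POISON_FREE so that stray reads through scratch are easy
 * to spot in the error state.
 */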
static void poison_scratch_page(struct page *page, unsigned long size)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	do {
		void *vaddr;

		vaddr = kmap(page);
		memset(vaddr, POISON_FREE, PAGE_SIZE);
		kunmap(page);

		page = pfn_to_page(page_to_pfn(page) + 1);
		size -= PAGE_SIZE;
	} while (size);
}

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		size = I915_GTT_PAGE_SIZE_64K;
		gfp |= __GFP_NOWARN;
	}
	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

	do {
		unsigned int order = get_order(size);
		struct page *page;
		dma_addr_t addr;

		page = alloc_pages(gfp, order);
		if (unlikely(!page))
			goto skip;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(page, size);

		addr = dma_map_page_attrs(vm->dma,
					  page, 0, size,
					  PCI_DMA_BIDIRECTIONAL,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_NO_WARN);
		if (unlikely(dma_mapping_error(vm->dma, addr)))
			goto free_page;

		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;

		vm->scratch[0].base.page = page;
		vm->scratch[0].base.daddr = addr;
		vm->scratch_order = order;
		return 0;

unmap_page:
		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
		gfp &= ~__GFP_NOWARN;
	} while (1);
}

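/* Unmap and free the (possibly high-order) scratch page. */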
void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = px_base(&vm->scratch[0]);
	unsigned int order = vm->scratch_order;

	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, order);
}

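/*
 * Release the scratch page tables for each populated level and finally the
 * scratch page itself. Skipped entirely for clones, which are identified by
 * a zero scratch dma address.
 */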
void free_scratch(struct i915_address_space *vm)
{
	int i;

	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
		return;

	for (i = 1; i <= vm->top; i++) {
		if (!px_dma(&vm->scratch[i]))
			break;
		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
	}

	cleanup_scratch_page(vm);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for GTT-related workarounds. It is called on
	 * driver load and after a GPU reset, so you can place workarounds
	 * here even if they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced on BDW, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option of
	 * 32K pages, but we don't currently have any support for them in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    INTEL_GEN(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GEN_RANGE(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec, if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages without disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void cnl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

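/* Program the private PPAT using the routine appropriate for this platform. */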
void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	if (INTEL_GEN(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (INTEL_GEN(i915) >= 10)
		cnl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif