xref: /openbmc/linux/drivers/gpu/drm/i915/gt/gen8_ppgtt.c (revision f519cd13)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/log2.h>
7 
8 #include "gen8_ppgtt.h"
9 #include "i915_scatterlist.h"
10 #include "i915_trace.h"
11 #include "i915_vgpu.h"
12 #include "intel_gt.h"
13 #include "intel_gtt.h"
14 
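/*
 * Encode a page-directory entry: the child's dma address plus the
 * present/RW bits, with the PPAT index selecting cached or uncached
 * accesses for the next level of the walk.
 */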
15 static u64 gen8_pde_encode(const dma_addr_t addr,
16 			   const enum i915_cache_level level)
17 {
18 	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
19 
20 	if (level != I915_CACHE_NONE)
21 		pde |= PPAT_CACHED_PDE;
22 	else
23 		pde |= PPAT_UNCACHED;
24 
25 	return pde;
26 }
27 
28 static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
29 {
30 	struct drm_i915_private *i915 = ppgtt->vm.i915;
31 	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
32 	enum vgt_g2v_type msg;
33 	int i;
34 
35 	if (create)
36 		atomic_inc(px_used(ppgtt->pd)); /* never remove */
37 	else
38 		atomic_dec(px_used(ppgtt->pd));
39 
40 	mutex_lock(&i915->vgpu.lock);
41 
42 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
43 		const u64 daddr = px_dma(ppgtt->pd);
44 
45 		intel_uncore_write(uncore,
46 				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
47 		intel_uncore_write(uncore,
48 				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
49 
50 		msg = create ?
51 			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
52 			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
53 	} else {
54 		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
55 			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
56 
57 			intel_uncore_write(uncore,
58 					   vgtif_reg(pdp[i].lo),
59 					   lower_32_bits(daddr));
60 			intel_uncore_write(uncore,
61 					   vgtif_reg(pdp[i].hi),
62 					   upper_32_bits(daddr));
63 		}
64 
65 		msg = create ?
66 			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
67 			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
68 	}
69 
70 	/* g2v_notify atomically (via hv trap) consumes the message packet. */
71 	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
72 
73 	mutex_unlock(&i915->vgpu.lock);
74 }
75 
76 /* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
77 #define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
78 #define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
79 #define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
80 #define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
81 #define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
82 #define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
83 #define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
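/*
 * Each level holds GEN8_PDES (512) 64-bit entries, so every level of the
 * walk consumes 9 bits of the address: __gen8_pte_index() takes a GTT
 * address (bits [20:12] at lvl 0, [29:21] at lvl 1, ...), while
 * gen8_pd_index() takes an index that has already been shifted down by
 * GEN8_PTE_SHIFT. For example, the (hypothetical) address 0x1_2345_6000
 * decomposes on the 4-level layout into PML4E 0, PDPE 4, PDE 0x11a,
 * PTE 0x56.
 */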
84 
85 #define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
86 
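/*
 * Return how many entries of the level-@lvl directory are covered by
 * [start, end), writing the first entry's position to *idx. The count is
 * clamped at the directory boundary, so the caller iterates, recursing one
 * level down for each entry.
 */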
87 static inline unsigned int
88 gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
89 {
90 	const int shift = gen8_pd_shift(lvl);
91 	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
92 
93 	GEM_BUG_ON(start >= end);
94 	end += ~mask >> gen8_pd_shift(1);
95 
96 	*idx = i915_pde_index(start, shift);
97 	if ((start ^ end) & mask)
98 		return GEN8_PDES - *idx;
99 	else
100 		return i915_pde_index(end, shift) - *idx;
101 }
102 
103 static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
104 {
105 	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
106 
107 	GEM_BUG_ON(start >= end);
108 	return (start ^ end) & mask && (start & ~mask) == 0;
109 }
110 
111 static inline unsigned int gen8_pt_count(u64 start, u64 end)
112 {
113 	GEM_BUG_ON(start >= end);
114 	if ((start ^ end) >> gen8_pd_shift(1))
115 		return GEN8_PDES - (start & (GEN8_PDES - 1));
116 	else
117 		return end - start;
118 }
119 
120 static inline unsigned int
121 gen8_pd_top_count(const struct i915_address_space *vm)
122 {
123 	unsigned int shift = __gen8_pte_shift(vm->top);
124 	return (vm->total + (1ull << shift) - 1) >> shift;
125 }
126 
127 static inline struct i915_page_directory *
128 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
129 {
130 	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
131 
132 	if (vm->top == 2)
133 		return ppgtt->pd;
134 	else
135 		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
136 }
137 
138 static inline struct i915_page_directory *
139 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
140 {
141 	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
142 }
143 
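/* Recursively free every populated child of @pd, then @pd itself. */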
144 static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
145 				 struct i915_page_directory *pd,
146 				 int count, int lvl)
147 {
148 	if (lvl) {
149 		void **pde = pd->entry;
150 
151 		do {
152 			if (!*pde)
153 				continue;
154 
155 			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
156 		} while (pde++, --count);
157 	}
158 
159 	free_px(vm, pd);
160 }
161 
162 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
163 {
164 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
165 
166 	if (intel_vgpu_active(vm->i915))
167 		gen8_ppgtt_notify_vgt(ppgtt, false);
168 
169 	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
170 	free_scratch(vm);
171 }
172 
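/*
 * Unbind the range [start, end) of page indices: a child that is entirely
 * contained in the range has its directory entry pointed back at scratch
 * and its subtree torn down, while a partially covered child is recursed
 * into (or, at the lowest level, has just the affected PTEs rewritten with
 * the scratch encoding). Returns the index reached.
 */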
173 static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
174 			      struct i915_page_directory * const pd,
175 			      u64 start, const u64 end, int lvl)
176 {
177 	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
178 	unsigned int idx, len;
179 
180 	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
181 
182 	len = gen8_pd_range(start, end, lvl--, &idx);
183 	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
184 	    __func__, vm, lvl + 1, start, end,
185 	    idx, len, atomic_read(px_used(pd)));
186 	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
187 
188 	do {
189 		struct i915_page_table *pt = pd->entry[idx];
190 
191 		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
192 		    gen8_pd_contains(start, end, lvl)) {
193 			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
194 			    __func__, vm, lvl + 1, idx, start, end);
195 			clear_pd_entry(pd, idx, scratch);
196 			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
197 			start += (u64)I915_PDES << gen8_pd_shift(lvl);
198 			continue;
199 		}
200 
201 		if (lvl) {
202 			start = __gen8_ppgtt_clear(vm, as_pd(pt),
203 						   start, end, lvl);
204 		} else {
205 			unsigned int count;
206 			u64 *vaddr;
207 
208 			count = gen8_pt_count(start, end);
209 			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
210 			    __func__, vm, lvl, start, end,
211 			    gen8_pd_index(start, 0), count,
212 			    atomic_read(&pt->used));
213 			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
214 
215 			vaddr = kmap_atomic_px(pt);
216 			memset64(vaddr + gen8_pd_index(start, 0),
217 				 vm->scratch[0].encode,
218 				 count);
219 			kunmap_atomic(vaddr);
220 
221 			atomic_sub(count, &pt->used);
222 			start += count;
223 		}
224 
225 		if (release_pd_entry(pd, idx, pt, scratch))
226 			free_px(vm, pt);
227 	} while (idx++, --len);
228 
229 	return start;
230 }
231 
232 static void gen8_ppgtt_clear(struct i915_address_space *vm,
233 			     u64 start, u64 length)
234 {
235 	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
236 	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
237 	GEM_BUG_ON(range_overflows(start, length, vm->total));
238 
239 	start >>= GEN8_PTE_SHIFT;
240 	length >>= GEN8_PTE_SHIFT;
241 	GEM_BUG_ON(length == 0);
242 
243 	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
244 			   start, start + length, vm->top);
245 }
246 
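/*
 * Populate the tree for [*start, end): missing directories and tables are
 * allocated with pd->lock dropped and then re-checked against a concurrent
 * insertion; an allocation that lost the race is parked in 'alloc' for the
 * next empty slot (or freed at 'out'). On error, *start records how far we
 * got so gen8_ppgtt_alloc() can unwind the partial range.
 */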
247 static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
248 			      struct i915_page_directory * const pd,
249 			      u64 * const start, const u64 end, int lvl)
250 {
251 	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
252 	struct i915_page_table *alloc = NULL;
253 	unsigned int idx, len;
254 	int ret = 0;
255 
256 	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
257 
258 	len = gen8_pd_range(*start, end, lvl--, &idx);
259 	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
260 	    __func__, vm, lvl + 1, *start, end,
261 	    idx, len, atomic_read(px_used(pd)));
262 	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
263 
264 	spin_lock(&pd->lock);
265 	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
266 	do {
267 		struct i915_page_table *pt = pd->entry[idx];
268 
269 		if (!pt) {
270 			spin_unlock(&pd->lock);
271 
272 			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
273 			    __func__, vm, lvl + 1, idx);
274 
275 			pt = fetch_and_zero(&alloc);
276 			if (lvl) {
277 				if (!pt) {
278 					pt = &alloc_pd(vm)->pt;
279 					if (IS_ERR(pt)) {
280 						ret = PTR_ERR(pt);
281 						goto out;
282 					}
283 				}
284 
285 				fill_px(pt, vm->scratch[lvl].encode);
286 			} else {
287 				if (!pt) {
288 					pt = alloc_pt(vm);
289 					if (IS_ERR(pt)) {
290 						ret = PTR_ERR(pt);
291 						goto out;
292 					}
293 				}
294 
295 				if (intel_vgpu_active(vm->i915) ||
296 				    gen8_pt_count(*start, end) < I915_PDES)
297 					fill_px(pt, vm->scratch[lvl].encode);
298 			}
299 
300 			spin_lock(&pd->lock);
301 			if (likely(!pd->entry[idx]))
302 				set_pd_entry(pd, idx, pt);
303 			else
304 				alloc = pt, pt = pd->entry[idx];
305 		}
306 
307 		if (lvl) {
308 			atomic_inc(&pt->used);
309 			spin_unlock(&pd->lock);
310 
311 			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
312 						 start, end, lvl);
313 			if (unlikely(ret)) {
314 				if (release_pd_entry(pd, idx, pt, scratch))
315 					free_px(vm, pt);
316 				goto out;
317 			}
318 
319 			spin_lock(&pd->lock);
320 			atomic_dec(&pt->used);
321 			GEM_BUG_ON(!atomic_read(&pt->used));
322 		} else {
323 			unsigned int count = gen8_pt_count(*start, end);
324 
325 			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
326 			    __func__, vm, lvl, *start, end,
327 			    gen8_pd_index(*start, 0), count,
328 			    atomic_read(&pt->used));
329 
330 			atomic_add(count, &pt->used);
331 			/* All other pdes may be simultaneously removed */
332 			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
333 			*start += count;
334 		}
335 	} while (idx++, --len);
336 	spin_unlock(&pd->lock);
337 out:
338 	if (alloc)
339 		free_px(vm, alloc);
340 	return ret;
341 }
342 
343 static int gen8_ppgtt_alloc(struct i915_address_space *vm,
344 			    u64 start, u64 length)
345 {
346 	u64 from;
347 	int err;
348 
349 	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
350 	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
351 	GEM_BUG_ON(range_overflows(start, length, vm->total));
352 
353 	start >>= GEN8_PTE_SHIFT;
354 	length >>= GEN8_PTE_SHIFT;
355 	GEM_BUG_ON(length == 0);
356 	from = start;
357 
358 	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
359 				 &start, start + length, vm->top);
360 	if (unlikely(err && from != start))
361 		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
362 				   from, start, vm->top);
363 
364 	return err;
365 }
366 
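/*
 * Write 4K PTEs for the sg list starting at page index @idx, moving to the
 * next page table each time a 512-entry boundary is crossed. Returns 0 once
 * the sg list is exhausted, otherwise the index at which the caller should
 * continue after looking up the pdp that covers it.
 */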
367 static __always_inline u64
368 gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
369 		      struct i915_page_directory *pdp,
370 		      struct sgt_dma *iter,
371 		      u64 idx,
372 		      enum i915_cache_level cache_level,
373 		      u32 flags)
374 {
375 	struct i915_page_directory *pd;
376 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
377 	gen8_pte_t *vaddr;
378 
379 	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
380 	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
381 	do {
382 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
383 
384 		iter->dma += I915_GTT_PAGE_SIZE;
385 		if (iter->dma >= iter->max) {
386 			iter->sg = __sg_next(iter->sg);
387 			if (!iter->sg) {
388 				idx = 0;
389 				break;
390 			}
391 
392 			iter->dma = sg_dma_address(iter->sg);
393 			iter->max = iter->dma + iter->sg->length;
394 		}
395 
396 		if (gen8_pd_index(++idx, 0) == 0) {
397 			if (gen8_pd_index(idx, 1) == 0) {
398 				/* Limited by sg length for 3lvl */
399 				if (gen8_pd_index(idx, 2) == 0)
400 					break;
401 
402 				pd = pdp->entry[gen8_pd_index(idx, 2)];
403 			}
404 
405 			kunmap_atomic(vaddr);
406 			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
407 		}
408 	} while (1);
409 	kunmap_atomic(vaddr);
410 
411 	return idx;
412 }
413 
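/*
 * Bind using the largest page size each chunk allows: a 2M PDE when the dma
 * address, remaining sg length and GTT offset are all 2M aligned, otherwise
 * 4K PTEs, opportunistically flagging a page table as 64K (GEN8_PDE_IPS_64K)
 * once it is completely filled or suitably padded by scratch.
 */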
414 static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
415 				   struct sgt_dma *iter,
416 				   enum i915_cache_level cache_level,
417 				   u32 flags)
418 {
419 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
420 	u64 start = vma->node.start;
421 	dma_addr_t rem = iter->sg->length;
422 
423 	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
424 
425 	do {
426 		struct i915_page_directory * const pdp =
427 			gen8_pdp_for_page_address(vma->vm, start);
428 		struct i915_page_directory * const pd =
429 			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
430 		gen8_pte_t encode = pte_encode;
431 		unsigned int maybe_64K = -1;
432 		unsigned int page_size;
433 		gen8_pte_t *vaddr;
434 		u16 index;
435 
436 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
437 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
438 		    rem >= I915_GTT_PAGE_SIZE_2M &&
439 		    !__gen8_pte_index(start, 0)) {
440 			index = __gen8_pte_index(start, 1);
441 			encode |= GEN8_PDE_PS_2M;
442 			page_size = I915_GTT_PAGE_SIZE_2M;
443 
444 			vaddr = kmap_atomic_px(pd);
445 		} else {
446 			struct i915_page_table *pt =
447 				i915_pt_entry(pd, __gen8_pte_index(start, 1));
448 
449 			index = __gen8_pte_index(start, 0);
450 			page_size = I915_GTT_PAGE_SIZE;
451 
452 			if (!index &&
453 			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
454 			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
455 			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
456 			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
457 				maybe_64K = __gen8_pte_index(start, 1);
458 
459 			vaddr = kmap_atomic_px(pt);
460 		}
461 
462 		do {
463 			GEM_BUG_ON(iter->sg->length < page_size);
464 			vaddr[index++] = encode | iter->dma;
465 
466 			start += page_size;
467 			iter->dma += page_size;
468 			rem -= page_size;
469 			if (iter->dma >= iter->max) {
470 				iter->sg = __sg_next(iter->sg);
471 				if (!iter->sg)
472 					break;
473 
474 				rem = iter->sg->length;
475 				iter->dma = sg_dma_address(iter->sg);
476 				iter->max = iter->dma + rem;
477 
478 				if (maybe_64K != -1 && index < I915_PDES &&
479 				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
480 				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
481 				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
482 					maybe_64K = -1;
483 
484 				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
485 					break;
486 			}
487 		} while (rem >= page_size && index < I915_PDES);
488 
489 		kunmap_atomic(vaddr);
490 
491 		/*
492 		 * Is it safe to mark the 2M block as 64K? -- Either we have
493 		 * filled the whole page-table with 64K entries, or we have
494 		 * filled part of it and reached the end of the sg table with
495 		 * enough padding.
496 		 */
497 		if (maybe_64K != -1 &&
498 		    (index == I915_PDES ||
499 		     (i915_vm_has_scratch_64K(vma->vm) &&
500 		      !iter->sg && IS_ALIGNED(vma->node.start +
501 					      vma->node.size,
502 					      I915_GTT_PAGE_SIZE_2M)))) {
503 			vaddr = kmap_atomic_px(pd);
504 			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
505 			kunmap_atomic(vaddr);
506 			page_size = I915_GTT_PAGE_SIZE_64K;
507 
508 			/*
509 			 * We write all 4K page entries, even when using 64K
510 			 * pages. In order to verify that the HW isn't cheating
511 			 * by using the 4K PTE instead of the 64K PTE, we want
512 			 * to remove all the surplus entries. If the HW skipped
513 			 * the 64K PTE, it will read/write into the scratch page
514 			 * instead - which we detect as missing results during
515 			 * selftests.
516 			 */
517 			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
518 				u16 i;
519 
520 				encode = vma->vm->scratch[0].encode;
521 				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
522 
523 				for (i = 1; i < index; i += 16)
524 					memset64(vaddr + i, encode, 15);
525 
526 				kunmap_atomic(vaddr);
527 			}
528 		}
529 
530 		vma->page_sizes.gtt |= page_size;
531 	} while (iter->sg);
532 }
533 
534 static void gen8_ppgtt_insert(struct i915_address_space *vm,
535 			      struct i915_vma *vma,
536 			      enum i915_cache_level cache_level,
537 			      u32 flags)
538 {
539 	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
540 	struct sgt_dma iter = sgt_dma(vma);
541 
542 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
543 		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
544 	} else {
545 		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
546 
547 		do {
548 			struct i915_page_directory * const pdp =
549 				gen8_pdp_for_page_index(vm, idx);
550 
551 			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
552 						    cache_level, flags);
553 		} while (idx);
554 
555 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
556 	}
557 }
558 
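/*
 * Set up the scratch hierarchy: scratch[0] is the data page that every
 * unused PTE points at, and each scratch[i] above it is a directory whose
 * entries all point at scratch[i - 1], so an unpopulated subtree of any
 * depth resolves to the same page.
 */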
559 static int gen8_init_scratch(struct i915_address_space *vm)
560 {
561 	int ret;
562 	int i;
563 
564 	/*
565 	 * If everybody agrees not to write into the scratch page,
566 	 * we can reuse it for every vm, keeping contexts and processes separate.
567 	 */
568 	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
569 		struct i915_address_space *clone = vm->gt->vm;
570 
571 		GEM_BUG_ON(!clone->has_read_only);
572 
573 		vm->scratch_order = clone->scratch_order;
574 		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
575 		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
576 		return 0;
577 	}
578 
579 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
580 	if (ret)
581 		return ret;
582 
583 	vm->scratch[0].encode =
584 		gen8_pte_encode(px_dma(&vm->scratch[0]),
585 				I915_CACHE_LLC, vm->has_read_only);
586 
587 	for (i = 1; i <= vm->top; i++) {
588 		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
589 			goto free_scratch;
590 
591 		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
592 		vm->scratch[i].encode =
593 			gen8_pde_encode(px_dma(&vm->scratch[i]),
594 					I915_CACHE_LLC);
595 	}
596 
597 	return 0;
598 
599 free_scratch:
600 	free_scratch(vm);
601 	return -ENOMEM;
602 }
603 
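/*
 * On the 3-level layout the four top-level PDPs are programmed through the
 * legacy PDP registers (see the comment above gen8_ppgtt_create()), so they
 * are allocated and kept pinned for the lifetime of the ppgtt rather than
 * populated on demand.
 */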
604 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
605 {
606 	struct i915_address_space *vm = &ppgtt->vm;
607 	struct i915_page_directory *pd = ppgtt->pd;
608 	unsigned int idx;
609 
610 	GEM_BUG_ON(vm->top != 2);
611 	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
612 
613 	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
614 		struct i915_page_directory *pde;
615 
616 		pde = alloc_pd(vm);
617 		if (IS_ERR(pde))
618 			return PTR_ERR(pde);
619 
620 		fill_px(pde, vm->scratch[1].encode);
621 		set_pd_entry(pd, idx, pde);
622 		atomic_inc(px_used(pde)); /* keep pinned */
623 	}
624 	wmb();
625 
626 	return 0;
627 }
628 
629 static struct i915_page_directory *
630 gen8_alloc_top_pd(struct i915_address_space *vm)
631 {
632 	const unsigned int count = gen8_pd_top_count(vm);
633 	struct i915_page_directory *pd;
634 
635 	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
636 
637 	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
638 	if (unlikely(!pd))
639 		return ERR_PTR(-ENOMEM);
640 
641 	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
642 		kfree(pd);
643 		return ERR_PTR(-ENOMEM);
644 	}
645 
646 	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
647 	atomic_inc(px_used(pd)); /* mark as pinned */
648 	return pd;
649 }
650 
651 /*
652  * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
653  * registers, with a net effect resembling a 2-level page table in normal x86
654  * terms. Each PDP represents 1GB of memory:
655  * 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
656  *
657  */
658 struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
659 {
660 	struct i915_ppgtt *ppgtt;
661 	int err;
662 
663 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
664 	if (!ppgtt)
665 		return ERR_PTR(-ENOMEM);
666 
667 	ppgtt_init(ppgtt, gt);
668 	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
669 
670 	/*
671 	 * From bdw, there is hw support for read-only pages in the PPGTT.
672 	 *
673 	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
674 	 * for now.
675 	 *
676 	 * Gen12 has inherited the same read-only fault issue from gen11.
677 	 */
678 	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
679 
680 	/*
681 	 * There are only a few exceptions for gen >= 6: chv and bxt.
682 	 * And we are not sure about the latter, so play safe for now.
683 	 */
684 	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
685 		ppgtt->vm.pt_kmap_wc = true;
686 
687 	err = gen8_init_scratch(&ppgtt->vm);
688 	if (err)
689 		goto err_free;
690 
691 	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
692 	if (IS_ERR(ppgtt->pd)) {
693 		err = PTR_ERR(ppgtt->pd);
694 		goto err_free_scratch;
695 	}
696 
697 	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
698 		err = gen8_preallocate_top_level_pdp(ppgtt);
699 		if (err)
700 			goto err_free_pd;
701 	}
702 
703 	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
704 	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
705 	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
706 	ppgtt->vm.clear_range = gen8_ppgtt_clear;
707 
708 	if (intel_vgpu_active(gt->i915))
709 		gen8_ppgtt_notify_vgt(ppgtt, true);
710 
711 	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
712 
713 	return ppgtt;
714 
715 err_free_pd:
716 	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
717 			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
718 err_free_scratch:
719 	free_scratch(&ppgtt->vm);
720 err_free:
721 	kfree(ppgtt);
722 	return ERR_PTR(err);
723 }
724