xref: /openbmc/linux/drivers/gpu/drm/i915/gt/gen8_ppgtt.c (revision 9e859eb9)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

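/*
 * Encode a page-directory entry: the dma address of the next-level table
 * plus the present/RW bits and a PPAT index derived from the cache level.
 */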
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

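/*
 * Encode a page-table entry: the dma address of the backing page, the
 * present bit (RW cleared for read-only mappings) and the PPAT index
 * selected by the cache level.
 */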
static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

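/*
 * When running as a vGPU guest, publish the dma addresses of the top-level
 * page directories through the vgtif registers and then poke g2v_notify so
 * the host is told that this ppgtt has been created or destroyed.
 */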
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

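/*
 * For the range [start, end) of page indices, return how many entries of
 * the directory at @lvl need to be walked and set @idx to the first one,
 * clamping the count at the directory boundary.
 */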
static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

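/* Number of PTEs in [start, end) that fall within start's page table. */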
static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);
	return (vm->total + (1ull << shift) - 1) >> shift;
}

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

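/*
 * Recursively free a directory tree: descend into each populated entry,
 * then release the directory itself.
 */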
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, pd);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

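/*
 * Clear the range [start, end) of page indices at the given level. Subtrees
 * wholly contained in the range are torn down; otherwise we recurse, and at
 * the lowest level rewrite the affected PTEs with the scratch encoding.
 * Returns @start advanced past the entries that were cleared.
 */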
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0].encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

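/*
 * Populate the directory tree for the range [*start, end) of page indices,
 * allocating any missing directories and page tables on the way down.
 * *start tracks how far we got, so that on error the caller can clear back
 * to the original starting point.
 */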
static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 * const start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	struct i915_page_table *alloc = NULL;
	unsigned int idx, len;
	int ret = 0;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = fetch_and_zero(&alloc);
			if (lvl) {
				if (!pt) {
					pt = &alloc_pd(vm)->pt;
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				fill_px(pt, vm->scratch[lvl].encode);
			} else {
				if (!pt) {
					pt = alloc_pt(vm);
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				if (intel_vgpu_active(vm->i915) ||
				    gen8_pt_count(*start, end) < I915_PDES)
					fill_px(pt, vm->scratch[lvl].encode);
			}

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx]))
				set_pd_entry(pd, idx, pt);
			else
				alloc = pt, pt = pd->entry[idx];
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
						 start, end, lvl);
			if (unlikely(ret)) {
				if (release_pd_entry(pd, idx, pt, scratch))
					free_px(vm, pt);
				goto out;
			}

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
out:
	if (alloc)
		free_px(vm, alloc);
	return ret;
}

static int gen8_ppgtt_alloc(struct i915_address_space *vm,
			    u64 start, u64 length)
{
	u64 from;
	int err;

	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);
	from = start;

	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
				 &start, start + length, vm->top);
	if (unlikely(err && from != start))
		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
				   from, start, vm->top);

	return err;
}

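/*
 * Write 4K PTEs for the sg list starting at page index @idx, crossing
 * page-table and page-directory boundaries as required. Returns the next
 * index to fill (so the caller can move on to the next pdp), or 0 once
 * the sg list is exhausted.
 */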
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	kunmap_atomic(vaddr);

	return idx;
}

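/*
 * Insert entries for a vma whose backing store allows larger GTT pages:
 * use 2M PDEs when the dma address, alignment and remaining length permit,
 * otherwise 4K PTEs, opportunistically promoting a suitably filled page
 * table to 64K via GEN8_PDE_IPS_64K.
 */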
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or we have
		 * filled part of it and reached the end of the sg table with
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0].encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg);
}

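/*
 * Top-level insertion hook: take the huge-page path when the vma supports
 * page sizes above 4K, otherwise emit plain 4K PTEs one pdp at a time.
 */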
static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

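/*
 * Build the scratch hierarchy: scratch[0] is the scratch page itself and
 * each scratch[i] above it is a directory filled with entries pointing at
 * scratch[i - 1], so that any unpopulated address decodes to scratch.
 */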
static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
		return 0;
	}

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		gen8_pte_encode(px_dma(&vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
			goto free_scratch;

		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
		vm->scratch[i].encode =
			gen8_pde_encode(px_dma(&vm->scratch[i]),
					I915_CACHE_LLC);
	}

	return 0;

free_scratch:
	free_scratch(vm);
	return -ENOMEM;
}

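/*
 * A 3-level ppgtt exposes exactly GEN8_3LVL_PDPES top-level entries, so
 * allocate, scratch-fill and pin all of them up front.
 */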
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		fill_px(pde, vm->scratch[1].encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

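/*
 * Allocate the top-level page directory, sized to gen8_pd_top_count()
 * entries, prefilled with scratch entries and pinned for the lifetime of
 * the vm.
 */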
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;

	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes =
 * 4GB of legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	/*
	 * There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
		ppgtt->vm.pt_kmap_wc = true;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}