// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

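/*
 * Encode a page-directory entry: the dma address of the next-level table,
 * the present/RW bits and a PPAT cacheability attribute.
 */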
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

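/*
 * Encode a page-table entry for a single 4K page: the dma address, the
 * present/RW bits (RW cleared for read-only mappings) and the PPAT
 * attribute derived from the requested cache level.
 */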
static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

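/*
 * When running as a vGPU guest, tell the host about creation or teardown
 * of a ppgtt by writing the page-directory dma addresses into the vgtif
 * mailbox and kicking g2v_notify, so that the (GVT-g) host can track the
 * guest's page tables.
 */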
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

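/*
 * For the span [start, end) of page-table indices, report via @idx the
 * first entry touched at level @lvl and return how many entries of that
 * single directory the span covers; callers iterate over the result,
 * descending one level per entry.
 */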
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

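/*
 * Number of entries needed in the top-level directory to cover the whole
 * address space: 4 PDP entries for a 3-level (32b) ppgtt, 512 PML4 entries
 * for a 4-level (48b) ppgtt.
 */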
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

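/*
 * Recursively free a page-directory tree: walk the @count entries at level
 * @lvl, free each populated child subtree, then release the directory
 * itself.
 */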
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

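/*
 * Clear the range [start, end) of page-table indices at level @lvl: point
 * the affected PTEs back at the scratch page and release any table or
 * directory whose last user goes away. Returns the updated start index.
 */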
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

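/*
 * Populate the range [*start, end) of page-table indices at level @lvl,
 * taking preallocated tables from @stash as needed. New directories are
 * installed under pd->lock; losing a race simply reuses the winner's entry.
 */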
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);
			i915_gem_object_make_unshrinkable(pt->base);

			if (lvl ||
			    gen8_pt_count(*start, end) < I915_PDES ||
			    intel_vgpu_active(vm->i915))
				fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

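/*
 * Write 4K PTEs for the scatterlist in @iter, starting at page-table index
 * @idx and moving to the next page table (and page directory) whenever one
 * fills up. Returns the index to continue from, or 0 once the scatterlist
 * is exhausted.
 */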
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);

	return idx;
}

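/*
 * Insert entries for a vma whose backing store allows larger GTT page
 * sizes: write 2M PDEs where the dma addresses and GTT offset are suitably
 * aligned, otherwise fall back to 4K PTEs, opportunistically marking a
 * fully covered page table as 64K via GEN8_PDE_IPS_64K.
 */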
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma->node.start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);
		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or we have
		 * filled part of it, reached the end of the sg table and have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

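/*
 * Bind a vma's pages into the ppgtt: take the huge-page path when the
 * backing store offers GTT page sizes above 4K, otherwise emit plain 4K
 * PTEs one pdp at a time.
 */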
static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

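/*
 * Set up the scratch hierarchy used to back unallocated address space: a
 * scratch page for level 0 plus, for each higher level, a scratch table
 * whose entries all point at the scratch object one level below.
 */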
static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj))
			goto free_scratch;

		ret = pin_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return -ENOMEM;
}

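/*
 * A 3-level (32b) ppgtt exposes exactly four PDP entries to the hardware,
 * so allocate and pin all four page directories up front, pointing their
 * entries at scratch; they are never torn down while the vm exists.
 */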
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = pin_pt_dma(vm, pde->pt.base);
		if (err) {
			i915_gem_object_put(pde->pt.base);
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

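/*
 * Allocate the top-level page directory (the PML4 for a 4-level ppgtt, the
 * PDP table for a 3-level one), back it with dma pages, fill it with
 * scratch entries and mark it pinned for the lifetime of the vm.
 */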
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = pin_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}