// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

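/*
 * Encode a page-directory entry: the DMA address of the next-level table
 * plus the present and read/write bits, with the PPAT field selecting
 * cached or uncached walks for the levels below.
 */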
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

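/*
 * Encode a leaf PTE: the page's DMA address together with the present and
 * read/write bits, the PPAT cacheability field (uncached, write-through
 * for display, or fully cached) and, when requested via PTE_LM, the
 * local-memory bit used on discrete parts.
 */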
static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~GEN8_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

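/*
 * Under GVT-g virtualisation, publish the page-directory addresses through
 * the shared vgtif registers and raise a g2v notification so the host can
 * track the creation or destruction of this ppgtt.
 */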
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
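/*
 * Worked example: each 4K table holds 512 (2^9) u64 entries, so a 48b
 * address decomposes into 9b indices above the 12b page offset:
 * lvl 0 (pt) bits [20:12], lvl 1 (pd) bits [29:21], lvl 2 (pdp) bits
 * [38:30], lvl 3 (pml4) bits [47:39] -- which is what
 * __gen8_pte_index(addr, lvl) extracts.
 */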

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

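/*
 * Report the first index for [start, end) at this level via *idx and
 * return the number of entries covered, clamped to the end of the
 * current table so that callers can walk one table at a time.
 */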
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

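/* Number of PTEs spanned by [start, end) within a single page table. */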
static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

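/* Number of top-level entries needed to cover the whole of vm->total. */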
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

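/* Depth-first teardown: recurse into each populated entry, then free pd. */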
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (ppgtt->pd)
		__gen8_ppgtt_cleanup(vm, ppgtt->pd,
				     gen8_pd_top_count(vm), vm->top);

	free_scratch(vm);
}

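/*
 * Unmap the range of PTE indices [start, end): subtrees wholly covered by
 * the range are freed back to the allocator, while partially covered
 * tables have the affected PTEs rewritten to point at scratch. Returns
 * the first index not yet processed at this level.
 */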
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			unsigned int pte = gen8_pd_index(start, 0);
			unsigned int num_ptes;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			num_ptes = count;
			if (pt->is_compact) {
				GEM_BUG_ON(num_ptes % 16);
				GEM_BUG_ON(pte % 16);
				num_ptes /= 16;
				pte /= 16;
			}

			vaddr = px_vaddr(pt);
			memset64(vaddr + pte,
				 vm->scratch[0]->encode,
				 num_ptes);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

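/*
 * Populate [*start, end) with page tables drawn from the preallocated
 * stash, recursing from vm->top towards the leaves; *start is advanced
 * past every entry that has been fully accounted for.
 */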
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);

			fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

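/*
 * Visit every leaf page table backing [*start, end), pinning each entry
 * with pt->used across the callback so it cannot be freed underneath us.
 */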
static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 u64 *start, u64 end, int lvl,
				 void (*fn)(struct i915_address_space *vm,
					    struct i915_page_table *pt,
					    void *data),
				 void *data)
{
	unsigned int idx, len;

	len = gen8_pd_range(*start, end, lvl--, &idx);

	spin_lock(&pd->lock);
	do {
		struct i915_page_table *pt = pd->entry[idx];

		atomic_inc(&pt->used);
		spin_unlock(&pd->lock);

		if (lvl) {
			__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
					     fn, data);
		} else {
			fn(vm, pt, data);
			*start += gen8_pt_count(*start, end);
		}

		spin_lock(&pd->lock);
		atomic_dec(&pt->used);
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_foreach(struct i915_address_space *vm,
			       u64 start, u64 length,
			       void (*fn)(struct i915_address_space *vm,
					  struct i915_page_table *pt,
					  void *data),
			       void *data)
{
	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;

	__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
			     &start, start + length, vm->top,
			     fn, data);
}

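/*
 * Write 4K PTEs for the scatterlist, one page table at a time, flushing
 * each completed table's CPU writes before moving on. Returns the next
 * index to fill, or 0 once the scatterlist is exhausted.
 */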
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);

	return idx;
}

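/*
 * Insert entries on platforms with 64K GTT granularity for local memory:
 * 2M mappings are written directly into the page directory, and LMEM
 * pages use compact page tables, where 32 entries of 64K each (every
 * 16th slot, hence the divisions by 16) stand in for the usual 512 * 4K.
 */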
static void
xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
			  struct i915_vma_resource *vma_res,
			  struct sgt_dma *iter,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	const gen8_pte_t pte_encode = vm->pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma_res->start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		struct i915_page_table *pt =
			i915_pt_entry(pd, __gen8_pte_index(start, 1));
		gen8_pte_t encode = pte_encode;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index, max;

		max = I915_PDES;

		if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			if (encode & GEN12_PPGTT_PTE_LM) {
				GEM_BUG_ON(__gen8_pte_index(start, 0) % 16);
				GEM_BUG_ON(rem < I915_GTT_PAGE_SIZE_64K);
				GEM_BUG_ON(!IS_ALIGNED(iter->dma,
						       I915_GTT_PAGE_SIZE_64K));

				index = __gen8_pte_index(start, 0) / 16;
				page_size = I915_GTT_PAGE_SIZE_64K;

				max /= 16;

				vaddr = px_vaddr(pd);
				vaddr[__gen8_pte_index(start, 1)] |= GEN12_PDE_64K;

				pt->is_compact = true;
			} else {
				GEM_BUG_ON(pt->is_compact);
				index = __gen8_pte_index(start, 0);
				page_size = I915_GTT_PAGE_SIZE;
			}

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(rem < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < max);

		vma_res->page_sizes_gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

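/*
 * Insert entries with the largest page size available: 2M chunks become
 * single page-directory entries, and runs that remain 64K-aligned across
 * a whole page table are promoted to 64K via GEN8_PDE_IPS_64K below.
 */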
static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma_res->start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		drm_clflush_virt_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vm) &&
		      !iter->sg && IS_ALIGNED(vma_res->start +
					      vma_res->node_size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
				u16 i;

				encode = vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				drm_clflush_virt_range(vaddr, PAGE_SIZE);
			}
		}

		vma_res->page_sizes_gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma_resource *vma_res,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma_res);

	if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
		if (HAS_64K_PAGES(vm->i915))
			xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
		else
			gen8_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
	} else {
		u64 idx = vma_res->start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
	}
}

static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
				    dma_addr_t addr,
				    u64 offset,
				    enum i915_cache_level level,
				    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
	gen8_pte_t *vaddr;

	GEM_BUG_ON(pt->is_compact);

	vaddr = px_vaddr(pt);
	vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
	drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}

static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
					    dma_addr_t addr,
					    u64 offset,
					    enum i915_cache_level level,
					    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
	gen8_pte_t *vaddr;

	GEM_BUG_ON(!IS_ALIGNED(addr, SZ_64K));
	GEM_BUG_ON(!IS_ALIGNED(offset, SZ_64K));

	if (!pt->is_compact) {
		vaddr = px_vaddr(pd);
		vaddr[gen8_pd_index(idx, 1)] |= GEN12_PDE_64K;
		pt->is_compact = true;
	}

	vaddr = px_vaddr(pt);
	vaddr[gen8_pd_index(idx, 0) / 16] = gen8_pte_encode(addr, level, flags);
}

static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
				       dma_addr_t addr,
				       u64 offset,
				       enum i915_cache_level level,
				       u32 flags)
{
	if (flags & PTE_LM)
		return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
						       level, flags);

	return gen8_ppgtt_insert_entry(vm, addr, offset, level, flags);
}

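/*
 * Build the scratch hierarchy: a scratch page at level 0 and, for each
 * level above it, a scratch table whose entries all point one level down,
 * so that any unpopulated address resolves to the scratch page.
 */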
static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vms, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_NONE, pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto free_scratch;
		}

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	vm->scratch[0] = NULL;
	return ret;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal
 * x86 terms. Each PDP represents 1GB of memory:
 * 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_page_directory *pd;
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915))
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
	else
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	/*
	 * On some platforms the hw has dropped support for 4K GTT pages
	 * when dealing with LMEM, and due to the design of 64K GTT
	 * pages in the hw, we can only mark the *entire* page-table as
	 * operating in 64K GTT mode, since the enable bit is still on
	 * the pde, and not the pte. And since we still need to allow
	 * 4K GTT pages for SMEM objects, we can't have a "normal" 4K
	 * page-table with scratch pointing to LMEM, since that's
	 * undefined from the hw pov. The simplest solution is to just
	 * move the 64K scratch page to SMEM on all platforms and call
	 * it a day, since that should work for all configurations.
	 *
	 * Using SMEM instead of LMEM has the additional advantage of
	 * not reserving high performance memory for a "never" used
	 * filler page. It also removes the device access that would
	 * be required to initialise the scratch page, reducing pressure
	 * on an even scarcer resource.
	 */
	ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	if (HAS_64K_PAGES(gt->i915))
		ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
	else
		ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;
	ppgtt->vm.foreach = gen8_ppgtt_foreach;
	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_put;

	pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(pd)) {
		err = PTR_ERR(pd);
		goto err_put;
	}
	ppgtt->pd = pd;

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_put;
	}

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return ppgtt;

err_put:
	i915_vm_put(&ppgtt->vm);
	return ERR_PTR(err);
}