1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
27 
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
33 
34 #include <asm/set_memory.h>
35 
36 #include <drm/drmP.h>
37 #include <drm/i915_drm.h>
38 
39 #include "i915_drv.h"
40 #include "i915_vgpu.h"
41 #include "i915_trace.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
44 
45 #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
46 
47 /**
48  * DOC: Global GTT views
49  *
50  * Background and previous state
51  *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view is called the normal view.
55  *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
59  *
60  * One example of an alternative view is a stereo display driven by a single
61  * image. In this case we would have a framebuffer looking like this
62  * (2x2 pages):
63  *
64  *    12
65  *    34
66  *
 * The above would represent a normal GGTT view as normally mapped for GPU or
 * CPU rendering. In contrast, the display engine would be fed an alternative
 * view which could look something like this:
70  *
71  *   1212
72  *   3434
73  *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
76  *
77  * Implementation and usage
78  *
79  * GGTT views are implemented using VMAs and are distinguished via enum
80  * i915_ggtt_view_type and struct i915_ggtt_view.
81  *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
86  *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
91  *
92  * Code wanting to add or use a new GGTT view needs to:
93  *
94  * 1. Add a new enum with a suitable name.
95  * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_ggtt_vma_pages().
97  *
 * New views are required to build a scatter-gather table from within the
 * i915_get_ggtt_vma_pages function. This table is stored in vma.pages and
 * exists for the lifetime of a VMA.
101  *
102  * Core API is designed to have copy semantics which means that passed in
103  * struct i915_ggtt_view does not need to be persistent (left around after
104  * calling the core API functions).
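 *
 * As a minimal illustrative sketch (not taken from any specific caller), code
 * that only needs the normal view could do something like:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
 *
 * relying on the copy semantics above to keep the view on the stack.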
105  *
106  */
107 
108 static int
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
110 
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
112 {
113 	/* Note that as an uncached mmio write, this should flush the
114 	 * WCB of the writes into the GGTT before it triggers the invalidate.
115 	 */
116 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
117 }
118 
119 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
120 {
121 	gen6_ggtt_invalidate(dev_priv);
122 	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
123 }
124 
125 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
126 {
127 	intel_gtt_chipset_flush();
128 }
129 
130 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
131 {
132 	i915->ggtt.invalidate(i915);
133 }
134 
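/* Sanitize the requested enable_ppgtt value (from the module parameter)
 * against what the hardware and any virtualisation layer actually support.
 * Returns the level of ppgtt to use: 0 = disabled, 1 = aliasing ppgtt,
 * 2 = full ppgtt, 3 = full 48bit ppgtt.
 */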
135 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
136 			       	int enable_ppgtt)
137 {
138 	bool has_full_ppgtt;
139 	bool has_full_48bit_ppgtt;
140 
141 	if (!dev_priv->info.has_aliasing_ppgtt)
142 		return 0;
143 
144 	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
145 	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
146 
147 	if (intel_vgpu_active(dev_priv)) {
148 		/* GVT-g has no support for 32bit ppgtt */
149 		has_full_ppgtt = false;
150 		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
151 	}
152 
153 	/*
154 	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
155 	 * execlists, the sole mechanism available to submit work.
156 	 */
157 	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
158 		return 0;
159 
160 	if (enable_ppgtt == 1)
161 		return 1;
162 
163 	if (enable_ppgtt == 2 && has_full_ppgtt)
164 		return 2;
165 
166 	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
167 		return 3;
168 
169 	/* Disable ppgtt on SNB if VT-d is on. */
170 	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
171 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
172 		return 0;
173 	}
174 
175 	/* Early VLV doesn't have this */
176 	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
177 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
178 		return 0;
179 	}
180 
181 	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
182 		if (has_full_48bit_ppgtt)
183 			return 3;
184 
185 		if (has_full_ppgtt)
186 			return 2;
187 	}
188 
189 	return 1;
190 }
191 
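/* Bind a vma into its ppgtt: on the first local bind, allocate the paging
 * structures covering the vma's range, then write out the PTEs.
 */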
192 static int ppgtt_bind_vma(struct i915_vma *vma,
193 			  enum i915_cache_level cache_level,
194 			  u32 unused)
195 {
196 	u32 pte_flags;
197 	int ret;
198 
199 	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
200 		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
201 						 vma->size);
202 		if (ret)
203 			return ret;
204 	}
205 
206 	/* Currently applicable only to VLV */
207 	pte_flags = 0;
208 	if (vma->obj->gt_ro)
209 		pte_flags |= PTE_READ_ONLY;
210 
211 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
212 
213 	return 0;
214 }
215 
216 static void ppgtt_unbind_vma(struct i915_vma *vma)
217 {
218 	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
219 }
220 
221 static int ppgtt_set_pages(struct i915_vma *vma)
222 {
223 	GEM_BUG_ON(vma->pages);
224 
225 	vma->pages = vma->obj->mm.pages;
226 
227 	vma->page_sizes = vma->obj->mm.page_sizes;
228 
229 	return 0;
230 }
231 
232 static void clear_pages(struct i915_vma *vma)
233 {
234 	GEM_BUG_ON(!vma->pages);
235 
236 	if (vma->pages != vma->obj->mm.pages) {
237 		sg_free_table(vma->pages);
238 		kfree(vma->pages);
239 	}
240 	vma->pages = NULL;
241 
242 	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
243 }
244 
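/* Encode a gen8 PTE: present and writable, with the PPAT attribute selecting
 * uncached, write-through (for display/eLLC) or write-back caching.
 */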
245 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
246 				  enum i915_cache_level level)
247 {
248 	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
249 	pte |= addr;
250 
251 	switch (level) {
252 	case I915_CACHE_NONE:
253 		pte |= PPAT_UNCACHED;
254 		break;
255 	case I915_CACHE_WT:
256 		pte |= PPAT_DISPLAY_ELLC;
257 		break;
258 	default:
259 		pte |= PPAT_CACHED;
260 		break;
261 	}
262 
263 	return pte;
264 }
265 
266 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
267 				  const enum i915_cache_level level)
268 {
269 	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
270 	pde |= addr;
271 	if (level != I915_CACHE_NONE)
272 		pde |= PPAT_CACHED_PDE;
273 	else
274 		pde |= PPAT_UNCACHED;
275 	return pde;
276 }
277 
278 #define gen8_pdpe_encode gen8_pde_encode
279 #define gen8_pml4e_encode gen8_pde_encode
280 
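/* The gen6/gen7 PTE encoders below differ only in how each platform encodes
 * cacheability (and, for byt, snooping and writability) in the PTE bits.
 */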
281 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
282 				 enum i915_cache_level level,
283 				 u32 unused)
284 {
285 	gen6_pte_t pte = GEN6_PTE_VALID;
286 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
287 
288 	switch (level) {
289 	case I915_CACHE_L3_LLC:
290 	case I915_CACHE_LLC:
291 		pte |= GEN6_PTE_CACHE_LLC;
292 		break;
293 	case I915_CACHE_NONE:
294 		pte |= GEN6_PTE_UNCACHED;
295 		break;
296 	default:
297 		MISSING_CASE(level);
298 	}
299 
300 	return pte;
301 }
302 
303 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
304 				 enum i915_cache_level level,
305 				 u32 unused)
306 {
307 	gen6_pte_t pte = GEN6_PTE_VALID;
308 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
309 
310 	switch (level) {
311 	case I915_CACHE_L3_LLC:
312 		pte |= GEN7_PTE_CACHE_L3_LLC;
313 		break;
314 	case I915_CACHE_LLC:
315 		pte |= GEN6_PTE_CACHE_LLC;
316 		break;
317 	case I915_CACHE_NONE:
318 		pte |= GEN6_PTE_UNCACHED;
319 		break;
320 	default:
321 		MISSING_CASE(level);
322 	}
323 
324 	return pte;
325 }
326 
327 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
328 				 enum i915_cache_level level,
329 				 u32 flags)
330 {
331 	gen6_pte_t pte = GEN6_PTE_VALID;
332 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
333 
334 	if (!(flags & PTE_READ_ONLY))
335 		pte |= BYT_PTE_WRITEABLE;
336 
337 	if (level != I915_CACHE_NONE)
338 		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
339 
340 	return pte;
341 }
342 
343 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
344 				 enum i915_cache_level level,
345 				 u32 unused)
346 {
347 	gen6_pte_t pte = GEN6_PTE_VALID;
348 	pte |= HSW_PTE_ADDR_ENCODE(addr);
349 
350 	if (level != I915_CACHE_NONE)
351 		pte |= HSW_WB_LLC_AGE3;
352 
353 	return pte;
354 }
355 
356 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
357 				  enum i915_cache_level level,
358 				  u32 unused)
359 {
360 	gen6_pte_t pte = GEN6_PTE_VALID;
361 	pte |= HSW_PTE_ADDR_ENCODE(addr);
362 
363 	switch (level) {
364 	case I915_CACHE_NONE:
365 		break;
366 	case I915_CACHE_WT:
367 		pte |= HSW_WT_ELLC_LLC_AGE3;
368 		break;
369 	default:
370 		pte |= HSW_WB_ELLC_LLC_AGE3;
371 		break;
372 	}
373 
374 	return pte;
375 }
376 
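/* Allocate a page for paging structures: first try the vm's local free list;
 * if page tables are not mapped WC, fall back to a plain alloc_page().
 * Otherwise consult the global WC stash, and as a last resort batch-allocate
 * fresh pages and convert them to WC in one go to amortize the cost of
 * set_pages_array_wc().
 */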
377 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
378 {
379 	struct pagevec *pvec = &vm->free_pages;
380 	struct pagevec stash;
381 
382 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
383 		i915_gem_shrink_all(vm->i915);
384 
385 	if (likely(pvec->nr))
386 		return pvec->pages[--pvec->nr];
387 
388 	if (!vm->pt_kmap_wc)
389 		return alloc_page(gfp);
390 
391 	/* A placeholder for a specific mutex to guard the WC stash */
392 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
393 
394 	/* Look in our global stash of WC pages... */
395 	pvec = &vm->i915->mm.wc_stash;
396 	if (likely(pvec->nr))
397 		return pvec->pages[--pvec->nr];
398 
399 	/*
	 * Otherwise batch allocate pages to amortize the cost of set_pages_wc.
401 	 *
402 	 * We have to be careful as page allocation may trigger the shrinker
403 	 * (via direct reclaim) which will fill up the WC stash underneath us.
404 	 * So we add our WB pages into a temporary pvec on the stack and merge
405 	 * them into the WC stash after all the allocations are complete.
406 	 */
407 	pagevec_init(&stash);
408 	do {
409 		struct page *page;
410 
411 		page = alloc_page(gfp);
412 		if (unlikely(!page))
413 			break;
414 
415 		stash.pages[stash.nr++] = page;
416 	} while (stash.nr < pagevec_space(pvec));
417 
418 	if (stash.nr) {
419 		int nr = min_t(int, stash.nr, pagevec_space(pvec));
420 		struct page **pages = stash.pages + stash.nr - nr;
421 
422 		if (nr && !set_pages_array_wc(pages, nr)) {
423 			memcpy(pvec->pages + pvec->nr,
424 			       pages, sizeof(pages[0]) * nr);
425 			pvec->nr += nr;
426 			stash.nr -= nr;
427 		}
428 
429 		pagevec_release(&stash);
430 	}
431 
432 	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
433 }
434 
435 static void vm_free_pages_release(struct i915_address_space *vm,
436 				  bool immediate)
437 {
438 	struct pagevec *pvec = &vm->free_pages;
439 
440 	GEM_BUG_ON(!pagevec_count(pvec));
441 
442 	if (vm->pt_kmap_wc) {
443 		struct pagevec *stash = &vm->i915->mm.wc_stash;
444 
		/* When we use WC, first fill up the global stash and then,
		 * only if it is full, immediately free the overflow.
447 		 */
448 
449 		lockdep_assert_held(&vm->i915->drm.struct_mutex);
450 		if (pagevec_space(stash)) {
451 			do {
452 				stash->pages[stash->nr++] =
453 					pvec->pages[--pvec->nr];
454 				if (!pvec->nr)
455 					return;
456 			} while (pagevec_space(stash));
457 
458 			/* As we have made some room in the VM's free_pages,
459 			 * we can wait for it to fill again. Unless we are
460 			 * inside i915_address_space_fini() and must
461 			 * immediately release the pages!
462 			 */
463 			if (!immediate)
464 				return;
465 		}
466 
467 		set_pages_array_wb(pvec->pages, pvec->nr);
468 	}
469 
470 	__pagevec_release(pvec);
471 }
472 
473 static void vm_free_page(struct i915_address_space *vm, struct page *page)
474 {
475 	/*
476 	 * On !llc, we need to change the pages back to WB. We only do so
477 	 * in bulk, so we rarely need to change the page attributes here,
478 	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
479 	 * To make detection of the possible sleep more likely, use an
480 	 * unconditional might_sleep() for everybody.
481 	 */
482 	might_sleep();
483 	if (!pagevec_add(&vm->free_pages, page))
484 		vm_free_pages_release(vm, false);
485 }
486 
487 static int __setup_page_dma(struct i915_address_space *vm,
488 			    struct i915_page_dma *p,
489 			    gfp_t gfp)
490 {
491 	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
492 	if (unlikely(!p->page))
493 		return -ENOMEM;
494 
495 	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
496 				PCI_DMA_BIDIRECTIONAL);
497 	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
498 		vm_free_page(vm, p->page);
499 		return -ENOMEM;
500 	}
501 
502 	return 0;
503 }
504 
505 static int setup_page_dma(struct i915_address_space *vm,
506 			  struct i915_page_dma *p)
507 {
508 	return __setup_page_dma(vm, p, I915_GFP_DMA);
509 }
510 
511 static void cleanup_page_dma(struct i915_address_space *vm,
512 			     struct i915_page_dma *p)
513 {
514 	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
515 	vm_free_page(vm, p->page);
516 }
517 
518 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
519 
520 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
521 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
524 
525 static void fill_page_dma(struct i915_address_space *vm,
526 			  struct i915_page_dma *p,
527 			  const u64 val)
528 {
529 	u64 * const vaddr = kmap_atomic(p->page);
530 
531 	memset64(vaddr, val, PAGE_SIZE / sizeof(val));
532 
533 	kunmap_atomic(vaddr);
534 }
535 
536 static void fill_page_dma_32(struct i915_address_space *vm,
537 			     struct i915_page_dma *p,
538 			     const u32 v)
539 {
540 	fill_page_dma(vm, p, (u64)v << 32 | v);
541 }
542 
543 static int
544 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
545 {
546 	struct page *page = NULL;
547 	dma_addr_t addr;
548 	int order;
549 
550 	/*
551 	 * In order to utilize 64K pages for an object with a size < 2M, we will
552 	 * need to support a 64K scratch page, given that every 16th entry for a
553 	 * page-table operating in 64K mode must point to a properly aligned 64K
554 	 * region, including any PTEs which happen to point to scratch.
555 	 *
556 	 * This is only relevant for the 48b PPGTT where we support
557 	 * huge-gtt-pages, see also i915_vma_insert().
558 	 *
	 * TODO: we should really consider write-protecting the scratch-page and
	 * sharing it between ppgtts
561 	 */
562 	if (i915_vm_is_48bit(vm) &&
563 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
564 		order = get_order(I915_GTT_PAGE_SIZE_64K);
565 		page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
566 		if (page) {
567 			addr = dma_map_page(vm->dma, page, 0,
568 					    I915_GTT_PAGE_SIZE_64K,
569 					    PCI_DMA_BIDIRECTIONAL);
570 			if (unlikely(dma_mapping_error(vm->dma, addr))) {
571 				__free_pages(page, order);
572 				page = NULL;
573 			}
574 
575 			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
576 				dma_unmap_page(vm->dma, addr,
577 					       I915_GTT_PAGE_SIZE_64K,
578 					       PCI_DMA_BIDIRECTIONAL);
579 				__free_pages(page, order);
580 				page = NULL;
581 			}
582 		}
583 	}
584 
585 	if (!page) {
586 		order = 0;
587 		page = alloc_page(gfp | __GFP_ZERO);
588 		if (unlikely(!page))
589 			return -ENOMEM;
590 
591 		addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
592 				    PCI_DMA_BIDIRECTIONAL);
593 		if (unlikely(dma_mapping_error(vm->dma, addr))) {
594 			__free_page(page);
595 			return -ENOMEM;
596 		}
597 	}
598 
599 	vm->scratch_page.page = page;
600 	vm->scratch_page.daddr = addr;
601 	vm->scratch_page.order = order;
602 
603 	return 0;
604 }
605 
606 static void cleanup_scratch_page(struct i915_address_space *vm)
607 {
608 	struct i915_page_dma *p = &vm->scratch_page;
609 
610 	dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
611 		       PCI_DMA_BIDIRECTIONAL);
612 	__free_pages(p->page, p->order);
613 }
614 
615 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
616 {
617 	struct i915_page_table *pt;
618 
619 	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
620 	if (unlikely(!pt))
621 		return ERR_PTR(-ENOMEM);
622 
623 	if (unlikely(setup_px(vm, pt))) {
624 		kfree(pt);
625 		return ERR_PTR(-ENOMEM);
626 	}
627 
628 	pt->used_ptes = 0;
629 	return pt;
630 }
631 
632 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
633 {
634 	cleanup_px(vm, pt);
635 	kfree(pt);
636 }
637 
638 static void gen8_initialize_pt(struct i915_address_space *vm,
639 			       struct i915_page_table *pt)
640 {
641 	fill_px(vm, pt,
642 		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
643 }
644 
645 static void gen6_initialize_pt(struct i915_address_space *vm,
646 			       struct i915_page_table *pt)
647 {
648 	fill32_px(vm, pt,
649 		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
650 }
651 
652 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
653 {
654 	struct i915_page_directory *pd;
655 
656 	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
657 	if (unlikely(!pd))
658 		return ERR_PTR(-ENOMEM);
659 
660 	if (unlikely(setup_px(vm, pd))) {
661 		kfree(pd);
662 		return ERR_PTR(-ENOMEM);
663 	}
664 
665 	pd->used_pdes = 0;
666 	return pd;
667 }
668 
669 static void free_pd(struct i915_address_space *vm,
670 		    struct i915_page_directory *pd)
671 {
672 	cleanup_px(vm, pd);
673 	kfree(pd);
674 }
675 
676 static void gen8_initialize_pd(struct i915_address_space *vm,
677 			       struct i915_page_directory *pd)
678 {
679 	unsigned int i;
680 
681 	fill_px(vm, pd,
682 		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
683 	for (i = 0; i < I915_PDES; i++)
684 		pd->page_table[i] = vm->scratch_pt;
685 }
686 
687 static int __pdp_init(struct i915_address_space *vm,
688 		      struct i915_page_directory_pointer *pdp)
689 {
690 	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
691 	unsigned int i;
692 
693 	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
694 					    GFP_KERNEL | __GFP_NOWARN);
695 	if (unlikely(!pdp->page_directory))
696 		return -ENOMEM;
697 
698 	for (i = 0; i < pdpes; i++)
699 		pdp->page_directory[i] = vm->scratch_pd;
700 
701 	return 0;
702 }
703 
704 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
705 {
706 	kfree(pdp->page_directory);
707 	pdp->page_directory = NULL;
708 }
709 
710 static inline bool use_4lvl(const struct i915_address_space *vm)
711 {
712 	return i915_vm_is_48bit(vm);
713 }
714 
715 static struct i915_page_directory_pointer *
716 alloc_pdp(struct i915_address_space *vm)
717 {
718 	struct i915_page_directory_pointer *pdp;
719 	int ret = -ENOMEM;
720 
721 	WARN_ON(!use_4lvl(vm));
722 
723 	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
724 	if (!pdp)
725 		return ERR_PTR(-ENOMEM);
726 
727 	ret = __pdp_init(vm, pdp);
728 	if (ret)
729 		goto fail_bitmap;
730 
731 	ret = setup_px(vm, pdp);
732 	if (ret)
733 		goto fail_page_m;
734 
735 	return pdp;
736 
737 fail_page_m:
738 	__pdp_fini(pdp);
739 fail_bitmap:
740 	kfree(pdp);
741 
742 	return ERR_PTR(ret);
743 }
744 
745 static void free_pdp(struct i915_address_space *vm,
746 		     struct i915_page_directory_pointer *pdp)
747 {
748 	__pdp_fini(pdp);
749 
750 	if (!use_4lvl(vm))
751 		return;
752 
753 	cleanup_px(vm, pdp);
754 	kfree(pdp);
755 }
756 
757 static void gen8_initialize_pdp(struct i915_address_space *vm,
758 				struct i915_page_directory_pointer *pdp)
759 {
760 	gen8_ppgtt_pdpe_t scratch_pdpe;
761 
762 	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
763 
764 	fill_px(vm, pdp, scratch_pdpe);
765 }
766 
767 static void gen8_initialize_pml4(struct i915_address_space *vm,
768 				 struct i915_pml4 *pml4)
769 {
770 	unsigned int i;
771 
772 	fill_px(vm, pml4,
773 		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
774 	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
775 		pml4->pdps[i] = vm->scratch_pdp;
776 }
777 
778 /* Broadwell Page Directory Pointer Descriptors */
779 static int gen8_write_pdp(struct drm_i915_gem_request *req,
780 			  unsigned entry,
781 			  dma_addr_t addr)
782 {
783 	struct intel_engine_cs *engine = req->engine;
784 	u32 *cs;
785 
786 	BUG_ON(entry >= 4);
787 
788 	cs = intel_ring_begin(req, 6);
789 	if (IS_ERR(cs))
790 		return PTR_ERR(cs);
791 
792 	*cs++ = MI_LOAD_REGISTER_IMM(1);
793 	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
794 	*cs++ = upper_32_bits(addr);
795 	*cs++ = MI_LOAD_REGISTER_IMM(1);
796 	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
797 	*cs++ = lower_32_bits(addr);
798 	intel_ring_advance(req, cs);
799 
800 	return 0;
801 }
802 
803 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
804 			       struct drm_i915_gem_request *req)
805 {
806 	int i, ret;
807 
808 	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
809 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
810 
811 		ret = gen8_write_pdp(req, i, pd_daddr);
812 		if (ret)
813 			return ret;
814 	}
815 
816 	return 0;
817 }
818 
819 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
820 			       struct drm_i915_gem_request *req)
821 {
822 	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
823 }
824 
825 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
826  * the page table structures, we mark them dirty so that
827  * context switching/execlist queuing code takes extra steps
828  * to ensure that tlbs are flushed.
829  */
830 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
831 {
832 	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
833 }
834 
835 /* Removes entries from a single page table, releasing it if it's empty.
836  * Caller can use the return value to update higher-level entries.
837  */
838 static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
839 				struct i915_page_table *pt,
840 				u64 start, u64 length)
841 {
842 	unsigned int num_entries = gen8_pte_count(start, length);
843 	unsigned int pte = gen8_pte_index(start);
844 	unsigned int pte_end = pte + num_entries;
845 	const gen8_pte_t scratch_pte =
846 		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
847 	gen8_pte_t *vaddr;
848 
849 	GEM_BUG_ON(num_entries > pt->used_ptes);
850 
851 	pt->used_ptes -= num_entries;
852 	if (!pt->used_ptes)
853 		return true;
854 
855 	vaddr = kmap_atomic_px(pt);
856 	while (pte < pte_end)
857 		vaddr[pte++] = scratch_pte;
858 	kunmap_atomic(vaddr);
859 
860 	return false;
861 }
862 
863 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
864 			       struct i915_page_directory *pd,
865 			       struct i915_page_table *pt,
866 			       unsigned int pde)
867 {
868 	gen8_pde_t *vaddr;
869 
870 	pd->page_table[pde] = pt;
871 
872 	vaddr = kmap_atomic_px(pd);
873 	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
874 	kunmap_atomic(vaddr);
875 }
876 
877 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
878 				struct i915_page_directory *pd,
879 				u64 start, u64 length)
880 {
881 	struct i915_page_table *pt;
882 	u32 pde;
883 
884 	gen8_for_each_pde(pt, pd, start, length, pde) {
885 		GEM_BUG_ON(pt == vm->scratch_pt);
886 
887 		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
888 			continue;
889 
890 		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
891 		GEM_BUG_ON(!pd->used_pdes);
892 		pd->used_pdes--;
893 
894 		free_pt(vm, pt);
895 	}
896 
897 	return !pd->used_pdes;
898 }
899 
900 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
901 				struct i915_page_directory_pointer *pdp,
902 				struct i915_page_directory *pd,
903 				unsigned int pdpe)
904 {
905 	gen8_ppgtt_pdpe_t *vaddr;
906 
907 	pdp->page_directory[pdpe] = pd;
908 	if (!use_4lvl(vm))
909 		return;
910 
911 	vaddr = kmap_atomic_px(pdp);
912 	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
913 	kunmap_atomic(vaddr);
914 }
915 
916 /* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
918  */
919 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
920 				 struct i915_page_directory_pointer *pdp,
921 				 u64 start, u64 length)
922 {
923 	struct i915_page_directory *pd;
924 	unsigned int pdpe;
925 
926 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
927 		GEM_BUG_ON(pd == vm->scratch_pd);
928 
929 		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
930 			continue;
931 
932 		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
933 		GEM_BUG_ON(!pdp->used_pdpes);
934 		pdp->used_pdpes--;
935 
936 		free_pd(vm, pd);
937 	}
938 
939 	return !pdp->used_pdpes;
940 }
941 
942 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
943 				  u64 start, u64 length)
944 {
945 	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
946 }
947 
948 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
949 				 struct i915_page_directory_pointer *pdp,
950 				 unsigned int pml4e)
951 {
952 	gen8_ppgtt_pml4e_t *vaddr;
953 
954 	pml4->pdps[pml4e] = pdp;
955 
956 	vaddr = kmap_atomic_px(pml4);
957 	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
958 	kunmap_atomic(vaddr);
959 }
960 
961 /* Removes entries from a single pml4.
962  * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries always point at the scratch pdp.
964  */
965 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
966 				  u64 start, u64 length)
967 {
968 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
969 	struct i915_pml4 *pml4 = &ppgtt->pml4;
970 	struct i915_page_directory_pointer *pdp;
971 	unsigned int pml4e;
972 
973 	GEM_BUG_ON(!use_4lvl(vm));
974 
975 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
976 		GEM_BUG_ON(pdp == vm->scratch_pdp);
977 
978 		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
979 			continue;
980 
981 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
982 
983 		free_pdp(vm, pdp);
984 	}
985 }
986 
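/* Iterator state for walking an object's backing scatterlist: the current sg
 * element, the next dma address to write and the (exclusive) end of the
 * current segment.
 */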
987 static inline struct sgt_dma {
988 	struct scatterlist *sg;
989 	dma_addr_t dma, max;
990 } sgt_dma(struct i915_vma *vma) {
991 	struct scatterlist *sg = vma->pages->sgl;
992 	dma_addr_t addr = sg_dma_address(sg);
993 	return (struct sgt_dma) { sg, addr, addr + sg->length };
994 }
995 
996 struct gen8_insert_pte {
997 	u16 pml4e;
998 	u16 pdpe;
999 	u16 pde;
1000 	u16 pte;
1001 };
1002 
1003 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
1004 {
1005 	return (struct gen8_insert_pte) {
1006 		 gen8_pml4e_index(start),
1007 		 gen8_pdpe_index(start),
1008 		 gen8_pde_index(start),
1009 		 gen8_pte_index(start),
1010 	};
1011 }
1012 
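/* Write out PTEs for the sg iterator, starting at @idx within @pdp. Returns
 * true if pages remain once this pdp is exhausted (the 4lvl caller then
 * continues with the next pml4e), or false when the whole scatterlist has
 * been inserted.
 */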
1013 static __always_inline bool
1014 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
1015 			      struct i915_page_directory_pointer *pdp,
1016 			      struct sgt_dma *iter,
1017 			      struct gen8_insert_pte *idx,
1018 			      enum i915_cache_level cache_level)
1019 {
1020 	struct i915_page_directory *pd;
1021 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
1022 	gen8_pte_t *vaddr;
1023 	bool ret;
1024 
1025 	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
1026 	pd = pdp->page_directory[idx->pdpe];
1027 	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1028 	do {
1029 		vaddr[idx->pte] = pte_encode | iter->dma;
1030 
1031 		iter->dma += PAGE_SIZE;
1032 		if (iter->dma >= iter->max) {
1033 			iter->sg = __sg_next(iter->sg);
1034 			if (!iter->sg) {
1035 				ret = false;
1036 				break;
1037 			}
1038 
1039 			iter->dma = sg_dma_address(iter->sg);
1040 			iter->max = iter->dma + iter->sg->length;
1041 		}
1042 
1043 		if (++idx->pte == GEN8_PTES) {
1044 			idx->pte = 0;
1045 
1046 			if (++idx->pde == I915_PDES) {
1047 				idx->pde = 0;
1048 
1049 				/* Limited by sg length for 3lvl */
1050 				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
1051 					idx->pdpe = 0;
1052 					ret = true;
1053 					break;
1054 				}
1055 
1056 				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
1057 				pd = pdp->page_directory[idx->pdpe];
1058 			}
1059 
1060 			kunmap_atomic(vaddr);
1061 			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1062 		}
1063 	} while (1);
1064 	kunmap_atomic(vaddr);
1065 
1066 	return ret;
1067 }
1068 
1069 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1070 				   struct i915_vma *vma,
1071 				   enum i915_cache_level cache_level,
1072 				   u32 unused)
1073 {
1074 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1075 	struct sgt_dma iter = sgt_dma(vma);
1076 	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1077 
1078 	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1079 				      cache_level);
1080 
1081 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1082 }
1083 
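/* Insert entries using the largest GTT page size that the alignment and
 * remaining length of each sg segment allow: 2M PDEs, 64K PTEs (by setting
 * GEN8_PDE_IPS_64K on the page table) or plain 4K PTEs.
 */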
1084 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1085 					   struct i915_page_directory_pointer **pdps,
1086 					   struct sgt_dma *iter,
1087 					   enum i915_cache_level cache_level)
1088 {
1089 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
1090 	u64 start = vma->node.start;
1091 	dma_addr_t rem = iter->sg->length;
1092 
1093 	do {
1094 		struct gen8_insert_pte idx = gen8_insert_pte(start);
1095 		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1096 		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1097 		unsigned int page_size;
1098 		bool maybe_64K = false;
1099 		gen8_pte_t encode = pte_encode;
1100 		gen8_pte_t *vaddr;
1101 		u16 index, max;
1102 
1103 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1104 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1105 		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1106 			index = idx.pde;
1107 			max = I915_PDES;
1108 			page_size = I915_GTT_PAGE_SIZE_2M;
1109 
1110 			encode |= GEN8_PDE_PS_2M;
1111 
1112 			vaddr = kmap_atomic_px(pd);
1113 		} else {
1114 			struct i915_page_table *pt = pd->page_table[idx.pde];
1115 
1116 			index = idx.pte;
1117 			max = GEN8_PTES;
1118 			page_size = I915_GTT_PAGE_SIZE;
1119 
1120 			if (!index &&
1121 			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1122 			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1123 			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1124 			     rem >= (max - index) << PAGE_SHIFT))
1125 				maybe_64K = true;
1126 
1127 			vaddr = kmap_atomic_px(pt);
1128 		}
1129 
1130 		do {
1131 			GEM_BUG_ON(iter->sg->length < page_size);
1132 			vaddr[index++] = encode | iter->dma;
1133 
1134 			start += page_size;
1135 			iter->dma += page_size;
1136 			rem -= page_size;
1137 			if (iter->dma >= iter->max) {
1138 				iter->sg = __sg_next(iter->sg);
1139 				if (!iter->sg)
1140 					break;
1141 
1142 				rem = iter->sg->length;
1143 				iter->dma = sg_dma_address(iter->sg);
1144 				iter->max = iter->dma + rem;
1145 
1146 				if (maybe_64K && index < max &&
1147 				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1148 				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1149 				       rem >= (max - index) << PAGE_SHIFT)))
1150 					maybe_64K = false;
1151 
1152 				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1153 					break;
1154 			}
1155 		} while (rem >= page_size && index < max);
1156 
1157 		kunmap_atomic(vaddr);
1158 
1159 		/*
1160 		 * Is it safe to mark the 2M block as 64K? -- Either we have
1161 		 * filled whole page-table with 64K entries, or filled part of
1162 		 * it and have reached the end of the sg table and we have
1163 		 * enough padding.
1164 		 */
1165 		if (maybe_64K &&
1166 		    (index == max ||
1167 		     (i915_vm_has_scratch_64K(vma->vm) &&
1168 		      !iter->sg && IS_ALIGNED(vma->node.start +
1169 					      vma->node.size,
1170 					      I915_GTT_PAGE_SIZE_2M)))) {
1171 			vaddr = kmap_atomic_px(pd);
1172 			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1173 			kunmap_atomic(vaddr);
1174 			page_size = I915_GTT_PAGE_SIZE_64K;
1175 		}
1176 
1177 		vma->page_sizes.gtt |= page_size;
1178 	} while (iter->sg);
1179 }
1180 
1181 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1182 				   struct i915_vma *vma,
1183 				   enum i915_cache_level cache_level,
1184 				   u32 unused)
1185 {
1186 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1187 	struct sgt_dma iter = sgt_dma(vma);
1188 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1189 
1190 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1191 		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
1192 	} else {
1193 		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1194 
1195 		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1196 						     &iter, &idx, cache_level))
1197 			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1198 
1199 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1200 	}
1201 }
1202 
1203 static void gen8_free_page_tables(struct i915_address_space *vm,
1204 				  struct i915_page_directory *pd)
1205 {
1206 	int i;
1207 
1208 	if (!px_page(pd))
1209 		return;
1210 
1211 	for (i = 0; i < I915_PDES; i++) {
1212 		if (pd->page_table[i] != vm->scratch_pt)
1213 			free_pt(vm, pd->page_table[i]);
1214 	}
1215 }
1216 
1217 static int gen8_init_scratch(struct i915_address_space *vm)
1218 {
1219 	int ret;
1220 
1221 	ret = setup_scratch_page(vm, I915_GFP_DMA);
1222 	if (ret)
1223 		return ret;
1224 
1225 	vm->scratch_pt = alloc_pt(vm);
1226 	if (IS_ERR(vm->scratch_pt)) {
1227 		ret = PTR_ERR(vm->scratch_pt);
1228 		goto free_scratch_page;
1229 	}
1230 
1231 	vm->scratch_pd = alloc_pd(vm);
1232 	if (IS_ERR(vm->scratch_pd)) {
1233 		ret = PTR_ERR(vm->scratch_pd);
1234 		goto free_pt;
1235 	}
1236 
1237 	if (use_4lvl(vm)) {
1238 		vm->scratch_pdp = alloc_pdp(vm);
1239 		if (IS_ERR(vm->scratch_pdp)) {
1240 			ret = PTR_ERR(vm->scratch_pdp);
1241 			goto free_pd;
1242 		}
1243 	}
1244 
1245 	gen8_initialize_pt(vm, vm->scratch_pt);
1246 	gen8_initialize_pd(vm, vm->scratch_pd);
1247 	if (use_4lvl(vm))
1248 		gen8_initialize_pdp(vm, vm->scratch_pdp);
1249 
1250 	return 0;
1251 
1252 free_pd:
1253 	free_pd(vm, vm->scratch_pd);
1254 free_pt:
1255 	free_pt(vm, vm->scratch_pt);
1256 free_scratch_page:
1257 	cleanup_scratch_page(vm);
1258 
1259 	return ret;
1260 }
1261 
1262 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1263 {
1264 	struct i915_address_space *vm = &ppgtt->base;
1265 	struct drm_i915_private *dev_priv = vm->i915;
1266 	enum vgt_g2v_type msg;
1267 	int i;
1268 
1269 	if (use_4lvl(vm)) {
1270 		const u64 daddr = px_dma(&ppgtt->pml4);
1271 
1272 		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1273 		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1274 
1275 		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1276 				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1277 	} else {
1278 		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1279 			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1280 
1281 			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1282 			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1283 		}
1284 
1285 		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1286 				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1287 	}
1288 
1289 	I915_WRITE(vgtif_reg(g2v_notify), msg);
1290 
1291 	return 0;
1292 }
1293 
1294 static void gen8_free_scratch(struct i915_address_space *vm)
1295 {
1296 	if (use_4lvl(vm))
1297 		free_pdp(vm, vm->scratch_pdp);
1298 	free_pd(vm, vm->scratch_pd);
1299 	free_pt(vm, vm->scratch_pt);
1300 	cleanup_scratch_page(vm);
1301 }
1302 
1303 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1304 				    struct i915_page_directory_pointer *pdp)
1305 {
1306 	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1307 	int i;
1308 
1309 	for (i = 0; i < pdpes; i++) {
1310 		if (pdp->page_directory[i] == vm->scratch_pd)
1311 			continue;
1312 
1313 		gen8_free_page_tables(vm, pdp->page_directory[i]);
1314 		free_pd(vm, pdp->page_directory[i]);
1315 	}
1316 
1317 	free_pdp(vm, pdp);
1318 }
1319 
1320 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1321 {
1322 	int i;
1323 
1324 	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1325 		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
1326 			continue;
1327 
1328 		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
1329 	}
1330 
1331 	cleanup_px(&ppgtt->base, &ppgtt->pml4);
1332 }
1333 
1334 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1335 {
1336 	struct drm_i915_private *dev_priv = vm->i915;
1337 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1338 
1339 	if (intel_vgpu_active(dev_priv))
1340 		gen8_ppgtt_notify_vgt(ppgtt, false);
1341 
1342 	if (use_4lvl(vm))
1343 		gen8_ppgtt_cleanup_4lvl(ppgtt);
1344 	else
1345 		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
1346 
1347 	gen8_free_scratch(vm);
1348 }
1349 
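/* Allocate, on demand, the page tables covering [start, start + length)
 * beneath @pd, replacing scratch entries and bumping the use counts. On
 * failure the partially allocated range is unwound back to scratch.
 */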
1350 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1351 			       struct i915_page_directory *pd,
1352 			       u64 start, u64 length)
1353 {
1354 	struct i915_page_table *pt;
1355 	u64 from = start;
1356 	unsigned int pde;
1357 
1358 	gen8_for_each_pde(pt, pd, start, length, pde) {
1359 		int count = gen8_pte_count(start, length);
1360 
1361 		if (pt == vm->scratch_pt) {
1362 			pd->used_pdes++;
1363 
1364 			pt = alloc_pt(vm);
1365 			if (IS_ERR(pt)) {
1366 				pd->used_pdes--;
1367 				goto unwind;
1368 			}
1369 
1370 			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1371 				gen8_initialize_pt(vm, pt);
1372 
1373 			gen8_ppgtt_set_pde(vm, pd, pt, pde);
1374 			GEM_BUG_ON(pd->used_pdes > I915_PDES);
1375 		}
1376 
1377 		pt->used_ptes += count;
1378 	}
1379 	return 0;
1380 
1381 unwind:
1382 	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1383 	return -ENOMEM;
1384 }
1385 
1386 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1387 				struct i915_page_directory_pointer *pdp,
1388 				u64 start, u64 length)
1389 {
1390 	struct i915_page_directory *pd;
1391 	u64 from = start;
1392 	unsigned int pdpe;
1393 	int ret;
1394 
1395 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1396 		if (pd == vm->scratch_pd) {
1397 			pdp->used_pdpes++;
1398 
1399 			pd = alloc_pd(vm);
1400 			if (IS_ERR(pd)) {
1401 				pdp->used_pdpes--;
1402 				goto unwind;
1403 			}
1404 
1405 			gen8_initialize_pd(vm, pd);
1406 			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1407 			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1408 
1409 			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
1410 		}
1411 
1412 		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1413 		if (unlikely(ret))
1414 			goto unwind_pd;
1415 	}
1416 
1417 	return 0;
1418 
1419 unwind_pd:
1420 	if (!pd->used_pdes) {
1421 		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1422 		GEM_BUG_ON(!pdp->used_pdpes);
1423 		pdp->used_pdpes--;
1424 		free_pd(vm, pd);
1425 	}
1426 unwind:
1427 	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1428 	return -ENOMEM;
1429 }
1430 
1431 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1432 				 u64 start, u64 length)
1433 {
1434 	return gen8_ppgtt_alloc_pdp(vm,
1435 				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
1436 }
1437 
1438 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1439 				 u64 start, u64 length)
1440 {
1441 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1442 	struct i915_pml4 *pml4 = &ppgtt->pml4;
1443 	struct i915_page_directory_pointer *pdp;
1444 	u64 from = start;
1445 	u32 pml4e;
1446 	int ret;
1447 
1448 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1449 		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1450 			pdp = alloc_pdp(vm);
1451 			if (IS_ERR(pdp))
1452 				goto unwind;
1453 
1454 			gen8_initialize_pdp(vm, pdp);
1455 			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1456 		}
1457 
1458 		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1459 		if (unlikely(ret))
1460 			goto unwind_pdp;
1461 	}
1462 
1463 	return 0;
1464 
1465 unwind_pdp:
1466 	if (!pdp->used_pdpes) {
1467 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1468 		free_pdp(vm, pdp);
1469 	}
1470 unwind:
1471 	gen8_ppgtt_clear_4lvl(vm, from, start - from);
1472 	return -ENOMEM;
1473 }
1474 
1475 static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1476 			  struct i915_page_directory_pointer *pdp,
1477 			  u64 start, u64 length,
1478 			  gen8_pte_t scratch_pte,
1479 			  struct seq_file *m)
1480 {
1481 	struct i915_address_space *vm = &ppgtt->base;
1482 	struct i915_page_directory *pd;
1483 	u32 pdpe;
1484 
1485 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1486 		struct i915_page_table *pt;
1487 		u64 pd_len = length;
1488 		u64 pd_start = start;
1489 		u32 pde;
1490 
1491 		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
1492 			continue;
1493 
1494 		seq_printf(m, "\tPDPE #%d\n", pdpe);
1495 		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1496 			u32 pte;
1497 			gen8_pte_t *pt_vaddr;
1498 
1499 			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
1500 				continue;
1501 
1502 			pt_vaddr = kmap_atomic_px(pt);
1503 			for (pte = 0; pte < GEN8_PTES; pte += 4) {
1504 				u64 va = (pdpe << GEN8_PDPE_SHIFT |
1505 					  pde << GEN8_PDE_SHIFT |
1506 					  pte << GEN8_PTE_SHIFT);
1507 				int i;
1508 				bool found = false;
1509 
1510 				for (i = 0; i < 4; i++)
1511 					if (pt_vaddr[pte + i] != scratch_pte)
1512 						found = true;
1513 				if (!found)
1514 					continue;
1515 
1516 				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1517 				for (i = 0; i < 4; i++) {
1518 					if (pt_vaddr[pte + i] != scratch_pte)
1519 						seq_printf(m, " %llx", pt_vaddr[pte + i]);
1520 					else
1521 						seq_puts(m, "  SCRATCH ");
1522 				}
1523 				seq_puts(m, "\n");
1524 			}
1525 			kunmap_atomic(pt_vaddr);
1526 		}
1527 	}
1528 }
1529 
1530 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1531 {
1532 	struct i915_address_space *vm = &ppgtt->base;
1533 	const gen8_pte_t scratch_pte =
1534 		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
1535 	u64 start = 0, length = ppgtt->base.total;
1536 
1537 	if (use_4lvl(vm)) {
1538 		u64 pml4e;
1539 		struct i915_pml4 *pml4 = &ppgtt->pml4;
1540 		struct i915_page_directory_pointer *pdp;
1541 
1542 		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1543 			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
1544 				continue;
1545 
1546 			seq_printf(m, "    PML4E #%llu\n", pml4e);
1547 			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
1548 		}
1549 	} else {
1550 		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
1551 	}
1552 }
1553 
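/* When running as a vGPU guest, populate every top-level pdpe up front with
 * a real page directory (rather than scratch) so that the top level layout
 * never changes at runtime.
 */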
1554 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1555 {
1556 	struct i915_address_space *vm = &ppgtt->base;
1557 	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1558 	struct i915_page_directory *pd;
1559 	u64 start = 0, length = ppgtt->base.total;
1560 	u64 from = start;
1561 	unsigned int pdpe;
1562 
1563 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1564 		pd = alloc_pd(vm);
1565 		if (IS_ERR(pd))
1566 			goto unwind;
1567 
1568 		gen8_initialize_pd(vm, pd);
1569 		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1570 		pdp->used_pdpes++;
1571 	}
1572 
1573 	pdp->used_pdpes++; /* never remove */
1574 	return 0;
1575 
1576 unwind:
1577 	start -= from;
1578 	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1579 		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1580 		free_pd(vm, pd);
1581 	}
1582 	pdp->used_pdpes = 0;
1583 	return -ENOMEM;
1584 }
1585 
1586 /*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
1591  *
1592  */
1593 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1594 {
1595 	struct i915_address_space *vm = &ppgtt->base;
1596 	struct drm_i915_private *dev_priv = vm->i915;
1597 	int ret;
1598 
1599 	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1600 		1ULL << 48 :
1601 		1ULL << 32;
1602 
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
1605 	 */
1606 	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1607 		ppgtt->base.pt_kmap_wc = true;
1608 
1609 	ret = gen8_init_scratch(&ppgtt->base);
1610 	if (ret) {
1611 		ppgtt->base.total = 0;
1612 		return ret;
1613 	}
1614 
1615 	if (use_4lvl(vm)) {
1616 		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
1617 		if (ret)
1618 			goto free_scratch;
1619 
1620 		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1621 
1622 		ppgtt->switch_mm = gen8_mm_switch_4lvl;
1623 		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1624 		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
1625 		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
1626 	} else {
1627 		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
1628 		if (ret)
1629 			goto free_scratch;
1630 
1631 		if (intel_vgpu_active(dev_priv)) {
1632 			ret = gen8_preallocate_top_level_pdp(ppgtt);
1633 			if (ret) {
1634 				__pdp_fini(&ppgtt->pdp);
1635 				goto free_scratch;
1636 			}
1637 		}
1638 
1639 		ppgtt->switch_mm = gen8_mm_switch_3lvl;
1640 		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1641 		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
1642 		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
1643 	}
1644 
1645 	if (intel_vgpu_active(dev_priv))
1646 		gen8_ppgtt_notify_vgt(ppgtt, true);
1647 
1648 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1649 	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1650 	ppgtt->base.bind_vma = ppgtt_bind_vma;
1651 	ppgtt->base.set_pages = ppgtt_set_pages;
1652 	ppgtt->base.clear_pages = clear_pages;
1653 	ppgtt->debug_dump = gen8_dump_ppgtt;
1654 
1655 	return 0;
1656 
1657 free_scratch:
1658 	gen8_free_scratch(&ppgtt->base);
1659 	return ret;
1660 }
1661 
1662 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1663 {
1664 	struct i915_address_space *vm = &ppgtt->base;
1665 	struct i915_page_table *unused;
1666 	gen6_pte_t scratch_pte;
1667 	u32 pd_entry, pte, pde;
1668 	u32 start = 0, length = ppgtt->base.total;
1669 
1670 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1671 				     I915_CACHE_LLC, 0);
1672 
1673 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
1674 		u32 expected;
1675 		gen6_pte_t *pt_vaddr;
1676 		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1677 		pd_entry = readl(ppgtt->pd_addr + pde);
1678 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1679 
1680 		if (pd_entry != expected)
1681 			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1682 				   pde,
1683 				   pd_entry,
1684 				   expected);
1685 		seq_printf(m, "\tPDE: %x\n", pd_entry);
1686 
1687 		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
1688 
1689 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
1690 			unsigned long va =
1691 				(pde * PAGE_SIZE * GEN6_PTES) +
1692 				(pte * PAGE_SIZE);
1693 			int i;
1694 			bool found = false;
1695 			for (i = 0; i < 4; i++)
1696 				if (pt_vaddr[pte + i] != scratch_pte)
1697 					found = true;
1698 			if (!found)
1699 				continue;
1700 
1701 			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1702 			for (i = 0; i < 4; i++) {
1703 				if (pt_vaddr[pte + i] != scratch_pte)
1704 					seq_printf(m, " %08x", pt_vaddr[pte + i]);
1705 				else
1706 					seq_puts(m, "  SCRATCH ");
1707 			}
1708 			seq_puts(m, "\n");
1709 		}
1710 		kunmap_atomic(pt_vaddr);
1711 	}
1712 }
1713 
1714 /* Write pde (index) from the page directory @pd to the page table @pt */
1715 static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1716 				  const unsigned int pde,
1717 				  const struct i915_page_table *pt)
1718 {
1719 	/* Caller needs to make sure the write completes if necessary */
1720 	writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1721 		       ppgtt->pd_addr + pde);
1722 }
1723 
/* Write out the PDEs for all the page tables in the ppgtt structure covering
 * the given range. */
1726 static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
1727 				  u32 start, u32 length)
1728 {
1729 	struct i915_page_table *pt;
1730 	unsigned int pde;
1731 
1732 	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1733 		gen6_write_pde(ppgtt, pde, pt);
1734 
1735 	mark_tlbs_dirty(ppgtt);
1736 	wmb();
1737 }
1738 
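/* Convert the GGTT offset of the gen6 page directory into the format
 * programmed into RING_PP_DIR_BASE.
 */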
1739 static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1740 {
1741 	GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1742 	return ppgtt->pd.base.ggtt_offset << 10;
1743 }
1744 
1745 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1746 			 struct drm_i915_gem_request *req)
1747 {
1748 	struct intel_engine_cs *engine = req->engine;
1749 	u32 *cs;
1750 
1751 	/* NB: TLBs must be flushed and invalidated before a switch */
1752 	cs = intel_ring_begin(req, 6);
1753 	if (IS_ERR(cs))
1754 		return PTR_ERR(cs);
1755 
1756 	*cs++ = MI_LOAD_REGISTER_IMM(2);
1757 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1758 	*cs++ = PP_DIR_DCLV_2G;
1759 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1760 	*cs++ = get_pd_offset(ppgtt);
1761 	*cs++ = MI_NOOP;
1762 	intel_ring_advance(req, cs);
1763 
1764 	return 0;
1765 }
1766 
1767 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1768 			  struct drm_i915_gem_request *req)
1769 {
1770 	struct intel_engine_cs *engine = req->engine;
1771 	u32 *cs;
1772 
1773 	/* NB: TLBs must be flushed and invalidated before a switch */
1774 	cs = intel_ring_begin(req, 6);
1775 	if (IS_ERR(cs))
1776 		return PTR_ERR(cs);
1777 
1778 	*cs++ = MI_LOAD_REGISTER_IMM(2);
1779 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1780 	*cs++ = PP_DIR_DCLV_2G;
1781 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1782 	*cs++ = get_pd_offset(ppgtt);
1783 	*cs++ = MI_NOOP;
1784 	intel_ring_advance(req, cs);
1785 
1786 	return 0;
1787 }
1788 
1789 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1790 			  struct drm_i915_gem_request *req)
1791 {
1792 	struct intel_engine_cs *engine = req->engine;
1793 	struct drm_i915_private *dev_priv = req->i915;
1794 
1795 	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1796 	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1797 	return 0;
1798 }
1799 
1800 static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1801 {
1802 	struct intel_engine_cs *engine;
1803 	enum intel_engine_id id;
1804 
1805 	for_each_engine(engine, dev_priv, id) {
1806 		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1807 				 GEN8_GFX_PPGTT_48B : 0;
1808 		I915_WRITE(RING_MODE_GEN7(engine),
1809 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1810 	}
1811 }
1812 
1813 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1814 {
1815 	struct intel_engine_cs *engine;
1816 	u32 ecochk, ecobits;
1817 	enum intel_engine_id id;
1818 
1819 	ecobits = I915_READ(GAC_ECO_BITS);
1820 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1821 
1822 	ecochk = I915_READ(GAM_ECOCHK);
1823 	if (IS_HASWELL(dev_priv)) {
1824 		ecochk |= ECOCHK_PPGTT_WB_HSW;
1825 	} else {
1826 		ecochk |= ECOCHK_PPGTT_LLC_IVB;
1827 		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1828 	}
1829 	I915_WRITE(GAM_ECOCHK, ecochk);
1830 
1831 	for_each_engine(engine, dev_priv, id) {
1832 		/* GFX_MODE is per-ring on gen7+ */
1833 		I915_WRITE(RING_MODE_GEN7(engine),
1834 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1835 	}
1836 }
1837 
1838 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1839 {
1840 	u32 ecochk, gab_ctl, ecobits;
1841 
1842 	ecobits = I915_READ(GAC_ECO_BITS);
1843 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1844 		   ECOBITS_PPGTT_CACHE64B);
1845 
1846 	gab_ctl = I915_READ(GAB_CTL);
1847 	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1848 
1849 	ecochk = I915_READ(GAM_ECOCHK);
1850 	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1851 
1852 	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1853 }
1854 
/* PPGTT support for Sandybridge/Gen6 and later */
1856 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1857 				   u64 start, u64 length)
1858 {
1859 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1860 	unsigned int first_entry = start >> PAGE_SHIFT;
1861 	unsigned int pde = first_entry / GEN6_PTES;
1862 	unsigned int pte = first_entry % GEN6_PTES;
1863 	unsigned int num_entries = length >> PAGE_SHIFT;
1864 	gen6_pte_t scratch_pte =
1865 		vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1866 
1867 	while (num_entries) {
1868 		struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1869 		unsigned int end = min(pte + num_entries, GEN6_PTES);
1870 		gen6_pte_t *vaddr;
1871 
1872 		num_entries -= end - pte;
1873 
		/* Note that the hw doesn't support removing PDEs on the fly
1875 		 * (they are cached inside the context with no means to
1876 		 * invalidate the cache), so we can only reset the PTE
1877 		 * entries back to scratch.
1878 		 */
1879 
1880 		vaddr = kmap_atomic_px(pt);
1881 		do {
1882 			vaddr[pte++] = scratch_pte;
1883 		} while (pte < end);
1884 		kunmap_atomic(vaddr);
1885 
1886 		pte = 0;
1887 	}
1888 }
1889 
1890 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1891 				      struct i915_vma *vma,
1892 				      enum i915_cache_level cache_level,
1893 				      u32 flags)
1894 {
1895 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1896 	unsigned first_entry = vma->node.start >> PAGE_SHIFT;
1897 	unsigned act_pt = first_entry / GEN6_PTES;
1898 	unsigned act_pte = first_entry % GEN6_PTES;
1899 	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1900 	struct sgt_dma iter = sgt_dma(vma);
1901 	gen6_pte_t *vaddr;
1902 
1903 	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1904 	do {
1905 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1906 
1907 		iter.dma += PAGE_SIZE;
1908 		if (iter.dma == iter.max) {
1909 			iter.sg = __sg_next(iter.sg);
1910 			if (!iter.sg)
1911 				break;
1912 
1913 			iter.dma = sg_dma_address(iter.sg);
1914 			iter.max = iter.dma + iter.sg->length;
1915 		}
1916 
1917 		if (++act_pte == GEN6_PTES) {
1918 			kunmap_atomic(vaddr);
1919 			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1920 			act_pte = 0;
1921 		}
1922 	} while (1);
1923 	kunmap_atomic(vaddr);
1924 
1925 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1926 }
1927 
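/* Allocate any page tables missing for [start, start + length), write their
 * PDEs and, if anything was added, flush so the GPU sees the update.
 */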
1928 static int gen6_alloc_va_range(struct i915_address_space *vm,
1929 			       u64 start, u64 length)
1930 {
1931 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1932 	struct i915_page_table *pt;
1933 	u64 from = start;
1934 	unsigned int pde;
1935 	bool flush = false;
1936 
1937 	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1938 		if (pt == vm->scratch_pt) {
1939 			pt = alloc_pt(vm);
1940 			if (IS_ERR(pt))
1941 				goto unwind_out;
1942 
1943 			gen6_initialize_pt(vm, pt);
1944 			ppgtt->pd.page_table[pde] = pt;
1945 			gen6_write_pde(ppgtt, pde, pt);
1946 			flush = true;
1947 		}
1948 	}
1949 
1950 	if (flush) {
1951 		mark_tlbs_dirty(ppgtt);
1952 		wmb();
1953 	}
1954 
1955 	return 0;
1956 
1957 unwind_out:
1958 	gen6_ppgtt_clear_range(vm, from, start);
1959 	return -ENOMEM;
1960 }
1961 
1962 static int gen6_init_scratch(struct i915_address_space *vm)
1963 {
1964 	int ret;
1965 
1966 	ret = setup_scratch_page(vm, I915_GFP_DMA);
1967 	if (ret)
1968 		return ret;
1969 
1970 	vm->scratch_pt = alloc_pt(vm);
1971 	if (IS_ERR(vm->scratch_pt)) {
1972 		cleanup_scratch_page(vm);
1973 		return PTR_ERR(vm->scratch_pt);
1974 	}
1975 
1976 	gen6_initialize_pt(vm, vm->scratch_pt);
1977 
1978 	return 0;
1979 }
1980 
1981 static void gen6_free_scratch(struct i915_address_space *vm)
1982 {
1983 	free_pt(vm, vm->scratch_pt);
1984 	cleanup_scratch_page(vm);
1985 }
1986 
1987 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1988 {
1989 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1990 	struct i915_page_directory *pd = &ppgtt->pd;
1991 	struct i915_page_table *pt;
1992 	u32 pde;
1993 
1994 	drm_mm_remove_node(&ppgtt->node);
1995 
1996 	gen6_for_all_pdes(pt, pd, pde)
1997 		if (pt != vm->scratch_pt)
1998 			free_pt(vm, pt);
1999 
2000 	gen6_free_scratch(vm);
2001 }
2002 
2003 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
2004 {
2005 	struct i915_address_space *vm = &ppgtt->base;
2006 	struct drm_i915_private *dev_priv = ppgtt->base.i915;
2007 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2008 	int ret;
2009 
	/* The PPGTT page directory resides in the GGTT and consists of 512
	 * PDE entries. The allocator works in address space sizes, so it's
	 * multiplied by page size. We allocate at the top of the GTT to
	 * avoid fragmentation.
	 */
2014 	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
2015 
2016 	ret = gen6_init_scratch(vm);
2017 	if (ret)
2018 		return ret;
2019 
2020 	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
2021 				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
2022 				  I915_COLOR_UNEVICTABLE,
2023 				  0, ggtt->base.total,
2024 				  PIN_HIGH);
2025 	if (ret)
2026 		goto err_out;
2027 
2028 	if (ppgtt->node.start < ggtt->mappable_end)
2029 		DRM_DEBUG("Forced to use aperture for PDEs\n");
2030 
2031 	ppgtt->pd.base.ggtt_offset =
2032 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2033 
2034 	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
2035 		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2036 
2037 	return 0;
2038 
2039 err_out:
2040 	gen6_free_scratch(vm);
2041 	return ret;
2042 }
2043 
2044 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2045 {
2046 	return gen6_ppgtt_allocate_page_directories(ppgtt);
2047 }
2048 
2049 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2050 				  u64 start, u64 length)
2051 {
2052 	struct i915_page_table *unused;
2053 	u32 pde;
2054 
2055 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
2056 		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2057 }
2058 
2059 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2060 {
2061 	struct drm_i915_private *dev_priv = ppgtt->base.i915;
2062 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2063 	int ret;
2064 
2065 	ppgtt->base.pte_encode = ggtt->base.pte_encode;
2066 	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
2067 		ppgtt->switch_mm = gen6_mm_switch;
2068 	else if (IS_HASWELL(dev_priv))
2069 		ppgtt->switch_mm = hsw_mm_switch;
2070 	else if (IS_GEN7(dev_priv))
2071 		ppgtt->switch_mm = gen7_mm_switch;
2072 	else
2073 		BUG();
2074 
2075 	ret = gen6_ppgtt_alloc(ppgtt);
2076 	if (ret)
2077 		return ret;
2078 
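	/*
	 * With the usual I915_PDES == 512 and GEN6_PTES == 1024 this is
	 * 512 * 1024 * 4KiB, i.e. a 2GiB per-process address space.
	 */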
2079 	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2080 
2081 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
2082 	gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
2083 
2084 	ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
2085 	if (ret) {
2086 		gen6_ppgtt_cleanup(&ppgtt->base);
2087 		return ret;
2088 	}
2089 
2090 	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2091 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2092 	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2093 	ppgtt->base.bind_vma = ppgtt_bind_vma;
2094 	ppgtt->base.set_pages = ppgtt_set_pages;
2095 	ppgtt->base.clear_pages = clear_pages;
2096 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2097 	ppgtt->debug_dump = gen6_dump_ppgtt;
2098 
2099 	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
2100 			 ppgtt->node.size >> 20,
2101 			 ppgtt->node.start / PAGE_SIZE);
2102 
2103 	DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
2104 			 ppgtt->pd.base.ggtt_offset << 10);
2105 
2106 	return 0;
2107 }
2108 
2109 static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2110 			   struct drm_i915_private *dev_priv)
2111 {
2112 	ppgtt->base.i915 = dev_priv;
2113 	ppgtt->base.dma = &dev_priv->drm.pdev->dev;
2114 
2115 	if (INTEL_INFO(dev_priv)->gen < 8)
2116 		return gen6_ppgtt_init(ppgtt);
2117 	else
2118 		return gen8_ppgtt_init(ppgtt);
2119 }
2120 
2121 static void i915_address_space_init(struct i915_address_space *vm,
2122 				    struct drm_i915_private *dev_priv,
2123 				    const char *name)
2124 {
2125 	i915_gem_timeline_init(dev_priv, &vm->timeline, name);
2126 
2127 	drm_mm_init(&vm->mm, 0, vm->total);
2128 	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
2129 
2130 	INIT_LIST_HEAD(&vm->active_list);
2131 	INIT_LIST_HEAD(&vm->inactive_list);
2132 	INIT_LIST_HEAD(&vm->unbound_list);
2133 
2134 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
2135 	pagevec_init(&vm->free_pages);
2136 }
2137 
2138 static void i915_address_space_fini(struct i915_address_space *vm)
2139 {
2140 	if (pagevec_count(&vm->free_pages))
2141 		vm_free_pages_release(vm, true);
2142 
2143 	i915_gem_timeline_fini(&vm->timeline);
2144 	drm_mm_takedown(&vm->mm);
2145 	list_del(&vm->global_link);
2146 }
2147 
2148 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2149 {
	/* This function is for GTT-related workarounds. It is called on
	 * driver load and after a GPU reset, so you can place workarounds
	 * here even if they get overwritten by a GPU reset.
	 */
2154 	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
2155 	if (IS_BROADWELL(dev_priv))
2156 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2157 	else if (IS_CHERRYVIEW(dev_priv))
2158 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2159 	else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
2160 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2161 	else if (IS_GEN9_LP(dev_priv))
2162 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2163 
2164 	/*
2165 	 * To support 64K PTEs we need to first enable the use of the
2166 	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2167 	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2168 	 * shouldn't be needed after GEN10.
2169 	 *
2170 	 * 64K pages were first introduced from BDW+, although technically they
2171 	 * only *work* from gen9+. For pre-BDW we instead have the option for
2172 	 * 32K pages, but we don't currently have any support for it in our
2173 	 * driver.
2174 	 */
2175 	if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2176 	    INTEL_GEN(dev_priv) <= 10)
2177 		I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2178 			   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2179 			   GAMW_ECO_ENABLE_64K_IPS_FIELD);
2180 }
2181 
2182 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2183 {
2184 	gtt_write_workarounds(dev_priv);
2185 
2186 	/* In the case of execlists, PPGTT is enabled by the context descriptor
2187 	 * and the PDPs are contained within the context itself.  We don't
2188 	 * need to do anything here. */
2189 	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
2190 		return 0;
2191 
2192 	if (!USES_PPGTT(dev_priv))
2193 		return 0;
2194 
2195 	if (IS_GEN6(dev_priv))
2196 		gen6_ppgtt_enable(dev_priv);
2197 	else if (IS_GEN7(dev_priv))
2198 		gen7_ppgtt_enable(dev_priv);
2199 	else if (INTEL_GEN(dev_priv) >= 8)
2200 		gen8_ppgtt_enable(dev_priv);
2201 	else
2202 		MISSING_CASE(INTEL_GEN(dev_priv));
2203 
2204 	return 0;
2205 }
2206 
2207 struct i915_hw_ppgtt *
2208 i915_ppgtt_create(struct drm_i915_private *dev_priv,
2209 		  struct drm_i915_file_private *fpriv,
2210 		  const char *name)
2211 {
2212 	struct i915_hw_ppgtt *ppgtt;
2213 	int ret;
2214 
2215 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2216 	if (!ppgtt)
2217 		return ERR_PTR(-ENOMEM);
2218 
2219 	ret = __hw_ppgtt_init(ppgtt, dev_priv);
2220 	if (ret) {
2221 		kfree(ppgtt);
2222 		return ERR_PTR(ret);
2223 	}
2224 
2225 	kref_init(&ppgtt->ref);
2226 	i915_address_space_init(&ppgtt->base, dev_priv, name);
2227 	ppgtt->base.file = fpriv;
2228 
2229 	trace_i915_ppgtt_create(&ppgtt->base);
2230 
2231 	return ppgtt;
2232 }
2233 
2234 void i915_ppgtt_close(struct i915_address_space *vm)
2235 {
2236 	struct list_head *phases[] = {
2237 		&vm->active_list,
2238 		&vm->inactive_list,
2239 		&vm->unbound_list,
2240 		NULL,
2241 	}, **phase;
2242 
2243 	GEM_BUG_ON(vm->closed);
2244 	vm->closed = true;
2245 
2246 	for (phase = phases; *phase; phase++) {
2247 		struct i915_vma *vma, *vn;
2248 
2249 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
2250 			if (!i915_vma_is_closed(vma))
2251 				i915_vma_close(vma);
2252 	}
2253 }
2254 
2255 void i915_ppgtt_release(struct kref *kref)
2256 {
2257 	struct i915_hw_ppgtt *ppgtt =
2258 		container_of(kref, struct i915_hw_ppgtt, ref);
2259 
2260 	trace_i915_ppgtt_release(&ppgtt->base);
2261 
2262 	/* vmas should already be unbound and destroyed */
2263 	WARN_ON(!list_empty(&ppgtt->base.active_list));
2264 	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2265 	WARN_ON(!list_empty(&ppgtt->base.unbound_list));
2266 
2267 	ppgtt->base.cleanup(&ppgtt->base);
2268 	i915_address_space_fini(&ppgtt->base);
2269 	kfree(ppgtt);
2270 }
2271 
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
2275 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2276 {
2277 	/* Query intel_iommu to see if we need the workaround. Presumably that
2278 	 * was loaded first.
2279 	 */
2280 	return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
2281 }
2282 
2283 static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
2284 {
2285 	struct intel_engine_cs *engine;
2286 	enum intel_engine_id id;
2287 	u32 fault;
2288 
2289 	for_each_engine(engine, dev_priv, id) {
2290 		fault = I915_READ(RING_FAULT_REG(engine));
2291 		if (fault & RING_FAULT_VALID) {
2292 			DRM_DEBUG_DRIVER("Unexpected fault\n"
2293 					 "\tAddr: 0x%08lx\n"
2294 					 "\tAddress space: %s\n"
2295 					 "\tSource ID: %d\n"
2296 					 "\tType: %d\n",
2297 					 fault & PAGE_MASK,
2298 					 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2299 					 RING_FAULT_SRCID(fault),
2300 					 RING_FAULT_FAULT_TYPE(fault));
2301 			I915_WRITE(RING_FAULT_REG(engine),
2302 				   fault & ~RING_FAULT_VALID);
2303 		}
2304 	}
2305 
2306 	POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
2307 }
2308 
2309 static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
2310 {
2311 	u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2312 
2313 	if (fault & RING_FAULT_VALID) {
2314 		u32 fault_data0, fault_data1;
2315 		u64 fault_addr;
2316 
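		/*
		 * The faulting address is split across the two TLB data
		 * registers: DATA0 carries VA bits 43:12 and the
		 * FAULT_VA_HIGH_BITS of DATA1 supply the bits above that,
		 * reassembled below.
		 */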
2317 		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
2318 		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
2319 		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
2320 			     ((u64)fault_data0 << 12);
2321 
2322 		DRM_DEBUG_DRIVER("Unexpected fault\n"
2323 				 "\tAddr: 0x%08x_%08x\n"
2324 				 "\tAddress space: %s\n"
2325 				 "\tEngine ID: %d\n"
2326 				 "\tSource ID: %d\n"
2327 				 "\tType: %d\n",
2328 				 upper_32_bits(fault_addr),
2329 				 lower_32_bits(fault_addr),
2330 				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
2331 				 GEN8_RING_FAULT_ENGINE_ID(fault),
2332 				 RING_FAULT_SRCID(fault),
2333 				 RING_FAULT_FAULT_TYPE(fault));
2334 		I915_WRITE(GEN8_RING_FAULT_REG,
2335 			   fault & ~RING_FAULT_VALID);
2336 	}
2337 
2338 	POSTING_READ(GEN8_RING_FAULT_REG);
2339 }
2340 
2341 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2342 {
2343 	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
2344 	if (INTEL_GEN(dev_priv) >= 8)
2345 		gen8_check_and_clear_faults(dev_priv);
2346 	else if (INTEL_GEN(dev_priv) >= 6)
2347 		gen6_check_and_clear_faults(dev_priv);
2348 	else
2349 		return;
2350 }
2351 
2352 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2353 {
2354 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2355 
2356 	/* Don't bother messing with faults pre GEN6 as we have little
2357 	 * documentation supporting that it's a good idea.
2358 	 */
2359 	if (INTEL_GEN(dev_priv) < 6)
2360 		return;
2361 
2362 	i915_check_and_clear_faults(dev_priv);
2363 
2364 	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
2365 
2366 	i915_ggtt_invalidate(dev_priv);
2367 }
2368 
2369 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2370 			       struct sg_table *pages)
2371 {
2372 	do {
2373 		if (dma_map_sg(&obj->base.dev->pdev->dev,
2374 			       pages->sgl, pages->nents,
2375 			       PCI_DMA_BIDIRECTIONAL))
2376 			return 0;
2377 
2378 		/* If the DMA remap fails, one cause can be that we have
2379 		 * too many objects pinned in a small remapping table,
2380 		 * such as swiotlb. Incrementally purge all other objects and
2381 		 * try again - if there are no more pages to remove from
2382 		 * the DMA remapper, i915_gem_shrink will return 0.
2383 		 */
2384 		GEM_BUG_ON(obj->mm.pages == pages);
2385 	} while (i915_gem_shrink(to_i915(obj->base.dev),
2386 				 obj->base.size >> PAGE_SHIFT, NULL,
2387 				 I915_SHRINK_BOUND |
2388 				 I915_SHRINK_UNBOUND |
2389 				 I915_SHRINK_ACTIVE));
2390 
2391 	return -ENOSPC;
2392 }
2393 
2394 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2395 {
2396 	writeq(pte, addr);
2397 }
2398 
2399 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2400 				  dma_addr_t addr,
2401 				  u64 offset,
2402 				  enum i915_cache_level level,
2403 				  u32 unused)
2404 {
2405 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2406 	gen8_pte_t __iomem *pte =
2407 		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2408 
2409 	gen8_set_pte(pte, gen8_pte_encode(addr, level));
2410 
2411 	ggtt->invalidate(vm->i915);
2412 }
2413 
2414 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2415 				     struct i915_vma *vma,
2416 				     enum i915_cache_level level,
2417 				     u32 unused)
2418 {
2419 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2420 	struct sgt_iter sgt_iter;
2421 	gen8_pte_t __iomem *gtt_entries;
2422 	const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
2423 	dma_addr_t addr;
2424 
2425 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2426 	gtt_entries += vma->node.start >> PAGE_SHIFT;
2427 	for_each_sgt_dma(addr, sgt_iter, vma->pages)
2428 		gen8_set_pte(gtt_entries++, pte_encode | addr);
2429 
2430 	wmb();
2431 
	/* We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished; the wmb() above ensures the writes have
	 * landed before the invalidate below.
	 */
2436 	ggtt->invalidate(vm->i915);
2437 }
2438 
2439 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2440 				  dma_addr_t addr,
2441 				  u64 offset,
2442 				  enum i915_cache_level level,
2443 				  u32 flags)
2444 {
2445 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2446 	gen6_pte_t __iomem *pte =
2447 		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2448 
2449 	iowrite32(vm->pte_encode(addr, level, flags), pte);
2450 
2451 	ggtt->invalidate(vm->i915);
2452 }
2453 
2454 /*
2455  * Binds an object into the global gtt with the specified cache level. The object
2456  * will be accessible to the GPU via commands whose operands reference offsets
2457  * within the global GTT as well as accessible by the GPU through the GMADR
2458  * mapped BAR (dev_priv->mm.gtt->gtt).
2459  */
2460 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2461 				     struct i915_vma *vma,
2462 				     enum i915_cache_level level,
2463 				     u32 flags)
2464 {
2465 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2466 	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2467 	unsigned int i = vma->node.start >> PAGE_SHIFT;
2468 	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);

	wmb();
2473 
	/* We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished; the wmb() above ensures the writes have
	 * landed before the invalidate below.
	 */
2478 	ggtt->invalidate(vm->i915);
2479 }
2480 
2481 static void nop_clear_range(struct i915_address_space *vm,
2482 			    u64 start, u64 length)
2483 {
2484 }
2485 
2486 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2487 				  u64 start, u64 length)
2488 {
2489 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2490 	unsigned first_entry = start >> PAGE_SHIFT;
2491 	unsigned num_entries = length >> PAGE_SHIFT;
2492 	const gen8_pte_t scratch_pte =
2493 		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2494 	gen8_pte_t __iomem *gtt_base =
2495 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2496 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2497 	int i;
2498 
2499 	if (WARN(num_entries > max_entries,
2500 		 "First entry = %d; Num entries = %d (max=%d)\n",
2501 		 first_entry, num_entries, max_entries))
2502 		num_entries = max_entries;
2503 
2504 	for (i = 0; i < num_entries; i++)
2505 		gen8_set_pte(&gtt_base[i], scratch_pte);
2506 }
2507 
2508 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2509 {
2510 	struct drm_i915_private *dev_priv = vm->i915;
2511 
2512 	/*
2513 	 * Make sure the internal GAM fifo has been cleared of all GTT
2514 	 * writes before exiting stop_machine(). This guarantees that
2515 	 * any aperture accesses waiting to start in another process
2516 	 * cannot back up behind the GTT writes causing a hang.
2517 	 * The register can be any arbitrary GAM register.
2518 	 */
2519 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2520 }
2521 
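/*
 * The bxt_vtd_ggtt_*__BKL() variants below serialise GGTT updates against
 * concurrent aperture access when VT-d is enabled, by funnelling each update
 * through stop_machine() and draining the GAM fifo afterwards (see
 * bxt_vtd_ggtt_wa() above).
 */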
2522 struct insert_page {
2523 	struct i915_address_space *vm;
2524 	dma_addr_t addr;
2525 	u64 offset;
2526 	enum i915_cache_level level;
2527 };
2528 
2529 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2530 {
2531 	struct insert_page *arg = _arg;
2532 
2533 	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2534 	bxt_vtd_ggtt_wa(arg->vm);
2535 
2536 	return 0;
2537 }
2538 
2539 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2540 					  dma_addr_t addr,
2541 					  u64 offset,
2542 					  enum i915_cache_level level,
2543 					  u32 unused)
2544 {
2545 	struct insert_page arg = { vm, addr, offset, level };
2546 
2547 	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2548 }
2549 
2550 struct insert_entries {
2551 	struct i915_address_space *vm;
2552 	struct i915_vma *vma;
2553 	enum i915_cache_level level;
2554 };
2555 
2556 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2557 {
2558 	struct insert_entries *arg = _arg;
2559 
2560 	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
2561 	bxt_vtd_ggtt_wa(arg->vm);
2562 
2563 	return 0;
2564 }
2565 
2566 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2567 					     struct i915_vma *vma,
2568 					     enum i915_cache_level level,
2569 					     u32 unused)
2570 {
2571 	struct insert_entries arg = { vm, vma, level };
2572 
2573 	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2574 }
2575 
2576 struct clear_range {
2577 	struct i915_address_space *vm;
2578 	u64 start;
2579 	u64 length;
2580 };
2581 
2582 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2583 {
2584 	struct clear_range *arg = _arg;
2585 
2586 	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2587 	bxt_vtd_ggtt_wa(arg->vm);
2588 
2589 	return 0;
2590 }
2591 
2592 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2593 					  u64 start,
2594 					  u64 length)
2595 {
2596 	struct clear_range arg = { vm, start, length };
2597 
2598 	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2599 }
2600 
2601 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2602 				  u64 start, u64 length)
2603 {
2604 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2605 	unsigned first_entry = start >> PAGE_SHIFT;
2606 	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte;
	gen6_pte_t __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2609 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2610 	int i;
2611 
2612 	if (WARN(num_entries > max_entries,
2613 		 "First entry = %d; Num entries = %d (max=%d)\n",
2614 		 first_entry, num_entries, max_entries))
2615 		num_entries = max_entries;
2616 
2617 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
2618 				     I915_CACHE_LLC, 0);
2619 
2620 	for (i = 0; i < num_entries; i++)
2621 		iowrite32(scratch_pte, &gtt_base[i]);
2622 }
2623 
2624 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2625 				  dma_addr_t addr,
2626 				  u64 offset,
2627 				  enum i915_cache_level cache_level,
2628 				  u32 unused)
2629 {
2630 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2631 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2632 
2633 	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2634 }
2635 
2636 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2637 				     struct i915_vma *vma,
2638 				     enum i915_cache_level cache_level,
2639 				     u32 unused)
2640 {
2641 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2642 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2643 
2644 	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2645 				    flags);
2646 }
2647 
2648 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2649 				  u64 start, u64 length)
2650 {
2651 	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2652 }
2653 
2654 static int ggtt_bind_vma(struct i915_vma *vma,
2655 			 enum i915_cache_level cache_level,
2656 			 u32 flags)
2657 {
2658 	struct drm_i915_private *i915 = vma->vm->i915;
2659 	struct drm_i915_gem_object *obj = vma->obj;
2660 	u32 pte_flags;
2661 
2662 	/* Currently applicable only to VLV */
2663 	pte_flags = 0;
2664 	if (obj->gt_ro)
2665 		pte_flags |= PTE_READ_ONLY;
2666 
2667 	intel_runtime_pm_get(i915);
2668 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2669 	intel_runtime_pm_put(i915);
2670 
2671 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2672 
2673 	/*
2674 	 * Without aliasing PPGTT there's no difference between
2675 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2676 	 * upgrade to both bound if we bind either to avoid double-binding.
2677 	 */
2678 	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2679 
2680 	return 0;
2681 }
2682 
2683 static void ggtt_unbind_vma(struct i915_vma *vma)
2684 {
2685 	struct drm_i915_private *i915 = vma->vm->i915;
2686 
2687 	intel_runtime_pm_get(i915);
2688 	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2689 	intel_runtime_pm_put(i915);
2690 }
2691 
2692 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2693 				 enum i915_cache_level cache_level,
2694 				 u32 flags)
2695 {
2696 	struct drm_i915_private *i915 = vma->vm->i915;
2697 	u32 pte_flags;
2698 	int ret;
2699 
2700 	/* Currently applicable only to VLV */
2701 	pte_flags = 0;
2702 	if (vma->obj->gt_ro)
2703 		pte_flags |= PTE_READ_ONLY;
2704 
2705 	if (flags & I915_VMA_LOCAL_BIND) {
2706 		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2707 
2708 		if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2709 		    appgtt->base.allocate_va_range) {
2710 			ret = appgtt->base.allocate_va_range(&appgtt->base,
2711 							     vma->node.start,
2712 							     vma->size);
2713 			if (ret)
2714 				return ret;
2715 		}
2716 
2717 		appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2718 					    pte_flags);
2719 	}
2720 
2721 	if (flags & I915_VMA_GLOBAL_BIND) {
2722 		intel_runtime_pm_get(i915);
2723 		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2724 		intel_runtime_pm_put(i915);
2725 	}
2726 
2727 	return 0;
2728 }
2729 
2730 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2731 {
2732 	struct drm_i915_private *i915 = vma->vm->i915;
2733 
2734 	if (vma->flags & I915_VMA_GLOBAL_BIND) {
2735 		intel_runtime_pm_get(i915);
2736 		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2737 		intel_runtime_pm_put(i915);
2738 	}
2739 
2740 	if (vma->flags & I915_VMA_LOCAL_BIND) {
2741 		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2742 
2743 		vm->clear_range(vm, vma->node.start, vma->size);
2744 	}
2745 }
2746 
2747 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2748 			       struct sg_table *pages)
2749 {
2750 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2751 	struct device *kdev = &dev_priv->drm.pdev->dev;
2752 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2753 
2754 	if (unlikely(ggtt->do_idle_maps)) {
2755 		if (i915_gem_wait_for_idle(dev_priv, 0)) {
2756 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2757 			/* Wait a bit, in hopes it avoids the hang */
2758 			udelay(10);
2759 		}
2760 	}
2761 
2762 	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2763 }
2764 
2765 static int ggtt_set_pages(struct i915_vma *vma)
2766 {
2767 	int ret;
2768 
2769 	GEM_BUG_ON(vma->pages);
2770 
2771 	ret = i915_get_ggtt_vma_pages(vma);
2772 	if (ret)
2773 		return ret;
2774 
2775 	vma->page_sizes = vma->obj->mm.page_sizes;
2776 
2777 	return 0;
2778 }
2779 
2780 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2781 				  unsigned long color,
2782 				  u64 *start,
2783 				  u64 *end)
2784 {
2785 	if (node->allocated && node->color != color)
2786 		*start += I915_GTT_PAGE_SIZE;
2787 
2788 	/* Also leave a space between the unallocated reserved node after the
2789 	 * GTT and any objects within the GTT, i.e. we use the color adjustment
2790 	 * to insert a guard page to prevent prefetches crossing over the
2791 	 * GTT boundary.
2792 	 */
2793 	node = list_next_entry(node, node_list);
2794 	if (node->color != color)
2795 		*end -= I915_GTT_PAGE_SIZE;
2796 }
2797 
2798 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2799 {
2800 	struct i915_ggtt *ggtt = &i915->ggtt;
2801 	struct i915_hw_ppgtt *ppgtt;
2802 	int err;
2803 
2804 	ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
2805 	if (IS_ERR(ppgtt))
2806 		return PTR_ERR(ppgtt);
2807 
2808 	if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2809 		err = -ENODEV;
2810 		goto err_ppgtt;
2811 	}
2812 
2813 	if (ppgtt->base.allocate_va_range) {
2814 		/* Note we only pre-allocate as far as the end of the global
2815 		 * GTT. On 48b / 4-level page-tables, the difference is very,
2816 		 * very significant! We have to preallocate as GVT/vgpu does
2817 		 * not like the page directory disappearing.
2818 		 */
2819 		err = ppgtt->base.allocate_va_range(&ppgtt->base,
2820 						    0, ggtt->base.total);
2821 		if (err)
2822 			goto err_ppgtt;
2823 	}
2824 
2825 	i915->mm.aliasing_ppgtt = ppgtt;
2826 
2827 	WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2828 	ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2829 
2830 	WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2831 	ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2832 
2833 	return 0;
2834 
2835 err_ppgtt:
2836 	i915_ppgtt_put(ppgtt);
2837 	return err;
2838 }
2839 
2840 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2841 {
2842 	struct i915_ggtt *ggtt = &i915->ggtt;
2843 	struct i915_hw_ppgtt *ppgtt;
2844 
2845 	ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2846 	if (!ppgtt)
2847 		return;
2848 
2849 	i915_ppgtt_put(ppgtt);
2850 
2851 	ggtt->base.bind_vma = ggtt_bind_vma;
2852 	ggtt->base.unbind_vma = ggtt_unbind_vma;
2853 }
2854 
2855 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2856 {
	/* Let GEM manage all of the aperture.
2858 	 *
2859 	 * However, leave one page at the end still bound to the scratch page.
2860 	 * There are a number of places where the hardware apparently prefetches
2861 	 * past the end of the object, and we've seen multiple hangs with the
2862 	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2863 	 * aperture.  One page should be enough to keep any prefetching inside
2864 	 * of the aperture.
2865 	 */
2866 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2867 	unsigned long hole_start, hole_end;
2868 	struct drm_mm_node *entry;
2869 	int ret;
2870 
2871 	ret = intel_vgt_balloon(dev_priv);
2872 	if (ret)
2873 		return ret;
2874 
2875 	/* Reserve a mappable slot for our lockless error capture */
2876 	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2877 					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2878 					  0, ggtt->mappable_end,
2879 					  DRM_MM_INSERT_LOW);
2880 	if (ret)
2881 		return ret;
2882 
2883 	/* Clear any non-preallocated blocks */
2884 	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
2885 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2886 			      hole_start, hole_end);
2887 		ggtt->base.clear_range(&ggtt->base, hole_start,
2888 				       hole_end - hole_start);
2889 	}
2890 
2891 	/* And finally clear the reserved guard page */
2892 	ggtt->base.clear_range(&ggtt->base,
2893 			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
2894 
2895 	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2896 		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2897 		if (ret)
2898 			goto err;
2899 	}
2900 
2901 	return 0;
2902 
2903 err:
2904 	drm_mm_remove_node(&ggtt->error_capture);
2905 	return ret;
2906 }
2907 
2908 /**
2909  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2910  * @dev_priv: i915 device
2911  */
2912 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2913 {
2914 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2915 	struct i915_vma *vma, *vn;
2916 	struct pagevec *pvec;
2917 
2918 	ggtt->base.closed = true;
2919 
2920 	mutex_lock(&dev_priv->drm.struct_mutex);
2921 	WARN_ON(!list_empty(&ggtt->base.active_list));
2922 	list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2923 		WARN_ON(i915_vma_unbind(vma));
2924 	mutex_unlock(&dev_priv->drm.struct_mutex);
2925 
2926 	i915_gem_cleanup_stolen(&dev_priv->drm);
2927 
2928 	mutex_lock(&dev_priv->drm.struct_mutex);
2929 	i915_gem_fini_aliasing_ppgtt(dev_priv);
2930 
2931 	if (drm_mm_node_allocated(&ggtt->error_capture))
2932 		drm_mm_remove_node(&ggtt->error_capture);
2933 
2934 	if (drm_mm_initialized(&ggtt->base.mm)) {
2935 		intel_vgt_deballoon(dev_priv);
2936 		i915_address_space_fini(&ggtt->base);
2937 	}
2938 
2939 	ggtt->base.cleanup(&ggtt->base);
2940 
2941 	pvec = &dev_priv->mm.wc_stash;
2942 	if (pvec->nr) {
2943 		set_pages_array_wb(pvec->pages, pvec->nr);
2944 		__pagevec_release(pvec);
2945 	}
2946 
2947 	mutex_unlock(&dev_priv->drm.struct_mutex);
2948 
2949 	arch_phys_wc_del(ggtt->mtrr);
2950 	io_mapping_fini(&ggtt->iomap);
2951 }
2952 
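/*
 * The GGMS field of SNB_GMCH_CTRL encodes the GTT size directly in MiB on
 * gen6/gen7, e.g. GGMS == 2 means 2MiB of PTEs, which at 4 bytes per gen6
 * PTE maps 2GiB of GGTT address space.
 */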
2953 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2954 {
2955 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2956 	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2957 	return snb_gmch_ctl << 20;
2958 }
2959 
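/*
 * On gen8+ GGMS is a power-of-two encoding: a non-zero value n means
 * 2^n MiB of PTEs. With 8 byte gen8 PTEs, e.g. n == 3 gives 8MiB of PTEs
 * and hence a 4GiB GGTT.
 */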
2960 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2961 {
2962 	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2963 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2964 	if (bdw_gmch_ctl)
2965 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2966 
2967 #ifdef CONFIG_X86_32
2968 	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2969 	if (bdw_gmch_ctl > 4)
2970 		bdw_gmch_ctl = 4;
2971 #endif
2972 
2973 	return bdw_gmch_ctl << 20;
2974 }
2975 
2976 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2977 {
2978 	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2979 	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2980 
2981 	if (gmch_ctrl)
2982 		return 1 << (20 + gmch_ctrl);
2983 
2984 	return 0;
2985 }
2986 
2987 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2988 {
2989 	struct drm_i915_private *dev_priv = ggtt->base.i915;
2990 	struct pci_dev *pdev = dev_priv->drm.pdev;
2991 	phys_addr_t phys_addr;
2992 	int ret;
2993 
	/* For modern GENs the PTEs and register space split BAR 0 in half,
	 * with the GTT page table occupying the upper half.
	 */
2995 	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2996 
2997 	/*
2998 	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2999 	 * will be dropped. For WC mappings in general we have 64 byte burst
3000 	 * writes when the WC buffer is flushed, so we can't use it, but have to
3001 	 * resort to an uncached mapping. The WC issue is easily caught by the
3002 	 * readback check when writing GTT PTE entries.
3003 	 */
3004 	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
3005 		ggtt->gsm = ioremap_nocache(phys_addr, size);
3006 	else
3007 		ggtt->gsm = ioremap_wc(phys_addr, size);
3008 	if (!ggtt->gsm) {
3009 		DRM_ERROR("Failed to map the ggtt page table\n");
3010 		return -ENOMEM;
3011 	}
3012 
3013 	ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
3014 	if (ret) {
3015 		DRM_ERROR("Scratch setup failed\n");
3016 		/* iounmap will also get called at remove, but meh */
3017 		iounmap(ggtt->gsm);
3018 		return ret;
3019 	}
3020 
3021 	return 0;
3022 }
3023 
3024 static struct intel_ppat_entry *
3025 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
3026 {
3027 	struct intel_ppat_entry *entry = &ppat->entries[index];
3028 
3029 	GEM_BUG_ON(index >= ppat->max_entries);
3030 	GEM_BUG_ON(test_bit(index, ppat->used));
3031 
3032 	entry->ppat = ppat;
3033 	entry->value = value;
3034 	kref_init(&entry->ref);
3035 	set_bit(index, ppat->used);
3036 	set_bit(index, ppat->dirty);
3037 
3038 	return entry;
3039 }
3040 
3041 static void __free_ppat_entry(struct intel_ppat_entry *entry)
3042 {
3043 	struct intel_ppat *ppat = entry->ppat;
3044 	unsigned int index = entry - ppat->entries;
3045 
3046 	GEM_BUG_ON(index >= ppat->max_entries);
3047 	GEM_BUG_ON(!test_bit(index, ppat->used));
3048 
3049 	entry->value = ppat->clear_value;
3050 	clear_bit(index, ppat->used);
3051 	set_bit(index, ppat->dirty);
3052 }
3053 
3054 /**
3055  * intel_ppat_get - get a usable PPAT entry
3056  * @i915: i915 device instance
3057  * @value: the PPAT value required by the caller
3058  *
 * The function searches for an existing PPAT entry that matches the required
 * value. If a perfect match is found, the existing PPAT entry is used. If
 * only a partial match is found, it checks whether any PPAT index is still
 * available; if so, it allocates a new PPAT index for the required entry and
 * updates the HW. Otherwise, the partially matching entry is used.
3065  */
3066 const struct intel_ppat_entry *
3067 intel_ppat_get(struct drm_i915_private *i915, u8 value)
3068 {
3069 	struct intel_ppat *ppat = &i915->ppat;
3070 	struct intel_ppat_entry *entry = NULL;
3071 	unsigned int scanned, best_score;
3072 	int i;
3073 
3074 	GEM_BUG_ON(!ppat->max_entries);
3075 
3076 	scanned = best_score = 0;
3077 	for_each_set_bit(i, ppat->used, ppat->max_entries) {
3078 		unsigned int score;
3079 
3080 		score = ppat->match(ppat->entries[i].value, value);
3081 		if (score > best_score) {
3082 			entry = &ppat->entries[i];
3083 			if (score == INTEL_PPAT_PERFECT_MATCH) {
3084 				kref_get(&entry->ref);
3085 				return entry;
3086 			}
3087 			best_score = score;
3088 		}
3089 		scanned++;
3090 	}
3091 
3092 	if (scanned == ppat->max_entries) {
3093 		if (!entry)
3094 			return ERR_PTR(-ENOSPC);
3095 
3096 		kref_get(&entry->ref);
3097 		return entry;
3098 	}
3099 
3100 	i = find_first_zero_bit(ppat->used, ppat->max_entries);
3101 	entry = __alloc_ppat_entry(ppat, i, value);
3102 	ppat->update_hw(i915);
3103 	return entry;
3104 }
3105 
3106 static void release_ppat(struct kref *kref)
3107 {
3108 	struct intel_ppat_entry *entry =
3109 		container_of(kref, struct intel_ppat_entry, ref);
3110 	struct drm_i915_private *i915 = entry->ppat->i915;
3111 
3112 	__free_ppat_entry(entry);
3113 	entry->ppat->update_hw(i915);
3114 }
3115 
3116 /**
3117  * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
3118  * @entry: an intel PPAT entry
3119  *
 * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
 * the entry was dynamically allocated, its reference count is decreased. Once
 * the reference count reaches zero, the PPAT index becomes free again.
3123  */
3124 void intel_ppat_put(const struct intel_ppat_entry *entry)
3125 {
3126 	struct intel_ppat *ppat = entry->ppat;
3127 	unsigned int index = entry - ppat->entries;
3128 
3129 	GEM_BUG_ON(!ppat->max_entries);
3130 
3131 	kref_put(&ppat->entries[index].ref, release_ppat);
3132 }
3133 
3134 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3135 {
3136 	struct intel_ppat *ppat = &dev_priv->ppat;
3137 	int i;
3138 
3139 	for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3140 		I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3141 		clear_bit(i, ppat->dirty);
3142 	}
3143 }
3144 
3145 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3146 {
3147 	struct intel_ppat *ppat = &dev_priv->ppat;
3148 	u64 pat = 0;
3149 	int i;
3150 
3151 	for (i = 0; i < ppat->max_entries; i++)
3152 		pat |= GEN8_PPAT(i, ppat->entries[i].value);
3153 
3154 	bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3155 
3156 	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3157 	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3158 }
3159 
3160 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3161 {
3162 	unsigned int score = 0;
3163 	enum {
3164 		AGE_MATCH = BIT(0),
3165 		TC_MATCH = BIT(1),
3166 		CA_MATCH = BIT(2),
3167 	};
3168 
3169 	/* Cache attribute has to be matched. */
3170 	if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3171 		return 0;
3172 
3173 	score |= CA_MATCH;
3174 
3175 	if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3176 		score |= TC_MATCH;
3177 
3178 	if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3179 		score |= AGE_MATCH;
3180 
3181 	if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3182 		return INTEL_PPAT_PERFECT_MATCH;
3183 
3184 	return score;
3185 }
3186 
3187 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3188 {
3189 	return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3190 		INTEL_PPAT_PERFECT_MATCH : 0;
3191 }
3192 
3193 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3194 {
3195 	ppat->max_entries = 8;
3196 	ppat->update_hw = cnl_private_pat_update_hw;
3197 	ppat->match = bdw_private_pat_match;
3198 	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3199 
3200 	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3201 	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3202 	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3203 	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3204 	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3205 	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3206 	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3207 	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3208 }
3209 
3210 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3211  * bits. When using advanced contexts each context stores its own PAT, but
3212  * writing this data shouldn't be harmful even in those cases. */
3213 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3214 {
3215 	ppat->max_entries = 8;
3216 	ppat->update_hw = bdw_private_pat_update_hw;
3217 	ppat->match = bdw_private_pat_match;
3218 	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3219 
3220 	if (!USES_PPGTT(ppat->i915)) {
3221 		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3222 		 * so RTL will always use the value corresponding to
3223 		 * pat_sel = 000".
3224 		 * So let's disable cache for GGTT to avoid screen corruptions.
3225 		 * MOCS still can be used though.
3226 		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3227 		 * before this patch, i.e. the same uncached + snooping access
3228 		 * like on gen6/7 seems to be in effect.
3229 		 * - So this just fixes blitter/render access. Again it looks
3230 		 * like it's not just uncached access, but uncached + snooping.
3231 		 * So we can still hold onto all our assumptions wrt cpu
3232 		 * clflushing on LLC machines.
3233 		 */
3234 		__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3235 		return;
3236 	}
3237 
3238 	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
3239 	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
3240 	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
3241 	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
3242 	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3243 	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3244 	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3245 	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3246 }
3247 
3248 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3249 {
3250 	ppat->max_entries = 8;
3251 	ppat->update_hw = bdw_private_pat_update_hw;
3252 	ppat->match = chv_private_pat_match;
3253 	ppat->clear_value = CHV_PPAT_SNOOP;
3254 
3255 	/*
3256 	 * Map WB on BDW to snooped on CHV.
3257 	 *
3258 	 * Only the snoop bit has meaning for CHV, the rest is
3259 	 * ignored.
3260 	 *
3261 	 * The hardware will never snoop for certain types of accesses:
3262 	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3263 	 * - PPGTT page tables
3264 	 * - some other special cycles
3265 	 *
3266 	 * As with BDW, we also need to consider the following for GT accesses:
3267 	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3268 	 * so RTL will always use the value corresponding to
3269 	 * pat_sel = 000".
3270 	 * Which means we must set the snoop bit in PAT entry 0
3271 	 * in order to keep the global status page working.
3272 	 */
3273 
3274 	__alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3275 	__alloc_ppat_entry(ppat, 1, 0);
3276 	__alloc_ppat_entry(ppat, 2, 0);
3277 	__alloc_ppat_entry(ppat, 3, 0);
3278 	__alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3279 	__alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3280 	__alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3281 	__alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3282 }
3283 
3284 static void gen6_gmch_remove(struct i915_address_space *vm)
3285 {
3286 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3287 
3288 	iounmap(ggtt->gsm);
3289 	cleanup_scratch_page(vm);
3290 }
3291 
3292 static void setup_private_pat(struct drm_i915_private *dev_priv)
3293 {
3294 	struct intel_ppat *ppat = &dev_priv->ppat;
3295 	int i;
3296 
3297 	ppat->i915 = dev_priv;
3298 
3299 	if (INTEL_GEN(dev_priv) >= 10)
3300 		cnl_setup_private_ppat(ppat);
3301 	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3302 		chv_setup_private_ppat(ppat);
3303 	else
3304 		bdw_setup_private_ppat(ppat);
3305 
3306 	GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3307 
3308 	for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3309 		ppat->entries[i].value = ppat->clear_value;
3310 		ppat->entries[i].ppat = ppat;
3311 		set_bit(i, ppat->dirty);
3312 	}
3313 
3314 	ppat->update_hw(dev_priv);
3315 }
3316 
3317 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3318 {
3319 	struct drm_i915_private *dev_priv = ggtt->base.i915;
3320 	struct pci_dev *pdev = dev_priv->drm.pdev;
3321 	unsigned int size;
3322 	u16 snb_gmch_ctl;
3323 	int err;
3324 
3325 	/* TODO: We're not aware of mappable constraints on gen8 yet */
3326 	ggtt->gmadr =
3327 		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3328 						 pci_resource_len(pdev, 2));
3329 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3330 
3331 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3332 	if (!err)
3333 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3334 	if (err)
3335 		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3336 
3337 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3338 
	if (IS_CHERRYVIEW(dev_priv))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3346 
3347 	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
3348 	ggtt->base.cleanup = gen6_gmch_remove;
3349 	ggtt->base.bind_vma = ggtt_bind_vma;
3350 	ggtt->base.unbind_vma = ggtt_unbind_vma;
3351 	ggtt->base.set_pages = ggtt_set_pages;
3352 	ggtt->base.clear_pages = clear_pages;
3353 	ggtt->base.insert_page = gen8_ggtt_insert_page;
3354 	ggtt->base.clear_range = nop_clear_range;
3355 	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3356 		ggtt->base.clear_range = gen8_ggtt_clear_range;
3357 
3358 	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3359 
3360 	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3361 	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3362 		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3363 		ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3364 		if (ggtt->base.clear_range != nop_clear_range)
3365 			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3366 	}
3367 
3368 	ggtt->invalidate = gen6_ggtt_invalidate;
3369 
3370 	setup_private_pat(dev_priv);
3371 
3372 	return ggtt_probe_common(ggtt, size);
3373 }
3374 
3375 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3376 {
3377 	struct drm_i915_private *dev_priv = ggtt->base.i915;
3378 	struct pci_dev *pdev = dev_priv->drm.pdev;
3379 	unsigned int size;
3380 	u16 snb_gmch_ctl;
3381 	int err;
3382 
3383 	ggtt->gmadr =
3384 		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3385 						 pci_resource_len(pdev, 2));
3386 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3387 
3388 	/* 64/512MB is the current min/max we actually know of, but this is just
3389 	 * a coarse sanity check.
3390 	 */
3391 	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3392 		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3393 		return -ENXIO;
3394 	}
3395 
3396 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3397 	if (!err)
3398 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3399 	if (err)
3400 		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3401 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3402 
3403 	size = gen6_get_total_gtt_size(snb_gmch_ctl);
3404 	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3405 
3406 	ggtt->base.clear_range = gen6_ggtt_clear_range;
3407 	ggtt->base.insert_page = gen6_ggtt_insert_page;
3408 	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3409 	ggtt->base.bind_vma = ggtt_bind_vma;
3410 	ggtt->base.unbind_vma = ggtt_unbind_vma;
3411 	ggtt->base.set_pages = ggtt_set_pages;
3412 	ggtt->base.clear_pages = clear_pages;
3413 	ggtt->base.cleanup = gen6_gmch_remove;
3414 
3415 	ggtt->invalidate = gen6_ggtt_invalidate;
3416 
3417 	if (HAS_EDRAM(dev_priv))
3418 		ggtt->base.pte_encode = iris_pte_encode;
3419 	else if (IS_HASWELL(dev_priv))
3420 		ggtt->base.pte_encode = hsw_pte_encode;
3421 	else if (IS_VALLEYVIEW(dev_priv))
3422 		ggtt->base.pte_encode = byt_pte_encode;
3423 	else if (INTEL_GEN(dev_priv) >= 7)
3424 		ggtt->base.pte_encode = ivb_pte_encode;
3425 	else
3426 		ggtt->base.pte_encode = snb_pte_encode;
3427 
3428 	return ggtt_probe_common(ggtt, size);
3429 }
3430 
3431 static void i915_gmch_remove(struct i915_address_space *vm)
3432 {
3433 	intel_gmch_remove();
3434 }
3435 
3436 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3437 {
3438 	struct drm_i915_private *dev_priv = ggtt->base.i915;
3439 	phys_addr_t gmadr_base;
3440 	int ret;
3441 
3442 	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3443 	if (!ret) {
3444 		DRM_ERROR("failed to set up gmch\n");
3445 		return -EIO;
3446 	}
3447 
3448 	intel_gtt_get(&ggtt->base.total,
3449 		      &gmadr_base,
3450 		      &ggtt->mappable_end);
3451 
3452 	ggtt->gmadr =
3453 		(struct resource) DEFINE_RES_MEM(gmadr_base,
3454 						 ggtt->mappable_end);
3455 
3456 	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3457 	ggtt->base.insert_page = i915_ggtt_insert_page;
3458 	ggtt->base.insert_entries = i915_ggtt_insert_entries;
3459 	ggtt->base.clear_range = i915_ggtt_clear_range;
3460 	ggtt->base.bind_vma = ggtt_bind_vma;
3461 	ggtt->base.unbind_vma = ggtt_unbind_vma;
3462 	ggtt->base.set_pages = ggtt_set_pages;
3463 	ggtt->base.clear_pages = clear_pages;
3464 	ggtt->base.cleanup = i915_gmch_remove;
3465 
3466 	ggtt->invalidate = gmch_ggtt_invalidate;
3467 
3468 	if (unlikely(ggtt->do_idle_maps))
3469 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3470 
3471 	return 0;
3472 }
3473 
3474 /**
3475  * i915_ggtt_probe_hw - Probe GGTT hardware location
3476  * @dev_priv: i915 device
3477  */
3478 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3479 {
3480 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3481 	int ret;
3482 
3483 	ggtt->base.i915 = dev_priv;
3484 	ggtt->base.dma = &dev_priv->drm.pdev->dev;
3485 
3486 	if (INTEL_GEN(dev_priv) <= 5)
3487 		ret = i915_gmch_probe(ggtt);
3488 	else if (INTEL_GEN(dev_priv) < 8)
3489 		ret = gen6_gmch_probe(ggtt);
3490 	else
3491 		ret = gen8_gmch_probe(ggtt);
3492 	if (ret)
3493 		return ret;
3494 
3495 	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3496 	 * This is easier than doing range restriction on the fly, as we
3497 	 * currently don't have any bits spare to pass in this upper
3498 	 * restriction!
3499 	 */
3500 	if (USES_GUC(dev_priv)) {
3501 		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3502 		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
3503 	}
3504 
3505 	if ((ggtt->base.total - 1) >> 32) {
3506 		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3507 			  " of address space! Found %lldM!\n",
3508 			  ggtt->base.total >> 20);
3509 		ggtt->base.total = 1ULL << 32;
3510 		ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
3511 	}
3512 
3513 	if (ggtt->mappable_end > ggtt->base.total) {
3514 		DRM_ERROR("mappable aperture extends past end of GGTT,"
3515 			  " aperture=%pa, total=%llx\n",
3516 			  &ggtt->mappable_end, ggtt->base.total);
3517 		ggtt->mappable_end = ggtt->base.total;
3518 	}
3519 
3520 	/* GMADR is the PCI mmio aperture into the global GTT. */
3521 	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
3522 	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3523 	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3524 			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3525 	if (intel_vtd_active())
3526 		DRM_INFO("VT-d active for gfx access\n");
3527 
3528 	return 0;
3529 }
3530 
3531 /**
3532  * i915_ggtt_init_hw - Initialize GGTT hardware
3533  * @dev_priv: i915 device
3534  */
3535 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3536 {
3537 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3538 	int ret;
3539 
3540 	INIT_LIST_HEAD(&dev_priv->vm_list);
3541 
3542 	/* Note that we use page colouring to enforce a guard page at the
3543 	 * end of the address space. This is required as the CS may prefetch
3544 	 * beyond the end of the batch buffer, across the page boundary,
3545 	 * and beyond the end of the GTT if we do not provide a guard.
3546 	 */
3547 	mutex_lock(&dev_priv->drm.struct_mutex);
3548 	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
3549 	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
3550 		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
3551 	mutex_unlock(&dev_priv->drm.struct_mutex);
3552 
3553 	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3554 				dev_priv->ggtt.gmadr.start,
3555 				dev_priv->ggtt.mappable_end)) {
3556 		ret = -EIO;
3557 		goto out_gtt_cleanup;
3558 	}
3559 
3560 	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3561 
3562 	/*
3563 	 * Initialise stolen early so that we may reserve preallocated
3564 	 * objects for the BIOS to KMS transition.
3565 	 */
3566 	ret = i915_gem_init_stolen(dev_priv);
3567 	if (ret)
3568 		goto out_gtt_cleanup;
3569 
3570 	return 0;
3571 
3572 out_gtt_cleanup:
3573 	ggtt->base.cleanup(&ggtt->base);
3574 	return ret;
3575 }
3576 
3577 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3578 {
3579 	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3580 		return -EIO;
3581 
3582 	return 0;
3583 }
3584 
3585 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3586 {
3587 	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3588 
3589 	i915->ggtt.invalidate = guc_ggtt_invalidate;
3590 
3591 	i915_ggtt_invalidate(i915);
3592 }
3593 
3594 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3595 {
3596 	/* We should only be called after i915_ggtt_enable_guc() */
3597 	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3598 
3599 	i915->ggtt.invalidate = gen6_ggtt_invalidate;
3600 
3601 	i915_ggtt_invalidate(i915);
3602 }
3603 
3604 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3605 {
3606 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3607 	struct drm_i915_gem_object *obj, *on;
3608 
3609 	i915_check_and_clear_faults(dev_priv);
3610 
3611 	/* First fill our portion of the GTT with scratch pages */
3612 	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
3613 
3614 	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3615 
3616 	/* clflush objects bound into the GGTT and rebind them. */
3617 	list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
3618 		bool ggtt_bound = false;
3619 		struct i915_vma *vma;
3620 
3621 		for_each_ggtt_vma(vma, obj) {
3622 			if (!i915_vma_unbind(vma))
3623 				continue;
3624 
3625 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
3626 					      PIN_UPDATE));
3627 			ggtt_bound = true;
3628 		}
3629 
3630 		if (ggtt_bound)
3631 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3632 	}
3633 
3634 	ggtt->base.closed = false;
3635 
3636 	if (INTEL_GEN(dev_priv) >= 8) {
3637 		struct intel_ppat *ppat = &dev_priv->ppat;
3638 
3639 		bitmap_set(ppat->dirty, 0, ppat->max_entries);
3640 		dev_priv->ppat.update_hw(dev_priv);
3641 		return;
3642 	}
3643 
3644 	if (USES_PPGTT(dev_priv)) {
3645 		struct i915_address_space *vm;
3646 
3647 		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3648 			struct i915_hw_ppgtt *ppgtt;
3649 
3650 			if (i915_is_ggtt(vm))
3651 				ppgtt = dev_priv->mm.aliasing_ppgtt;
3652 			else
3653 				ppgtt = i915_vm_to_ppgtt(vm);
3654 
3655 			gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
3656 		}
3657 	}
3658 
3659 	i915_ggtt_invalidate(dev_priv);
3660 }
3661 
3662 static struct scatterlist *
3663 rotate_pages(const dma_addr_t *in, unsigned int offset,
3664 	     unsigned int width, unsigned int height,
3665 	     unsigned int stride,
3666 	     struct sg_table *st, struct scatterlist *sg)
3667 {
3668 	unsigned int column, row;
3669 	unsigned int src_idx;
3670 
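	/*
	 * Emit the rotated view column by column: for each output column we
	 * walk the corresponding source column from the bottom row upwards
	 * (src_idx starts at the last row and decreases by stride), which
	 * linearises the pages in 90 degree rotated order.
	 */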
3671 	for (column = 0; column < width; column++) {
3672 		src_idx = stride * (height - 1) + column;
3673 		for (row = 0; row < height; row++) {
3674 			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only things we need are DMA addresses.
			 */
3679 			sg_set_page(sg, NULL, PAGE_SIZE, 0);
3680 			sg_dma_address(sg) = in[offset + src_idx];
3681 			sg_dma_len(sg) = PAGE_SIZE;
3682 			sg = sg_next(sg);
3683 			src_idx -= stride;
3684 		}
3685 	}
3686 
3687 	return sg;
3688 }
3689 
3690 static noinline struct sg_table *
3691 intel_rotate_pages(struct intel_rotation_info *rot_info,
3692 		   struct drm_i915_gem_object *obj)
3693 {
3694 	const unsigned long n_pages = obj->base.size / PAGE_SIZE;
3695 	unsigned int size = intel_rotation_info_size(rot_info);
3696 	struct sgt_iter sgt_iter;
3697 	dma_addr_t dma_addr;
3698 	unsigned long i;
3699 	dma_addr_t *page_addr_list;
3700 	struct sg_table *st;
3701 	struct scatterlist *sg;
3702 	int ret = -ENOMEM;
3703 
3704 	/* Allocate a temporary list of source pages for random access. */
3705 	page_addr_list = kvmalloc_array(n_pages,
3706 					sizeof(dma_addr_t),
3707 					GFP_KERNEL);
3708 	if (!page_addr_list)
3709 		return ERR_PTR(ret);
3710 
3711 	/* Allocate target SG list. */
3712 	st = kmalloc(sizeof(*st), GFP_KERNEL);
3713 	if (!st)
3714 		goto err_st_alloc;
3715 
3716 	ret = sg_alloc_table(st, size, GFP_KERNEL);
3717 	if (ret)
3718 		goto err_sg_alloc;
3719 
3720 	/* Populate source page list from the object. */
3721 	i = 0;
3722 	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3723 		page_addr_list[i++] = dma_addr;
3724 
3725 	GEM_BUG_ON(i != n_pages);
3726 	st->nents = 0;
3727 	sg = st->sgl;
3728 
	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
3730 		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3731 				  rot_info->plane[i].width, rot_info->plane[i].height,
3732 				  rot_info->plane[i].stride, st, sg);
3733 	}
3734 
3735 	kvfree(page_addr_list);
3736 
3737 	return st;
3738 
3739 err_sg_alloc:
3740 	kfree(st);
3741 err_st_alloc:
3742 	kvfree(page_addr_list);
3743 
3744 	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3745 			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3746 
3747 	return ERR_PTR(ret);
3748 }
3749 
3750 static noinline struct sg_table *
3751 intel_partial_pages(const struct i915_ggtt_view *view,
3752 		    struct drm_i915_gem_object *obj)
3753 {
3754 	struct sg_table *st;
3755 	struct scatterlist *sg, *iter;
3756 	unsigned int count = view->partial.size;
3757 	unsigned int offset;
3758 	int ret = -ENOMEM;
3759 
3760 	st = kmalloc(sizeof(*st), GFP_KERNEL);
3761 	if (!st)
3762 		goto err_st_alloc;
3763 
3764 	ret = sg_alloc_table(st, count, GFP_KERNEL);
3765 	if (ret)
3766 		goto err_sg_alloc;
3767 
3768 	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3769 	GEM_BUG_ON(!iter);
3770 
3771 	sg = st->sgl;
3772 	st->nents = 0;
3773 	do {
3774 		unsigned int len;
3775 
3776 		len = min(iter->length - (offset << PAGE_SHIFT),
3777 			  count << PAGE_SHIFT);
3778 		sg_set_page(sg, NULL, len, 0);
3779 		sg_dma_address(sg) =
3780 			sg_dma_address(iter) + (offset << PAGE_SHIFT);
3781 		sg_dma_len(sg) = len;
3782 
3783 		st->nents++;
3784 		count -= len >> PAGE_SHIFT;
3785 		if (count == 0) {
3786 			sg_mark_end(sg);
3787 			return st;
3788 		}
3789 
3790 		sg = __sg_next(sg);
3791 		iter = __sg_next(iter);
3792 		offset = 0;
3793 	} while (1);
3794 
3795 err_sg_alloc:
3796 	kfree(st);
3797 err_st_alloc:
3798 	return ERR_PTR(ret);
3799 }
3800 
3801 static int
3802 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3803 {
3804 	int ret;
3805 
	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. Whenever the obj->mm.pages sg_table is regenerated,
	 * the vma->pages must be regenerated as well. A simple rule is that
	 * vma->pages must only be accessed while the obj->mm.pages are pinned.
	 */
3811 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3812 
3813 	switch (vma->ggtt_view.type) {
3814 	case I915_GGTT_VIEW_NORMAL:
3815 		vma->pages = vma->obj->mm.pages;
3816 		return 0;
3817 
3818 	case I915_GGTT_VIEW_ROTATED:
3819 		vma->pages =
3820 			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3821 		break;
3822 
3823 	case I915_GGTT_VIEW_PARTIAL:
3824 		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3825 		break;
3826 
3827 	default:
3828 		WARN_ONCE(1, "GGTT view %u not implemented!\n",
3829 			  vma->ggtt_view.type);
3830 		return -EINVAL;
3831 	}
3832 
3833 	ret = 0;
3834 	if (unlikely(IS_ERR(vma->pages))) {
3835 		ret = PTR_ERR(vma->pages);
3836 		vma->pages = NULL;
3837 		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3838 			  vma->ggtt_view.type, ret);
3839 	}
3840 	return ret;
3841 }
3842 
3843 /**
3844  * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3845  * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
3847  * @size: how much space to allocate inside the GTT,
3848  *        must be #I915_GTT_PAGE_SIZE aligned
3849  * @offset: where to insert inside the GTT,
3850  *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3851  *          (@offset + @size) must fit within the address space
3852  * @color: color to apply to node, if this node is not from a VMA,
3853  *         color must be #I915_COLOR_UNEVICTABLE
3854  * @flags: control search and eviction behaviour
3855  *
3856  * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3857  * the address space (using @size and @color). If the @node does not fit, it
3858  * tries to evict any overlapping nodes from the GTT, including any
3859  * neighbouring nodes if the colors do not match (to ensure guard pages between
3860  * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * the eviction of active overlapping objects; any overlapping node that is
 * pinned or marked as unevictable will also cause the reservation to fail.
3864  *
3865  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3866  * asked to wait for eviction and interrupted.
3867  */
3868 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3869 			 struct drm_mm_node *node,
3870 			 u64 size, u64 offset, unsigned long color,
3871 			 unsigned int flags)
3872 {
3873 	int err;
3874 
3875 	GEM_BUG_ON(!size);
3876 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3877 	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3878 	GEM_BUG_ON(range_overflows(offset, size, vm->total));
3879 	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3880 	GEM_BUG_ON(drm_mm_node_allocated(node));
3881 
3882 	node->size = size;
3883 	node->start = offset;
3884 	node->color = color;
3885 
3886 	err = drm_mm_reserve_node(&vm->mm, node);
3887 	if (err != -ENOSPC)
3888 		return err;
3889 
3890 	if (flags & PIN_NOEVICT)
3891 		return -ENOSPC;
3892 
3893 	err = i915_gem_evict_for_node(vm, node, flags);
3894 	if (err == 0)
3895 		err = drm_mm_reserve_node(&vm->mm, node);
3896 
3897 	return err;
3898 }
3899 
3900 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3901 {
3902 	u64 range, addr;
3903 
3904 	GEM_BUG_ON(range_overflows(start, len, end));
3905 	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3906 
3907 	range = round_down(end - len, align) - round_up(start, align);
3908 	if (range) {
3909 		if (sizeof(unsigned long) == sizeof(u64)) {
3910 			addr = get_random_long();
3911 		} else {
3912 			addr = get_random_int();
3913 			if (range > U32_MAX) {
3914 				addr <<= 32;
3915 				addr |= get_random_int();
3916 			}
3917 		}
3918 		div64_u64_rem(addr, range, &addr);
3919 		start += addr;
3920 	}
3921 
3922 	return round_up(start, align);
3923 }
3924 
3925 /**
3926  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3927  * @vm: the &struct i915_address_space
3928  * @node: the &struct drm_mm_node (typically i915_vma.node)
3929  * @size: how much space to allocate inside the GTT,
3930  *        must be #I915_GTT_PAGE_SIZE aligned
3931  * @alignment: required alignment of starting offset, may be 0 but
3932  *             if specified, this must be a power-of-two and at least
3933  *             #I915_GTT_MIN_ALIGNMENT
3934  * @color: color to apply to node
3935  * @start: start of any range restriction inside GTT (0 for all),
3936  *         must be #I915_GTT_PAGE_SIZE aligned
3937  * @end: end of any range restriction inside GTT (U64_MAX for all),
3938  *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3939  * @flags: control search and eviction behaviour
3940  *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, a victim is first selected at random and tested
 * for eviction; failing that, the LRU list of objects within the GTT is
 * scanned to find the first set of replacement nodes that creates the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
3955  *
3956  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3957  * asked to wait for eviction and interrupted.
3958  */
3959 int i915_gem_gtt_insert(struct i915_address_space *vm,
3960 			struct drm_mm_node *node,
3961 			u64 size, u64 alignment, unsigned long color,
3962 			u64 start, u64 end, unsigned int flags)
3963 {
3964 	enum drm_mm_insert_mode mode;
3965 	u64 offset;
3966 	int err;
3967 
3968 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
3969 	GEM_BUG_ON(!size);
3970 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3971 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3972 	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3973 	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3975 	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3976 	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3977 	GEM_BUG_ON(drm_mm_node_allocated(node));
3978 
3979 	if (unlikely(range_overflows(start, size, end)))
3980 		return -ENOSPC;
3981 
3982 	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3983 		return -ENOSPC;
3984 
3985 	mode = DRM_MM_INSERT_BEST;
3986 	if (flags & PIN_HIGH)
3987 		mode = DRM_MM_INSERT_HIGH;
3988 	if (flags & PIN_MAPPABLE)
3989 		mode = DRM_MM_INSERT_LOW;
3990 
3991 	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3992 	 * so we know that we always have a minimum alignment of 4096.
3993 	 * The drm_mm range manager is optimised to return results
3994 	 * with zero alignment, so where possible use the optimal
3995 	 * path.
3996 	 */
3997 	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3998 	if (alignment <= I915_GTT_MIN_ALIGNMENT)
3999 		alignment = 0;
4000 
4001 	err = drm_mm_insert_node_in_range(&vm->mm, node,
4002 					  size, alignment, color,
4003 					  start, end, mode);
4004 	if (err != -ENOSPC)
4005 		return err;
4006 
4007 	if (flags & PIN_NOEVICT)
4008 		return -ENOSPC;
4009 
4010 	/* No free space, pick a slot at random.
4011 	 *
4012 	 * There is a pathological case here using a GTT shared between
4013 	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4014 	 *
4015 	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4016 	 *         (64k objects)             (448k objects)
4017 	 *
4018 	 * Now imagine that the eviction LRU is ordered top-down (just because
4019 	 * pathology meets real life), and that we need to evict an object to
4020 	 * make room inside the aperture. The eviction scan then has to walk
4021 	 * the 448k list before it finds one within range. And now imagine that
4022 	 * it has to search for a new hole between every byte inside the memcpy,
4023 	 * for several simultaneous clients.
4024 	 *
4025 	 * On a full-ppgtt system, if we have run out of available space, there
4026 	 * will be lots and lots of objects in the eviction list! Again,
4027 	 * searching that LRU list may be slow if we are also applying any
4028 	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
4030 	 * random replacement first.
4031 	 */
4032 	offset = random_offset(start, end,
4033 			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4034 	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4035 	if (err != -ENOSPC)
4036 		return err;
4037 
4038 	/* Randomly selected placement is pinned, do a search */
4039 	err = i915_gem_evict_something(vm, size, alignment, color,
4040 				       start, end, flags);
4041 	if (err)
4042 		return err;
4043 
4044 	return drm_mm_insert_node_in_range(&vm->mm, node,
4045 					   size, alignment, color,
4046 					   start, end, DRM_MM_INSERT_EVICT);
4047 }
4048 
4049 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4050 #include "selftests/mock_gtt.c"
4051 #include "selftests/i915_gem_gtt.c"
4052 #endif
4053