1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
27 
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
33 
34 #include <asm/set_memory.h>
35 
36 #include <drm/i915_drm.h>
37 
38 #include "i915_drv.h"
39 #include "i915_vgpu.h"
40 #include "i915_reset.h"
41 #include "i915_trace.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
44 
45 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
46 
47 /**
48  * DOC: Global GTT views
49  *
50  * Background and previous state
51  *
52  * Historically objects could exist (be bound) in global GTT space only as
53  * singular instances with a view representing all of the object's backing pages
54  * in a linear fashion. This view will be called a normal view.
55  *
56  * To support multiple views of the same object, where the number of mapped
57  * pages is not equal to the backing store, or where the layout of the pages
58  * is not linear, the concept of a GGTT view was added.
59  *
60  * One example of an alternative view is a stereo display driven by a single
61  * image. In this case we would have a framebuffer looking like this
62  * (2x2 pages):
63  *
64  *    12
65  *    34
66  *
67  * The above represents a normal GGTT view as normally mapped for GPU or CPU
68  * rendering. In contrast, the display engine would be fed an alternative
69  * view which could look something like this:
70  *
71  *   1212
72  *   3434
73  *
74  * In this example both the size and layout of pages in the alternative view
75  * differ from the normal view.
76  *
77  * Implementation and usage
78  *
79  * GGTT views are implemented using VMAs and are distinguished via enum
80  * i915_ggtt_view_type and struct i915_ggtt_view.
81  *
82  * A new flavour of core GEM functions which work with GGTT bound objects was
83  * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
84  * renaming in large amounts of code. They take the struct i915_ggtt_view
85  * parameter encapsulating all metadata required to implement a view.
86  *
87  * As a helper for callers which are only interested in the normal view, a
88  * globally const i915_ggtt_view_normal singleton instance exists. All old core
89  * GEM API functions, the ones not taking the view parameter, operate on, or
90  * with, the normal GGTT view.
91  *
92  * Code wanting to add or use a new GGTT view needs to:
93  *
94  * 1. Add a new enum with a suitable name.
95  * 2. Extend the metadata in the i915_ggtt_view structure if required.
96  * 3. Add support to i915_get_ggtt_vma_pages().
97  *
98  * New views are required to build a scatter-gather table from within the
99  * i915_get_ggtt_vma_pages function. This table is stored in the vma.ggtt_view
100  * and exists for the lifetime of a VMA.
101  *
102  * The core API is designed to have copy semantics, which means that the
103  * passed-in struct i915_ggtt_view does not need to be persistent (left around
104  * after calling the core API functions).
105  *
106  */
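/*
 * A minimal illustrative sketch, not code from this file: a caller that only
 * needs the normal view can build the view by type alone (or pass the
 * singleton mentioned above), while rotated/partial views also fill in their
 * extra metadata. The flags and error handling here are placeholders:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */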
107 
108 static int
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
110 
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
112 {
113 	/*
114 	 * Note that, as an uncached mmio write, this will flush the write-combine
115 	 * buffer (WCB) of prior GGTT writes before it triggers the invalidate.
116 	 */
117 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
118 }
119 
120 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
121 {
122 	gen6_ggtt_invalidate(dev_priv);
123 	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
124 }
125 
126 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
127 {
128 	intel_gtt_chipset_flush();
129 }
130 
131 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
132 {
133 	i915->ggtt.invalidate(i915);
134 }
135 
136 static int ppgtt_bind_vma(struct i915_vma *vma,
137 			  enum i915_cache_level cache_level,
138 			  u32 unused)
139 {
140 	u32 pte_flags;
141 	int err;
142 
143 	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
144 		err = vma->vm->allocate_va_range(vma->vm,
145 						 vma->node.start, vma->size);
146 		if (err)
147 			return err;
148 	}
149 
150 	/* Applicable to VLV, and gen8+ */
151 	pte_flags = 0;
152 	if (i915_gem_object_is_readonly(vma->obj))
153 		pte_flags |= PTE_READ_ONLY;
154 
155 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
156 
157 	return 0;
158 }
159 
160 static void ppgtt_unbind_vma(struct i915_vma *vma)
161 {
162 	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
163 }
164 
165 static int ppgtt_set_pages(struct i915_vma *vma)
166 {
167 	GEM_BUG_ON(vma->pages);
168 
169 	vma->pages = vma->obj->mm.pages;
170 
171 	vma->page_sizes = vma->obj->mm.page_sizes;
172 
173 	return 0;
174 }
175 
176 static void clear_pages(struct i915_vma *vma)
177 {
178 	GEM_BUG_ON(!vma->pages);
179 
180 	if (vma->pages != vma->obj->mm.pages) {
181 		sg_free_table(vma->pages);
182 		kfree(vma->pages);
183 	}
184 	vma->pages = NULL;
185 
186 	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
187 }
188 
189 static u64 gen8_pte_encode(dma_addr_t addr,
190 			   enum i915_cache_level level,
191 			   u32 flags)
192 {
193 	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
194 
195 	if (unlikely(flags & PTE_READ_ONLY))
196 		pte &= ~_PAGE_RW;
197 
198 	switch (level) {
199 	case I915_CACHE_NONE:
200 		pte |= PPAT_UNCACHED;
201 		break;
202 	case I915_CACHE_WT:
203 		pte |= PPAT_DISPLAY_ELLC;
204 		break;
205 	default:
206 		pte |= PPAT_CACHED;
207 		break;
208 	}
209 
210 	return pte;
211 }
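/*
 * Worked example (symbolic values): the read-only scratch PTE built by
 * gen8_init_scratch() below is
 *
 *	gen8_pte_encode(daddr, I915_CACHE_LLC, PTE_READ_ONLY)
 *		== daddr | _PAGE_PRESENT | PPAT_CACHED
 *
 * i.e. a present, cacheable entry whose _PAGE_RW bit is cleared by the
 * read-only flag.
 */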
212 
213 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
214 				  const enum i915_cache_level level)
215 {
216 	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
217 	pde |= addr;
218 	if (level != I915_CACHE_NONE)
219 		pde |= PPAT_CACHED_PDE;
220 	else
221 		pde |= PPAT_UNCACHED;
222 	return pde;
223 }
224 
225 #define gen8_pdpe_encode gen8_pde_encode
226 #define gen8_pml4e_encode gen8_pde_encode
227 
228 static u64 snb_pte_encode(dma_addr_t addr,
229 			  enum i915_cache_level level,
230 			  u32 flags)
231 {
232 	gen6_pte_t pte = GEN6_PTE_VALID;
233 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
234 
235 	switch (level) {
236 	case I915_CACHE_L3_LLC:
237 	case I915_CACHE_LLC:
238 		pte |= GEN6_PTE_CACHE_LLC;
239 		break;
240 	case I915_CACHE_NONE:
241 		pte |= GEN6_PTE_UNCACHED;
242 		break;
243 	default:
244 		MISSING_CASE(level);
245 	}
246 
247 	return pte;
248 }
249 
250 static u64 ivb_pte_encode(dma_addr_t addr,
251 			  enum i915_cache_level level,
252 			  u32 flags)
253 {
254 	gen6_pte_t pte = GEN6_PTE_VALID;
255 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
256 
257 	switch (level) {
258 	case I915_CACHE_L3_LLC:
259 		pte |= GEN7_PTE_CACHE_L3_LLC;
260 		break;
261 	case I915_CACHE_LLC:
262 		pte |= GEN6_PTE_CACHE_LLC;
263 		break;
264 	case I915_CACHE_NONE:
265 		pte |= GEN6_PTE_UNCACHED;
266 		break;
267 	default:
268 		MISSING_CASE(level);
269 	}
270 
271 	return pte;
272 }
273 
274 static u64 byt_pte_encode(dma_addr_t addr,
275 			  enum i915_cache_level level,
276 			  u32 flags)
277 {
278 	gen6_pte_t pte = GEN6_PTE_VALID;
279 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
280 
281 	if (!(flags & PTE_READ_ONLY))
282 		pte |= BYT_PTE_WRITEABLE;
283 
284 	if (level != I915_CACHE_NONE)
285 		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
286 
287 	return pte;
288 }
289 
290 static u64 hsw_pte_encode(dma_addr_t addr,
291 			  enum i915_cache_level level,
292 			  u32 flags)
293 {
294 	gen6_pte_t pte = GEN6_PTE_VALID;
295 	pte |= HSW_PTE_ADDR_ENCODE(addr);
296 
297 	if (level != I915_CACHE_NONE)
298 		pte |= HSW_WB_LLC_AGE3;
299 
300 	return pte;
301 }
302 
303 static u64 iris_pte_encode(dma_addr_t addr,
304 			   enum i915_cache_level level,
305 			   u32 flags)
306 {
307 	gen6_pte_t pte = GEN6_PTE_VALID;
308 	pte |= HSW_PTE_ADDR_ENCODE(addr);
309 
310 	switch (level) {
311 	case I915_CACHE_NONE:
312 		break;
313 	case I915_CACHE_WT:
314 		pte |= HSW_WT_ELLC_LLC_AGE3;
315 		break;
316 	default:
317 		pte |= HSW_WB_ELLC_LLC_AGE3;
318 		break;
319 	}
320 
321 	return pte;
322 }
323 
324 static void stash_init(struct pagestash *stash)
325 {
326 	pagevec_init(&stash->pvec);
327 	spin_lock_init(&stash->lock);
328 }
329 
330 static struct page *stash_pop_page(struct pagestash *stash)
331 {
332 	struct page *page = NULL;
333 
334 	spin_lock(&stash->lock);
335 	if (likely(stash->pvec.nr))
336 		page = stash->pvec.pages[--stash->pvec.nr];
337 	spin_unlock(&stash->lock);
338 
339 	return page;
340 }
341 
342 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
343 {
344 	int nr;
345 
346 	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
347 
348 	nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
349 	memcpy(stash->pvec.pages + stash->pvec.nr,
350 	       pvec->pages + pvec->nr - nr,
351 	       sizeof(pvec->pages[0]) * nr);
352 	stash->pvec.nr += nr;
353 
354 	spin_unlock(&stash->lock);
355 
356 	pvec->nr -= nr;
357 }
358 
359 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
360 {
361 	struct pagevec stack;
362 	struct page *page;
363 
364 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
365 		i915_gem_shrink_all(vm->i915);
366 
367 	page = stash_pop_page(&vm->free_pages);
368 	if (page)
369 		return page;
370 
371 	if (!vm->pt_kmap_wc)
372 		return alloc_page(gfp);
373 
374 	/* Look in our global stash of WC pages... */
375 	page = stash_pop_page(&vm->i915->mm.wc_stash);
376 	if (page)
377 		return page;
378 
379 	/*
380 	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
381 	 *
382 	 * We have to be careful as page allocation may trigger the shrinker
383 	 * (via direct reclaim) which will fill up the WC stash underneath us.
384 	 * So we add our WB pages into a temporary pvec on the stack and merge
385 	 * them into the WC stash after all the allocations are complete.
386 	 */
387 	pagevec_init(&stack);
388 	do {
389 		struct page *page;
390 
391 		page = alloc_page(gfp);
392 		if (unlikely(!page))
393 			break;
394 
395 		stack.pages[stack.nr++] = page;
396 	} while (pagevec_space(&stack));
397 
398 	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
399 		page = stack.pages[--stack.nr];
400 
401 		/* Merge spare WC pages to the global stash */
402 		stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
403 
404 		/* Push any surplus WC pages onto the local VM stash */
405 		if (stack.nr)
406 			stash_push_pagevec(&vm->free_pages, &stack);
407 	}
408 
409 	/* Return unwanted leftovers */
410 	if (unlikely(stack.nr)) {
411 		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
412 		__pagevec_release(&stack);
413 	}
414 
415 	return page;
416 }
417 
418 static void vm_free_pages_release(struct i915_address_space *vm,
419 				  bool immediate)
420 {
421 	struct pagevec *pvec = &vm->free_pages.pvec;
422 	struct pagevec stack;
423 
424 	lockdep_assert_held(&vm->free_pages.lock);
425 	GEM_BUG_ON(!pagevec_count(pvec));
426 
427 	if (vm->pt_kmap_wc) {
428 		/*
429 		 * When we use WC, first fill up the global stash and only if
430 		 * that is full do we immediately free the overflow.
431 		 */
432 		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
433 
434 		/*
435 		 * As we have made some room in the VM's free_pages,
436 		 * we can wait for it to fill again. Unless we are
437 		 * inside i915_address_space_fini() and must
438 		 * immediately release the pages!
439 		 */
440 		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
441 			return;
442 
443 		/*
444 		 * We have to drop the lock to allow ourselves to sleep,
445 		 * so take a copy of the pvec and clear the stash for
446 		 * others to use it as we sleep.
447 		 */
448 		stack = *pvec;
449 		pagevec_reinit(pvec);
450 		spin_unlock(&vm->free_pages.lock);
451 
452 		pvec = &stack;
453 		set_pages_array_wb(pvec->pages, pvec->nr);
454 
455 		spin_lock(&vm->free_pages.lock);
456 	}
457 
458 	__pagevec_release(pvec);
459 }
460 
461 static void vm_free_page(struct i915_address_space *vm, struct page *page)
462 {
463 	/*
464 	 * On !llc, we need to change the pages back to WB. We only do so
465 	 * in bulk, so we rarely need to change the page attributes here,
466 	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
467 	 * To make detection of the possible sleep more likely, use an
468 	 * unconditional might_sleep() for everybody.
469 	 */
470 	might_sleep();
471 	spin_lock(&vm->free_pages.lock);
472 	if (!pagevec_add(&vm->free_pages.pvec, page))
473 		vm_free_pages_release(vm, false);
474 	spin_unlock(&vm->free_pages.lock);
475 }
476 
477 static void i915_address_space_init(struct i915_address_space *vm, int subclass)
478 {
479 	/*
480 	 * The vm->mutex must be reclaim safe (for use in the shrinker).
481 	 * Do a dummy acquire now under fs_reclaim so that any allocation
482 	 * attempt holding the lock is immediately reported by lockdep.
483 	 */
484 	mutex_init(&vm->mutex);
485 	lockdep_set_subclass(&vm->mutex, subclass);
486 	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
487 
488 	GEM_BUG_ON(!vm->total);
489 	drm_mm_init(&vm->mm, 0, vm->total);
490 	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
491 
492 	stash_init(&vm->free_pages);
493 
494 	INIT_LIST_HEAD(&vm->unbound_list);
495 	INIT_LIST_HEAD(&vm->bound_list);
496 }
497 
498 static void i915_address_space_fini(struct i915_address_space *vm)
499 {
500 	spin_lock(&vm->free_pages.lock);
501 	if (pagevec_count(&vm->free_pages.pvec))
502 		vm_free_pages_release(vm, true);
503 	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
504 	spin_unlock(&vm->free_pages.lock);
505 
506 	drm_mm_takedown(&vm->mm);
507 
508 	mutex_destroy(&vm->mutex);
509 }
510 
511 static int __setup_page_dma(struct i915_address_space *vm,
512 			    struct i915_page_dma *p,
513 			    gfp_t gfp)
514 {
515 	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
516 	if (unlikely(!p->page))
517 		return -ENOMEM;
518 
519 	p->daddr = dma_map_page_attrs(vm->dma,
520 				      p->page, 0, PAGE_SIZE,
521 				      PCI_DMA_BIDIRECTIONAL,
522 				      DMA_ATTR_SKIP_CPU_SYNC |
523 				      DMA_ATTR_NO_WARN);
524 	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
525 		vm_free_page(vm, p->page);
526 		return -ENOMEM;
527 	}
528 
529 	return 0;
530 }
531 
532 static int setup_page_dma(struct i915_address_space *vm,
533 			  struct i915_page_dma *p)
534 {
535 	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
536 }
537 
538 static void cleanup_page_dma(struct i915_address_space *vm,
539 			     struct i915_page_dma *p)
540 {
541 	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
542 	vm_free_page(vm, p->page);
543 }
544 
545 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
546 
547 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
548 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
549 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
550 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
551 
552 static void fill_page_dma(struct i915_address_space *vm,
553 			  struct i915_page_dma *p,
554 			  const u64 val)
555 {
556 	u64 * const vaddr = kmap_atomic(p->page);
557 
558 	memset64(vaddr, val, PAGE_SIZE / sizeof(val));
559 
560 	kunmap_atomic(vaddr);
561 }
562 
563 static void fill_page_dma_32(struct i915_address_space *vm,
564 			     struct i915_page_dma *p,
565 			     const u32 v)
566 {
567 	fill_page_dma(vm, p, (u64)v << 32 | v);
568 }
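/*
 * For example, fill32_px(vm, pt, 0x11223344) replicates the 32b value into
 * 0x1122334411223344 before filling the page.
 */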
569 
570 static int
571 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
572 {
573 	unsigned long size;
574 
575 	/*
576 	 * In order to utilize 64K pages for an object with a size < 2M, we will
577 	 * need to support a 64K scratch page, given that every 16th entry for a
578 	 * page-table operating in 64K mode must point to a properly aligned 64K
579 	 * region, including any PTEs which happen to point to scratch.
580 	 *
581 	 * This is only relevant for the 48b PPGTT where we support
582 	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
583 	 * scratch (read-only) between all vm, we create one 64k scratch page
584 	 * for all.
585 	 */
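	/*
	 * Put differently: I915_GTT_PAGE_SIZE_64K / I915_GTT_PAGE_SIZE_4K == 16,
	 * so a page table in 64K mode only uses every 16th PTE slot, and each of
	 * those slots (scratch included) must name a 64K-aligned 64K region.
	 */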
586 	size = I915_GTT_PAGE_SIZE_4K;
587 	if (i915_vm_is_4lvl(vm) &&
588 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
589 		size = I915_GTT_PAGE_SIZE_64K;
590 		gfp |= __GFP_NOWARN;
591 	}
592 	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
593 
594 	do {
595 		int order = get_order(size);
596 		struct page *page;
597 		dma_addr_t addr;
598 
599 		page = alloc_pages(gfp, order);
600 		if (unlikely(!page))
601 			goto skip;
602 
603 		addr = dma_map_page_attrs(vm->dma,
604 					  page, 0, size,
605 					  PCI_DMA_BIDIRECTIONAL,
606 					  DMA_ATTR_SKIP_CPU_SYNC |
607 					  DMA_ATTR_NO_WARN);
608 		if (unlikely(dma_mapping_error(vm->dma, addr)))
609 			goto free_page;
610 
611 		if (unlikely(!IS_ALIGNED(addr, size)))
612 			goto unmap_page;
613 
614 		vm->scratch_page.page = page;
615 		vm->scratch_page.daddr = addr;
616 		vm->scratch_order = order;
617 		return 0;
618 
619 unmap_page:
620 		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
621 free_page:
622 		__free_pages(page, order);
623 skip:
624 		if (size == I915_GTT_PAGE_SIZE_4K)
625 			return -ENOMEM;
626 
627 		size = I915_GTT_PAGE_SIZE_4K;
628 		gfp &= ~__GFP_NOWARN;
629 	} while (1);
630 }
631 
632 static void cleanup_scratch_page(struct i915_address_space *vm)
633 {
634 	struct i915_page_dma *p = &vm->scratch_page;
635 	int order = vm->scratch_order;
636 
637 	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
638 		       PCI_DMA_BIDIRECTIONAL);
639 	__free_pages(p->page, order);
640 }
641 
642 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
643 {
644 	struct i915_page_table *pt;
645 
646 	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
647 	if (unlikely(!pt))
648 		return ERR_PTR(-ENOMEM);
649 
650 	if (unlikely(setup_px(vm, pt))) {
651 		kfree(pt);
652 		return ERR_PTR(-ENOMEM);
653 	}
654 
655 	pt->used_ptes = 0;
656 	return pt;
657 }
658 
659 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
660 {
661 	cleanup_px(vm, pt);
662 	kfree(pt);
663 }
664 
665 static void gen8_initialize_pt(struct i915_address_space *vm,
666 			       struct i915_page_table *pt)
667 {
668 	fill_px(vm, pt, vm->scratch_pte);
669 }
670 
671 static void gen6_initialize_pt(struct i915_address_space *vm,
672 			       struct i915_page_table *pt)
673 {
674 	fill32_px(vm, pt, vm->scratch_pte);
675 }
676 
677 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
678 {
679 	struct i915_page_directory *pd;
680 
681 	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
682 	if (unlikely(!pd))
683 		return ERR_PTR(-ENOMEM);
684 
685 	if (unlikely(setup_px(vm, pd))) {
686 		kfree(pd);
687 		return ERR_PTR(-ENOMEM);
688 	}
689 
690 	pd->used_pdes = 0;
691 	return pd;
692 }
693 
694 static void free_pd(struct i915_address_space *vm,
695 		    struct i915_page_directory *pd)
696 {
697 	cleanup_px(vm, pd);
698 	kfree(pd);
699 }
700 
701 static void gen8_initialize_pd(struct i915_address_space *vm,
702 			       struct i915_page_directory *pd)
703 {
704 	fill_px(vm, pd,
705 		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
706 	memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
707 }
708 
709 static int __pdp_init(struct i915_address_space *vm,
710 		      struct i915_page_directory_pointer *pdp)
711 {
712 	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
713 
714 	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
715 					    I915_GFP_ALLOW_FAIL);
716 	if (unlikely(!pdp->page_directory))
717 		return -ENOMEM;
718 
719 	memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
720 
721 	return 0;
722 }
723 
724 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
725 {
726 	kfree(pdp->page_directory);
727 	pdp->page_directory = NULL;
728 }
729 
730 static struct i915_page_directory_pointer *
731 alloc_pdp(struct i915_address_space *vm)
732 {
733 	struct i915_page_directory_pointer *pdp;
734 	int ret = -ENOMEM;
735 
736 	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
737 
738 	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
739 	if (!pdp)
740 		return ERR_PTR(-ENOMEM);
741 
742 	ret = __pdp_init(vm, pdp);
743 	if (ret)
744 		goto fail_bitmap;
745 
746 	ret = setup_px(vm, pdp);
747 	if (ret)
748 		goto fail_page_m;
749 
750 	return pdp;
751 
752 fail_page_m:
753 	__pdp_fini(pdp);
754 fail_bitmap:
755 	kfree(pdp);
756 
757 	return ERR_PTR(ret);
758 }
759 
760 static void free_pdp(struct i915_address_space *vm,
761 		     struct i915_page_directory_pointer *pdp)
762 {
763 	__pdp_fini(pdp);
764 
765 	if (!i915_vm_is_4lvl(vm))
766 		return;
767 
768 	cleanup_px(vm, pdp);
769 	kfree(pdp);
770 }
771 
772 static void gen8_initialize_pdp(struct i915_address_space *vm,
773 				struct i915_page_directory_pointer *pdp)
774 {
775 	gen8_ppgtt_pdpe_t scratch_pdpe;
776 
777 	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
778 
779 	fill_px(vm, pdp, scratch_pdpe);
780 }
781 
782 static void gen8_initialize_pml4(struct i915_address_space *vm,
783 				 struct i915_pml4 *pml4)
784 {
785 	fill_px(vm, pml4,
786 		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
787 	memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
788 }
789 
790 /*
791  * PDE TLBs are a pain to invalidate on GEN8+. When we modify
792  * the page table structures, we mark them dirty so that
793  * context switching/execlist queuing code takes extra steps
794  * to ensure that tlbs are flushed.
795  */
796 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
797 {
798 	ppgtt->pd_dirty_engines = ALL_ENGINES;
799 }
800 
801 /* Removes entries from a single page table, releasing it if it's empty.
802  * Caller can use the return value to update higher-level entries.
803  */
804 static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
805 				struct i915_page_table *pt,
806 				u64 start, u64 length)
807 {
808 	unsigned int num_entries = gen8_pte_count(start, length);
809 	gen8_pte_t *vaddr;
810 
811 	GEM_BUG_ON(num_entries > pt->used_ptes);
812 
813 	pt->used_ptes -= num_entries;
814 	if (!pt->used_ptes)
815 		return true;
816 
817 	vaddr = kmap_atomic_px(pt);
818 	memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
819 	kunmap_atomic(vaddr);
820 
821 	return false;
822 }
823 
824 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
825 			       struct i915_page_directory *pd,
826 			       struct i915_page_table *pt,
827 			       unsigned int pde)
828 {
829 	gen8_pde_t *vaddr;
830 
831 	pd->page_table[pde] = pt;
832 
833 	vaddr = kmap_atomic_px(pd);
834 	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
835 	kunmap_atomic(vaddr);
836 }
837 
838 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
839 				struct i915_page_directory *pd,
840 				u64 start, u64 length)
841 {
842 	struct i915_page_table *pt;
843 	u32 pde;
844 
845 	gen8_for_each_pde(pt, pd, start, length, pde) {
846 		GEM_BUG_ON(pt == vm->scratch_pt);
847 
848 		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
849 			continue;
850 
851 		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
852 		GEM_BUG_ON(!pd->used_pdes);
853 		pd->used_pdes--;
854 
855 		free_pt(vm, pt);
856 	}
857 
858 	return !pd->used_pdes;
859 }
860 
861 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
862 				struct i915_page_directory_pointer *pdp,
863 				struct i915_page_directory *pd,
864 				unsigned int pdpe)
865 {
866 	gen8_ppgtt_pdpe_t *vaddr;
867 
868 	pdp->page_directory[pdpe] = pd;
869 	if (!i915_vm_is_4lvl(vm))
870 		return;
871 
872 	vaddr = kmap_atomic_px(pdp);
873 	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
874 	kunmap_atomic(vaddr);
875 }
876 
877 /* Removes entries from a single page dir pointer, releasing it if it's empty.
878  * Caller can use the return value to update higher-level entries.
879  */
880 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
881 				 struct i915_page_directory_pointer *pdp,
882 				 u64 start, u64 length)
883 {
884 	struct i915_page_directory *pd;
885 	unsigned int pdpe;
886 
887 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
888 		GEM_BUG_ON(pd == vm->scratch_pd);
889 
890 		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
891 			continue;
892 
893 		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
894 		GEM_BUG_ON(!pdp->used_pdpes);
895 		pdp->used_pdpes--;
896 
897 		free_pd(vm, pd);
898 	}
899 
900 	return !pdp->used_pdpes;
901 }
902 
903 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
904 				  u64 start, u64 length)
905 {
906 	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
907 }
908 
909 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
910 				 struct i915_page_directory_pointer *pdp,
911 				 unsigned int pml4e)
912 {
913 	gen8_ppgtt_pml4e_t *vaddr;
914 
915 	pml4->pdps[pml4e] = pdp;
916 
917 	vaddr = kmap_atomic_px(pml4);
918 	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
919 	kunmap_atomic(vaddr);
920 }
921 
922 /* Removes entries from a single pml4.
923  * This is the top-level structure in 4-level page tables used on gen8+.
924  * Empty entries are always scratch pml4e.
925  */
926 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
927 				  u64 start, u64 length)
928 {
929 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
930 	struct i915_pml4 *pml4 = &ppgtt->pml4;
931 	struct i915_page_directory_pointer *pdp;
932 	unsigned int pml4e;
933 
934 	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
935 
936 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
937 		GEM_BUG_ON(pdp == vm->scratch_pdp);
938 
939 		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
940 			continue;
941 
942 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
943 
944 		free_pdp(vm, pdp);
945 	}
946 }
947 
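/*
 * Bundle of scatterlist-walking state used by the PTE insertion loops below:
 * the current sg element plus its [dma, max) device address window.
 */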
948 static inline struct sgt_dma {
949 	struct scatterlist *sg;
950 	dma_addr_t dma, max;
951 } sgt_dma(struct i915_vma *vma) {
952 	struct scatterlist *sg = vma->pages->sgl;
953 	dma_addr_t addr = sg_dma_address(sg);
954 	return (struct sgt_dma) { sg, addr, addr + sg->length };
955 }
956 
957 struct gen8_insert_pte {
958 	u16 pml4e;
959 	u16 pdpe;
960 	u16 pde;
961 	u16 pte;
962 };
963 
964 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
965 {
966 	return (struct gen8_insert_pte) {
967 		 gen8_pml4e_index(start),
968 		 gen8_pdpe_index(start),
969 		 gen8_pde_index(start),
970 		 gen8_pte_index(start),
971 	};
972 }
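/*
 * Worked example (assuming the usual gen8 4-level layout of 9 index bits per
 * level above the 4K page offset): an offset of
 *
 *	start = (1ull << 39) | (2ull << 30) | (3ull << 21) | (4ull << 12)
 *
 * decomposes into { .pml4e = 1, .pdpe = 2, .pde = 3, .pte = 4 }.
 */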
973 
974 static __always_inline bool
975 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
976 			      struct i915_page_directory_pointer *pdp,
977 			      struct sgt_dma *iter,
978 			      struct gen8_insert_pte *idx,
979 			      enum i915_cache_level cache_level,
980 			      u32 flags)
981 {
982 	struct i915_page_directory *pd;
983 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
984 	gen8_pte_t *vaddr;
985 	bool ret;
986 
987 	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
988 	pd = pdp->page_directory[idx->pdpe];
989 	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
990 	do {
991 		vaddr[idx->pte] = pte_encode | iter->dma;
992 
993 		iter->dma += I915_GTT_PAGE_SIZE;
994 		if (iter->dma >= iter->max) {
995 			iter->sg = __sg_next(iter->sg);
996 			if (!iter->sg) {
997 				ret = false;
998 				break;
999 			}
1000 
1001 			iter->dma = sg_dma_address(iter->sg);
1002 			iter->max = iter->dma + iter->sg->length;
1003 		}
1004 
1005 		if (++idx->pte == GEN8_PTES) {
1006 			idx->pte = 0;
1007 
1008 			if (++idx->pde == I915_PDES) {
1009 				idx->pde = 0;
1010 
1011 				/* Limited by sg length for 3lvl */
1012 				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
1013 					idx->pdpe = 0;
1014 					ret = true;
1015 					break;
1016 				}
1017 
1018 				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
1019 				pd = pdp->page_directory[idx->pdpe];
1020 			}
1021 
1022 			kunmap_atomic(vaddr);
1023 			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1024 		}
1025 	} while (1);
1026 	kunmap_atomic(vaddr);
1027 
1028 	return ret;
1029 }
1030 
1031 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1032 				   struct i915_vma *vma,
1033 				   enum i915_cache_level cache_level,
1034 				   u32 flags)
1035 {
1036 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1037 	struct sgt_dma iter = sgt_dma(vma);
1038 	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1039 
1040 	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1041 				      cache_level, flags);
1042 
1043 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1044 }
1045 
1046 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1047 					   struct i915_page_directory_pointer **pdps,
1048 					   struct sgt_dma *iter,
1049 					   enum i915_cache_level cache_level,
1050 					   u32 flags)
1051 {
1052 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1053 	u64 start = vma->node.start;
1054 	dma_addr_t rem = iter->sg->length;
1055 
1056 	do {
1057 		struct gen8_insert_pte idx = gen8_insert_pte(start);
1058 		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1059 		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1060 		unsigned int page_size;
1061 		bool maybe_64K = false;
1062 		gen8_pte_t encode = pte_encode;
1063 		gen8_pte_t *vaddr;
1064 		u16 index, max;
1065 
1066 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1067 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1068 		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1069 			index = idx.pde;
1070 			max = I915_PDES;
1071 			page_size = I915_GTT_PAGE_SIZE_2M;
1072 
1073 			encode |= GEN8_PDE_PS_2M;
1074 
1075 			vaddr = kmap_atomic_px(pd);
1076 		} else {
1077 			struct i915_page_table *pt = pd->page_table[idx.pde];
1078 
1079 			index = idx.pte;
1080 			max = GEN8_PTES;
1081 			page_size = I915_GTT_PAGE_SIZE;
1082 
1083 			if (!index &&
1084 			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1085 			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1086 			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1087 			     rem >= (max - index) * I915_GTT_PAGE_SIZE))
1088 				maybe_64K = true;
1089 
1090 			vaddr = kmap_atomic_px(pt);
1091 		}
1092 
1093 		do {
1094 			GEM_BUG_ON(iter->sg->length < page_size);
1095 			vaddr[index++] = encode | iter->dma;
1096 
1097 			start += page_size;
1098 			iter->dma += page_size;
1099 			rem -= page_size;
1100 			if (iter->dma >= iter->max) {
1101 				iter->sg = __sg_next(iter->sg);
1102 				if (!iter->sg)
1103 					break;
1104 
1105 				rem = iter->sg->length;
1106 				iter->dma = sg_dma_address(iter->sg);
1107 				iter->max = iter->dma + rem;
1108 
1109 				if (maybe_64K && index < max &&
1110 				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1111 				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1112 				       rem >= (max - index) * I915_GTT_PAGE_SIZE)))
1113 					maybe_64K = false;
1114 
1115 				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1116 					break;
1117 			}
1118 		} while (rem >= page_size && index < max);
1119 
1120 		kunmap_atomic(vaddr);
1121 
1122 		/*
1123 		 * Is it safe to mark the 2M block as 64K? -- Either we have
1124 		 * filled whole page-table with 64K entries, or filled part of
1125 		 * it and have reached the end of the sg table and we have
1126 		 * enough padding.
1127 		 */
1128 		if (maybe_64K &&
1129 		    (index == max ||
1130 		     (i915_vm_has_scratch_64K(vma->vm) &&
1131 		      !iter->sg && IS_ALIGNED(vma->node.start +
1132 					      vma->node.size,
1133 					      I915_GTT_PAGE_SIZE_2M)))) {
1134 			vaddr = kmap_atomic_px(pd);
1135 			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1136 			kunmap_atomic(vaddr);
1137 			page_size = I915_GTT_PAGE_SIZE_64K;
1138 
1139 			/*
1140 			 * We write all 4K page entries, even when using 64K
1141 			 * pages. In order to verify that the HW isn't cheating
1142 			 * by using the 4K PTE instead of the 64K PTE, we want
1143 			 * to remove all the surplus entries. If the HW skipped
1144 			 * the 64K PTE, it will read/write into the scratch page
1145 			 * instead - which we detect as missing results during
1146 			 * selftests.
1147 			 */
1148 			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1149 				u16 i;
1150 
1151 				encode = vma->vm->scratch_pte;
1152 				vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
1153 
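				/*
				 * Keep PTE 0 of each 16-entry group (the slot
				 * the HW uses in 64K mode) and reset the 15
				 * shadow entries behind it to scratch.
				 */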
1154 				for (i = 1; i < index; i += 16)
1155 					memset64(vaddr + i, encode, 15);
1156 
1157 				kunmap_atomic(vaddr);
1158 			}
1159 		}
1160 
1161 		vma->page_sizes.gtt |= page_size;
1162 	} while (iter->sg);
1163 }
1164 
1165 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1166 				   struct i915_vma *vma,
1167 				   enum i915_cache_level cache_level,
1168 				   u32 flags)
1169 {
1170 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1171 	struct sgt_dma iter = sgt_dma(vma);
1172 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1173 
1174 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1175 		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
1176 					       flags);
1177 	} else {
1178 		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1179 
1180 		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1181 						     &iter, &idx, cache_level,
1182 						     flags))
1183 			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1184 
1185 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1186 	}
1187 }
1188 
1189 static void gen8_free_page_tables(struct i915_address_space *vm,
1190 				  struct i915_page_directory *pd)
1191 {
1192 	int i;
1193 
1194 	for (i = 0; i < I915_PDES; i++) {
1195 		if (pd->page_table[i] != vm->scratch_pt)
1196 			free_pt(vm, pd->page_table[i]);
1197 	}
1198 }
1199 
1200 static int gen8_init_scratch(struct i915_address_space *vm)
1201 {
1202 	int ret;
1203 
1204 	/*
1205 	 * If everybody agrees not to write into the scratch page,
1206 	 * we can reuse it for all vm, keeping contexts and processes separate.
1207 	 */
1208 	if (vm->has_read_only &&
1209 	    vm->i915->kernel_context &&
1210 	    vm->i915->kernel_context->ppgtt) {
1211 		struct i915_address_space *clone =
1212 			&vm->i915->kernel_context->ppgtt->vm;
1213 
1214 		GEM_BUG_ON(!clone->has_read_only);
1215 
1216 		vm->scratch_order = clone->scratch_order;
1217 		vm->scratch_pte = clone->scratch_pte;
1218 		vm->scratch_pt  = clone->scratch_pt;
1219 		vm->scratch_pd  = clone->scratch_pd;
1220 		vm->scratch_pdp = clone->scratch_pdp;
1221 		return 0;
1222 	}
1223 
1224 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1225 	if (ret)
1226 		return ret;
1227 
1228 	vm->scratch_pte =
1229 		gen8_pte_encode(vm->scratch_page.daddr,
1230 				I915_CACHE_LLC,
1231 				PTE_READ_ONLY);
1232 
1233 	vm->scratch_pt = alloc_pt(vm);
1234 	if (IS_ERR(vm->scratch_pt)) {
1235 		ret = PTR_ERR(vm->scratch_pt);
1236 		goto free_scratch_page;
1237 	}
1238 
1239 	vm->scratch_pd = alloc_pd(vm);
1240 	if (IS_ERR(vm->scratch_pd)) {
1241 		ret = PTR_ERR(vm->scratch_pd);
1242 		goto free_pt;
1243 	}
1244 
1245 	if (i915_vm_is_4lvl(vm)) {
1246 		vm->scratch_pdp = alloc_pdp(vm);
1247 		if (IS_ERR(vm->scratch_pdp)) {
1248 			ret = PTR_ERR(vm->scratch_pdp);
1249 			goto free_pd;
1250 		}
1251 	}
1252 
1253 	gen8_initialize_pt(vm, vm->scratch_pt);
1254 	gen8_initialize_pd(vm, vm->scratch_pd);
1255 	if (i915_vm_is_4lvl(vm))
1256 		gen8_initialize_pdp(vm, vm->scratch_pdp);
1257 
1258 	return 0;
1259 
1260 free_pd:
1261 	free_pd(vm, vm->scratch_pd);
1262 free_pt:
1263 	free_pt(vm, vm->scratch_pt);
1264 free_scratch_page:
1265 	cleanup_scratch_page(vm);
1266 
1267 	return ret;
1268 }
1269 
1270 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1271 {
1272 	struct i915_address_space *vm = &ppgtt->vm;
1273 	struct drm_i915_private *dev_priv = vm->i915;
1274 	enum vgt_g2v_type msg;
1275 	int i;
1276 
1277 	if (i915_vm_is_4lvl(vm)) {
1278 		const u64 daddr = px_dma(&ppgtt->pml4);
1279 
1280 		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1281 		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1282 
1283 		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1284 				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1285 	} else {
1286 		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1287 			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1288 
1289 			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1290 			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1291 		}
1292 
1293 		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1294 				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1295 	}
1296 
1297 	I915_WRITE(vgtif_reg(g2v_notify), msg);
1298 
1299 	return 0;
1300 }
1301 
1302 static void gen8_free_scratch(struct i915_address_space *vm)
1303 {
1304 	if (!vm->scratch_page.daddr)
1305 		return;
1306 
1307 	if (i915_vm_is_4lvl(vm))
1308 		free_pdp(vm, vm->scratch_pdp);
1309 	free_pd(vm, vm->scratch_pd);
1310 	free_pt(vm, vm->scratch_pt);
1311 	cleanup_scratch_page(vm);
1312 }
1313 
1314 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1315 				    struct i915_page_directory_pointer *pdp)
1316 {
1317 	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1318 	int i;
1319 
1320 	for (i = 0; i < pdpes; i++) {
1321 		if (pdp->page_directory[i] == vm->scratch_pd)
1322 			continue;
1323 
1324 		gen8_free_page_tables(vm, pdp->page_directory[i]);
1325 		free_pd(vm, pdp->page_directory[i]);
1326 	}
1327 
1328 	free_pdp(vm, pdp);
1329 }
1330 
1331 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1332 {
1333 	int i;
1334 
1335 	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1336 		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
1337 			continue;
1338 
1339 		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
1340 	}
1341 
1342 	cleanup_px(&ppgtt->vm, &ppgtt->pml4);
1343 }
1344 
1345 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1346 {
1347 	struct drm_i915_private *dev_priv = vm->i915;
1348 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1349 
1350 	if (intel_vgpu_active(dev_priv))
1351 		gen8_ppgtt_notify_vgt(ppgtt, false);
1352 
1353 	if (i915_vm_is_4lvl(vm))
1354 		gen8_ppgtt_cleanup_4lvl(ppgtt);
1355 	else
1356 		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
1357 
1358 	gen8_free_scratch(vm);
1359 }
1360 
1361 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1362 			       struct i915_page_directory *pd,
1363 			       u64 start, u64 length)
1364 {
1365 	struct i915_page_table *pt;
1366 	u64 from = start;
1367 	unsigned int pde;
1368 
1369 	gen8_for_each_pde(pt, pd, start, length, pde) {
1370 		int count = gen8_pte_count(start, length);
1371 
1372 		if (pt == vm->scratch_pt) {
1373 			pd->used_pdes++;
1374 
1375 			pt = alloc_pt(vm);
1376 			if (IS_ERR(pt)) {
1377 				pd->used_pdes--;
1378 				goto unwind;
1379 			}
1380 
1381 			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1382 				gen8_initialize_pt(vm, pt);
1383 
1384 			gen8_ppgtt_set_pde(vm, pd, pt, pde);
1385 			GEM_BUG_ON(pd->used_pdes > I915_PDES);
1386 		}
1387 
1388 		pt->used_ptes += count;
1389 	}
1390 	return 0;
1391 
1392 unwind:
1393 	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1394 	return -ENOMEM;
1395 }
1396 
1397 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1398 				struct i915_page_directory_pointer *pdp,
1399 				u64 start, u64 length)
1400 {
1401 	struct i915_page_directory *pd;
1402 	u64 from = start;
1403 	unsigned int pdpe;
1404 	int ret;
1405 
1406 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1407 		if (pd == vm->scratch_pd) {
1408 			pdp->used_pdpes++;
1409 
1410 			pd = alloc_pd(vm);
1411 			if (IS_ERR(pd)) {
1412 				pdp->used_pdpes--;
1413 				goto unwind;
1414 			}
1415 
1416 			gen8_initialize_pd(vm, pd);
1417 			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1418 			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1419 		}
1420 
1421 		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1422 		if (unlikely(ret))
1423 			goto unwind_pd;
1424 	}
1425 
1426 	return 0;
1427 
1428 unwind_pd:
1429 	if (!pd->used_pdes) {
1430 		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1431 		GEM_BUG_ON(!pdp->used_pdpes);
1432 		pdp->used_pdpes--;
1433 		free_pd(vm, pd);
1434 	}
1435 unwind:
1436 	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1437 	return -ENOMEM;
1438 }
1439 
1440 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1441 				 u64 start, u64 length)
1442 {
1443 	return gen8_ppgtt_alloc_pdp(vm,
1444 				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
1445 }
1446 
1447 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1448 				 u64 start, u64 length)
1449 {
1450 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1451 	struct i915_pml4 *pml4 = &ppgtt->pml4;
1452 	struct i915_page_directory_pointer *pdp;
1453 	u64 from = start;
1454 	u32 pml4e;
1455 	int ret;
1456 
1457 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1458 		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1459 			pdp = alloc_pdp(vm);
1460 			if (IS_ERR(pdp))
1461 				goto unwind;
1462 
1463 			gen8_initialize_pdp(vm, pdp);
1464 			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1465 		}
1466 
1467 		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1468 		if (unlikely(ret))
1469 			goto unwind_pdp;
1470 	}
1471 
1472 	return 0;
1473 
1474 unwind_pdp:
1475 	if (!pdp->used_pdpes) {
1476 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1477 		free_pdp(vm, pdp);
1478 	}
1479 unwind:
1480 	gen8_ppgtt_clear_4lvl(vm, from, start - from);
1481 	return -ENOMEM;
1482 }
1483 
1484 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1485 {
1486 	struct i915_address_space *vm = &ppgtt->vm;
1487 	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1488 	struct i915_page_directory *pd;
1489 	u64 start = 0, length = ppgtt->vm.total;
1490 	u64 from = start;
1491 	unsigned int pdpe;
1492 
1493 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1494 		pd = alloc_pd(vm);
1495 		if (IS_ERR(pd))
1496 			goto unwind;
1497 
1498 		gen8_initialize_pd(vm, pd);
1499 		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1500 		pdp->used_pdpes++;
1501 	}
1502 
1503 	pdp->used_pdpes++; /* never remove */
1504 	return 0;
1505 
1506 unwind:
1507 	start -= from;
1508 	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1509 		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1510 		free_pd(vm, pd);
1511 	}
1512 	pdp->used_pdpes = 0;
1513 	return -ENOMEM;
1514 }
1515 
1516 static void ppgtt_init(struct drm_i915_private *i915,
1517 		       struct i915_hw_ppgtt *ppgtt)
1518 {
1519 	kref_init(&ppgtt->ref);
1520 
1521 	ppgtt->vm.i915 = i915;
1522 	ppgtt->vm.dma = &i915->drm.pdev->dev;
1523 	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1524 
1525 	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1526 
1527 	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1528 	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
1529 	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
1530 	ppgtt->vm.vma_ops.clear_pages = clear_pages;
1531 }
1532 
1533 /*
1534  * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
1535  * registers, with a net effect resembling a 2-level page table in normal x86
1536  * terms. Each PDP entry covers 1GB of memory; 4 * 512 * 512 * 4096 bytes
1537  * = 4GB, the legacy 32b address space.
1538  *
1539  */
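/*
 * Spelling out that arithmetic: 512 PTEs x 4096 bytes = 2M per page table,
 * 512 page tables per page directory = 1G per PDP entry, and 4 PDP entries
 * = 4G, covering the legacy 32b address space. The 4-level mode adds a PML4
 * of 512 PDP pointers on top for a full 48b address space.
 */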
1540 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1541 {
1542 	struct i915_hw_ppgtt *ppgtt;
1543 	int err;
1544 
1545 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1546 	if (!ppgtt)
1547 		return ERR_PTR(-ENOMEM);
1548 
1549 	ppgtt_init(i915, ppgtt);
1550 
1551 	/* From bdw, there is support for read-only pages in the PPGTT. */
1552 	ppgtt->vm.has_read_only = true;
1553 
1554 	/* There are only a few exceptions for gen >= 6: chv and bxt.
1555 	 * And we are not sure about the latter, so play safe for now.
1556 	 */
1557 	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1558 		ppgtt->vm.pt_kmap_wc = true;
1559 
1560 	err = gen8_init_scratch(&ppgtt->vm);
1561 	if (err)
1562 		goto err_free;
1563 
1564 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
1565 		err = setup_px(&ppgtt->vm, &ppgtt->pml4);
1566 		if (err)
1567 			goto err_scratch;
1568 
1569 		gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
1570 
1571 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1572 		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
1573 		ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1574 	} else {
1575 		err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
1576 		if (err)
1577 			goto err_scratch;
1578 
1579 		if (intel_vgpu_active(i915)) {
1580 			err = gen8_preallocate_top_level_pdp(ppgtt);
1581 			if (err) {
1582 				__pdp_fini(&ppgtt->pdp);
1583 				goto err_scratch;
1584 			}
1585 		}
1586 
1587 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1588 		ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
1589 		ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1590 	}
1591 
1592 	if (intel_vgpu_active(i915))
1593 		gen8_ppgtt_notify_vgt(ppgtt, true);
1594 
1595 	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1596 
1597 	return ppgtt;
1598 
1599 err_scratch:
1600 	gen8_free_scratch(&ppgtt->vm);
1601 err_free:
1602 	kfree(ppgtt);
1603 	return ERR_PTR(err);
1604 }
1605 
1606 /* Write pde (index) from the page directory @pd to the page table @pt */
1607 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1608 				  const unsigned int pde,
1609 				  const struct i915_page_table *pt)
1610 {
1611 	/* Caller needs to make sure the write completes if necessary */
1612 	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1613 		  ppgtt->pd_addr + pde);
1614 }
1615 
1616 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1617 {
1618 	struct intel_engine_cs *engine;
1619 	u32 ecochk, ecobits;
1620 	enum intel_engine_id id;
1621 
1622 	ecobits = I915_READ(GAC_ECO_BITS);
1623 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1624 
1625 	ecochk = I915_READ(GAM_ECOCHK);
1626 	if (IS_HASWELL(dev_priv)) {
1627 		ecochk |= ECOCHK_PPGTT_WB_HSW;
1628 	} else {
1629 		ecochk |= ECOCHK_PPGTT_LLC_IVB;
1630 		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1631 	}
1632 	I915_WRITE(GAM_ECOCHK, ecochk);
1633 
1634 	for_each_engine(engine, dev_priv, id) {
1635 		/* GFX_MODE is per-ring on gen7+ */
1636 		I915_WRITE(RING_MODE_GEN7(engine),
1637 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1638 	}
1639 }
1640 
1641 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1642 {
1643 	u32 ecochk, gab_ctl, ecobits;
1644 
1645 	ecobits = I915_READ(GAC_ECO_BITS);
1646 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1647 		   ECOBITS_PPGTT_CACHE64B);
1648 
1649 	gab_ctl = I915_READ(GAB_CTL);
1650 	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1651 
1652 	ecochk = I915_READ(GAM_ECOCHK);
1653 	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1654 
1655 	if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
1656 		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1657 }
1658 
1659 /* PPGTT support for Sandybridge/Gen6 and later */
1660 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1661 				   u64 start, u64 length)
1662 {
1663 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1664 	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1665 	unsigned int pde = first_entry / GEN6_PTES;
1666 	unsigned int pte = first_entry % GEN6_PTES;
1667 	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1668 	const gen6_pte_t scratch_pte = vm->scratch_pte;
1669 
1670 	while (num_entries) {
1671 		struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
1672 		const unsigned int count = min(num_entries, GEN6_PTES - pte);
1673 		gen6_pte_t *vaddr;
1674 
1675 		GEM_BUG_ON(pt == vm->scratch_pt);
1676 
1677 		num_entries -= count;
1678 
1679 		GEM_BUG_ON(count > pt->used_ptes);
1680 		pt->used_ptes -= count;
1681 		if (!pt->used_ptes)
1682 			ppgtt->scan_for_unused_pt = true;
1683 
1684 		/*
1685 		 * Note that the hw doesn't support removing PDE on the fly
1686 		 * (they are cached inside the context with no means to
1687 		 * invalidate the cache), so we can only reset the PTE
1688 		 * entries back to scratch.
1689 		 */
1690 
1691 		vaddr = kmap_atomic_px(pt);
1692 		memset32(vaddr + pte, scratch_pte, count);
1693 		kunmap_atomic(vaddr);
1694 
1695 		pte = 0;
1696 	}
1697 }
1698 
1699 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1700 				      struct i915_vma *vma,
1701 				      enum i915_cache_level cache_level,
1702 				      u32 flags)
1703 {
1704 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1705 	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1706 	unsigned act_pt = first_entry / GEN6_PTES;
1707 	unsigned act_pte = first_entry % GEN6_PTES;
1708 	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1709 	struct sgt_dma iter = sgt_dma(vma);
1710 	gen6_pte_t *vaddr;
1711 
1712 	GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
1713 
1714 	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1715 	do {
1716 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1717 
1718 		iter.dma += I915_GTT_PAGE_SIZE;
1719 		if (iter.dma == iter.max) {
1720 			iter.sg = __sg_next(iter.sg);
1721 			if (!iter.sg)
1722 				break;
1723 
1724 			iter.dma = sg_dma_address(iter.sg);
1725 			iter.max = iter.dma + iter.sg->length;
1726 		}
1727 
1728 		if (++act_pte == GEN6_PTES) {
1729 			kunmap_atomic(vaddr);
1730 			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1731 			act_pte = 0;
1732 		}
1733 	} while (1);
1734 	kunmap_atomic(vaddr);
1735 
1736 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1737 }
1738 
1739 static int gen6_alloc_va_range(struct i915_address_space *vm,
1740 			       u64 start, u64 length)
1741 {
1742 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1743 	struct i915_page_table *pt;
1744 	u64 from = start;
1745 	unsigned int pde;
1746 	bool flush = false;
1747 
1748 	gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
1749 		const unsigned int count = gen6_pte_count(start, length);
1750 
1751 		if (pt == vm->scratch_pt) {
1752 			pt = alloc_pt(vm);
1753 			if (IS_ERR(pt))
1754 				goto unwind_out;
1755 
1756 			gen6_initialize_pt(vm, pt);
1757 			ppgtt->base.pd.page_table[pde] = pt;
1758 
1759 			if (i915_vma_is_bound(ppgtt->vma,
1760 					      I915_VMA_GLOBAL_BIND)) {
1761 				gen6_write_pde(ppgtt, pde, pt);
1762 				flush = true;
1763 			}
1764 
1765 			GEM_BUG_ON(pt->used_ptes);
1766 		}
1767 
1768 		pt->used_ptes += count;
1769 	}
1770 
1771 	if (flush) {
1772 		mark_tlbs_dirty(&ppgtt->base);
1773 		gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1774 	}
1775 
1776 	return 0;
1777 
1778 unwind_out:
1779 	gen6_ppgtt_clear_range(vm, from, start - from);
1780 	return -ENOMEM;
1781 }
1782 
1783 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1784 {
1785 	struct i915_address_space * const vm = &ppgtt->base.vm;
1786 	struct i915_page_table *unused;
1787 	u32 pde;
1788 	int ret;
1789 
1790 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1791 	if (ret)
1792 		return ret;
1793 
1794 	vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1795 					 I915_CACHE_NONE,
1796 					 PTE_READ_ONLY);
1797 
1798 	vm->scratch_pt = alloc_pt(vm);
1799 	if (IS_ERR(vm->scratch_pt)) {
1800 		cleanup_scratch_page(vm);
1801 		return PTR_ERR(vm->scratch_pt);
1802 	}
1803 
1804 	gen6_initialize_pt(vm, vm->scratch_pt);
1805 	gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1806 		ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1807 
1808 	return 0;
1809 }
1810 
1811 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1812 {
1813 	free_pt(vm, vm->scratch_pt);
1814 	cleanup_scratch_page(vm);
1815 }
1816 
1817 static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
1818 {
1819 	struct i915_page_table *pt;
1820 	u32 pde;
1821 
1822 	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1823 		if (pt != ppgtt->base.vm.scratch_pt)
1824 			free_pt(&ppgtt->base.vm, pt);
1825 }
1826 
1827 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1828 {
1829 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1830 
1831 	i915_vma_destroy(ppgtt->vma);
1832 
1833 	gen6_ppgtt_free_pd(ppgtt);
1834 	gen6_ppgtt_free_scratch(vm);
1835 }
1836 
1837 static int pd_vma_set_pages(struct i915_vma *vma)
1838 {
1839 	vma->pages = ERR_PTR(-ENODEV);
1840 	return 0;
1841 }
1842 
1843 static void pd_vma_clear_pages(struct i915_vma *vma)
1844 {
1845 	GEM_BUG_ON(!vma->pages);
1846 
1847 	vma->pages = NULL;
1848 }
1849 
1850 static int pd_vma_bind(struct i915_vma *vma,
1851 		       enum i915_cache_level cache_level,
1852 		       u32 unused)
1853 {
1854 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1855 	struct gen6_hw_ppgtt *ppgtt = vma->private;
1856 	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1857 	struct i915_page_table *pt;
1858 	unsigned int pde;
1859 
1860 	ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1861 	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1862 
1863 	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1864 		gen6_write_pde(ppgtt, pde, pt);
1865 
1866 	mark_tlbs_dirty(&ppgtt->base);
1867 	gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1868 
1869 	return 0;
1870 }
1871 
1872 static void pd_vma_unbind(struct i915_vma *vma)
1873 {
1874 	struct gen6_hw_ppgtt *ppgtt = vma->private;
1875 	struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
1876 	struct i915_page_table *pt;
1877 	unsigned int pde;
1878 
1879 	if (!ppgtt->scan_for_unused_pt)
1880 		return;
1881 
1882 	/* Free all no longer used page tables */
1883 	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
1884 		if (pt->used_ptes || pt == scratch_pt)
1885 			continue;
1886 
1887 		free_pt(&ppgtt->base.vm, pt);
1888 		ppgtt->base.pd.page_table[pde] = scratch_pt;
1889 	}
1890 
1891 	ppgtt->scan_for_unused_pt = false;
1892 }
1893 
1894 static const struct i915_vma_ops pd_vma_ops = {
1895 	.set_pages = pd_vma_set_pages,
1896 	.clear_pages = pd_vma_clear_pages,
1897 	.bind_vma = pd_vma_bind,
1898 	.unbind_vma = pd_vma_unbind,
1899 };
1900 
1901 static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
1902 {
1903 	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1904 	struct i915_ggtt *ggtt = &i915->ggtt;
1905 	struct i915_vma *vma;
1906 
1907 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
1908 	GEM_BUG_ON(size > ggtt->vm.total);
1909 
1910 	vma = i915_vma_alloc();
1911 	if (!vma)
1912 		return ERR_PTR(-ENOMEM);
1913 
1914 	i915_active_init(i915, &vma->active, NULL);
1915 	INIT_ACTIVE_REQUEST(&vma->last_fence);
1916 
1917 	vma->vm = &ggtt->vm;
1918 	vma->ops = &pd_vma_ops;
1919 	vma->private = ppgtt;
1920 
1921 	vma->size = size;
1922 	vma->fence_size = size;
1923 	vma->flags = I915_VMA_GGTT;
1924 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
1925 
1926 	INIT_LIST_HEAD(&vma->obj_link);
1927 
1928 	mutex_lock(&vma->vm->mutex);
1929 	list_add(&vma->vm_link, &vma->vm->unbound_list);
1930 	mutex_unlock(&vma->vm->mutex);
1931 
1932 	return vma;
1933 }
1934 
1935 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
1936 {
1937 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1938 	int err;
1939 
1940 	GEM_BUG_ON(ppgtt->base.vm.closed);
1941 
1942 	/*
1943 	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
1944 	 * which will be pinned into every active context.
1945 	 * (When vma->pin_count becomes atomic, I expect we will naturally
1946 	 * need a larger, unpacked, type and kill this redundancy.)
1947 	 */
1948 	if (ppgtt->pin_count++)
1949 		return 0;
1950 
1951 	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1953 	 * allocator works in address space sizes, so it's multiplied by page
1954 	 * size. We allocate at the top of the GTT to avoid fragmentation.
1955 	 */
1956 	err = i915_vma_pin(ppgtt->vma,
1957 			   0, GEN6_PD_ALIGN,
1958 			   PIN_GLOBAL | PIN_HIGH);
1959 	if (err)
1960 		goto unpin;
1961 
1962 	return 0;
1963 
1964 unpin:
1965 	ppgtt->pin_count = 0;
1966 	return err;
1967 }
1968 
1969 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
1970 {
1971 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1972 
1973 	GEM_BUG_ON(!ppgtt->pin_count);
1974 	if (--ppgtt->pin_count)
1975 		return;
1976 
1977 	i915_vma_unpin(ppgtt->vma);
1978 }
1979 
1980 void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
1981 {
1982 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1983 
1984 	if (!ppgtt->pin_count)
1985 		return;
1986 
1987 	ppgtt->pin_count = 0;
1988 	i915_vma_unpin(ppgtt->vma);
1989 }
1990 
1991 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
1992 {
1993 	struct i915_ggtt * const ggtt = &i915->ggtt;
1994 	struct gen6_hw_ppgtt *ppgtt;
1995 	int err;
1996 
1997 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1998 	if (!ppgtt)
1999 		return ERR_PTR(-ENOMEM);
2000 
2001 	ppgtt_init(i915, &ppgtt->base);
2002 
2003 	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
2004 	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2005 	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2006 	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2007 
2008 	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
2009 
2010 	err = gen6_ppgtt_init_scratch(ppgtt);
2011 	if (err)
2012 		goto err_free;
2013 
2014 	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
2015 	if (IS_ERR(ppgtt->vma)) {
2016 		err = PTR_ERR(ppgtt->vma);
2017 		goto err_scratch;
2018 	}
2019 
2020 	return &ppgtt->base;
2021 
2022 err_scratch:
2023 	gen6_ppgtt_free_scratch(&ppgtt->base.vm);
2024 err_free:
2025 	kfree(ppgtt);
2026 	return ERR_PTR(err);
2027 }
2028 
2029 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2030 {
	/* This function is for GTT-related workarounds. It is called on driver
	 * load and after a GPU reset, so workarounds can be placed here even
	 * though they get overwritten by a GPU reset.
	 */
2035 	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2036 	if (IS_BROADWELL(dev_priv))
2037 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2038 	else if (IS_CHERRYVIEW(dev_priv))
2039 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2040 	else if (IS_GEN9_LP(dev_priv))
2041 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2042 	else if (INTEL_GEN(dev_priv) >= 9)
2043 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2044 
2045 	/*
2046 	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
2048 	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2049 	 * shouldn't be needed after GEN10.
2050 	 *
2051 	 * 64K pages were first introduced from BDW+, although technically they
2052 	 * only *work* from gen9+. For pre-BDW we instead have the option for
2053 	 * 32K pages, but we don't currently have any support for it in our
2054 	 * driver.
2055 	 */
2056 	if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2057 	    INTEL_GEN(dev_priv) <= 10)
2058 		I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2059 			   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2060 			   GAMW_ECO_ENABLE_64K_IPS_FIELD);
2061 }
2062 
2063 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2064 {
2065 	gtt_write_workarounds(dev_priv);
2066 
2067 	if (IS_GEN(dev_priv, 6))
2068 		gen6_ppgtt_enable(dev_priv);
2069 	else if (IS_GEN(dev_priv, 7))
2070 		gen7_ppgtt_enable(dev_priv);
2071 
2072 	return 0;
2073 }
2074 
2075 static struct i915_hw_ppgtt *
2076 __hw_ppgtt_create(struct drm_i915_private *i915)
2077 {
2078 	if (INTEL_GEN(i915) < 8)
2079 		return gen6_ppgtt_create(i915);
2080 	else
2081 		return gen8_ppgtt_create(i915);
2082 }
2083 
2084 struct i915_hw_ppgtt *
2085 i915_ppgtt_create(struct drm_i915_private *i915)
2086 {
2087 	struct i915_hw_ppgtt *ppgtt;
2088 
2089 	ppgtt = __hw_ppgtt_create(i915);
2090 	if (IS_ERR(ppgtt))
2091 		return ppgtt;
2092 
2093 	trace_i915_ppgtt_create(&ppgtt->vm);
2094 
2095 	return ppgtt;
2096 }
2097 
2098 static void ppgtt_destroy_vma(struct i915_address_space *vm)
2099 {
2100 	struct list_head *phases[] = {
2101 		&vm->bound_list,
2102 		&vm->unbound_list,
2103 		NULL,
2104 	}, **phase;
2105 
2106 	vm->closed = true;
2107 	for (phase = phases; *phase; phase++) {
2108 		struct i915_vma *vma, *vn;
2109 
2110 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
2111 			i915_vma_destroy(vma);
2112 	}
2113 }
2114 
2115 void i915_ppgtt_release(struct kref *kref)
2116 {
2117 	struct i915_hw_ppgtt *ppgtt =
2118 		container_of(kref, struct i915_hw_ppgtt, ref);
2119 
2120 	trace_i915_ppgtt_release(&ppgtt->vm);
2121 
2122 	ppgtt_destroy_vma(&ppgtt->vm);
2123 
2124 	GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
2125 	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
2126 
2127 	ppgtt->vm.cleanup(&ppgtt->vm);
2128 	i915_address_space_fini(&ppgtt->vm);
2129 	kfree(ppgtt);
2130 }
2131 
/* Certain Gen5 chipsets require idling the GPU before
2133  * unmapping anything from the GTT when VT-d is enabled.
2134  */
2135 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2136 {
2137 	/* Query intel_iommu to see if we need the workaround. Presumably that
2138 	 * was loaded first.
2139 	 */
2140 	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2141 }
2142 
2143 static void gen6_check_faults(struct drm_i915_private *dev_priv)
2144 {
2145 	struct intel_engine_cs *engine;
2146 	enum intel_engine_id id;
2147 	u32 fault;
2148 
2149 	for_each_engine(engine, dev_priv, id) {
2150 		fault = I915_READ(RING_FAULT_REG(engine));
2151 		if (fault & RING_FAULT_VALID) {
2152 			DRM_DEBUG_DRIVER("Unexpected fault\n"
2153 					 "\tAddr: 0x%08lx\n"
2154 					 "\tAddress space: %s\n"
2155 					 "\tSource ID: %d\n"
2156 					 "\tType: %d\n",
2157 					 fault & PAGE_MASK,
2158 					 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2159 					 RING_FAULT_SRCID(fault),
2160 					 RING_FAULT_FAULT_TYPE(fault));
2161 		}
2162 	}
2163 }
2164 
2165 static void gen8_check_faults(struct drm_i915_private *dev_priv)
2166 {
2167 	u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2168 
2169 	if (fault & RING_FAULT_VALID) {
2170 		u32 fault_data0, fault_data1;
2171 		u64 fault_addr;
2172 
2173 		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
2174 		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
2175 		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
2176 			     ((u64)fault_data0 << 12);
2177 
2178 		DRM_DEBUG_DRIVER("Unexpected fault\n"
2179 				 "\tAddr: 0x%08x_%08x\n"
2180 				 "\tAddress space: %s\n"
2181 				 "\tEngine ID: %d\n"
2182 				 "\tSource ID: %d\n"
2183 				 "\tType: %d\n",
2184 				 upper_32_bits(fault_addr),
2185 				 lower_32_bits(fault_addr),
2186 				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
2187 				 GEN8_RING_FAULT_ENGINE_ID(fault),
2188 				 RING_FAULT_SRCID(fault),
2189 				 RING_FAULT_FAULT_TYPE(fault));
2190 	}
2191 }
2192 
2193 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2194 {
2195 	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
2196 	if (INTEL_GEN(dev_priv) >= 8)
2197 		gen8_check_faults(dev_priv);
2198 	else if (INTEL_GEN(dev_priv) >= 6)
2199 		gen6_check_faults(dev_priv);
2200 	else
2201 		return;
2202 
2203 	i915_clear_error_registers(dev_priv);
2204 }
2205 
2206 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2207 {
2208 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2209 
2210 	/* Don't bother messing with faults pre GEN6 as we have little
2211 	 * documentation supporting that it's a good idea.
2212 	 */
2213 	if (INTEL_GEN(dev_priv) < 6)
2214 		return;
2215 
2216 	i915_check_and_clear_faults(dev_priv);
2217 
2218 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2219 
2220 	i915_ggtt_invalidate(dev_priv);
2221 }
2222 
2223 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2224 			       struct sg_table *pages)
2225 {
2226 	do {
2227 		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2228 				     pages->sgl, pages->nents,
2229 				     PCI_DMA_BIDIRECTIONAL,
2230 				     DMA_ATTR_NO_WARN))
2231 			return 0;
2232 
2233 		/*
2234 		 * If the DMA remap fails, one cause can be that we have
2235 		 * too many objects pinned in a small remapping table,
2236 		 * such as swiotlb. Incrementally purge all other objects and
2237 		 * try again - if there are no more pages to remove from
2238 		 * the DMA remapper, i915_gem_shrink will return 0.
2239 		 */
2240 		GEM_BUG_ON(obj->mm.pages == pages);
2241 	} while (i915_gem_shrink(to_i915(obj->base.dev),
2242 				 obj->base.size >> PAGE_SHIFT, NULL,
2243 				 I915_SHRINK_BOUND |
2244 				 I915_SHRINK_UNBOUND));
2245 
2246 	return -ENOSPC;
2247 }
2248 
2249 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2250 {
2251 	writeq(pte, addr);
2252 }
2253 
2254 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2255 				  dma_addr_t addr,
2256 				  u64 offset,
2257 				  enum i915_cache_level level,
2258 				  u32 unused)
2259 {
2260 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2261 	gen8_pte_t __iomem *pte =
2262 		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2263 
2264 	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2265 
2266 	ggtt->invalidate(vm->i915);
2267 }
2268 
2269 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2270 				     struct i915_vma *vma,
2271 				     enum i915_cache_level level,
2272 				     u32 flags)
2273 {
2274 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2275 	struct sgt_iter sgt_iter;
2276 	gen8_pte_t __iomem *gtt_entries;
2277 	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2278 	dma_addr_t addr;
2279 
2280 	/*
2281 	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2282 	 * not to allow the user to override access to a read only page.
2283 	 */
2284 
2285 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2286 	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2287 	for_each_sgt_dma(addr, sgt_iter, vma->pages)
2288 		gen8_set_pte(gtt_entries++, pte_encode | addr);
2289 
2290 	/*
2291 	 * We want to flush the TLBs only after we're certain all the PTE
2292 	 * updates have finished.
2293 	 */
2294 	ggtt->invalidate(vm->i915);
2295 }
2296 
2297 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2298 				  dma_addr_t addr,
2299 				  u64 offset,
2300 				  enum i915_cache_level level,
2301 				  u32 flags)
2302 {
2303 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2304 	gen6_pte_t __iomem *pte =
2305 		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2306 
2307 	iowrite32(vm->pte_encode(addr, level, flags), pte);
2308 
2309 	ggtt->invalidate(vm->i915);
2310 }
2311 
2312 /*
 * Binds an object into the global GTT with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT, as well as accessible to the CPU through the
 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
2317  */
2318 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2319 				     struct i915_vma *vma,
2320 				     enum i915_cache_level level,
2321 				     u32 flags)
2322 {
2323 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2324 	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2325 	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2326 	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, iter, vma->pages)
2329 		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2330 
2331 	/*
2332 	 * We want to flush the TLBs only after we're certain all the PTE
2333 	 * updates have finished.
2334 	 */
2335 	ggtt->invalidate(vm->i915);
2336 }
2337 
2338 static void nop_clear_range(struct i915_address_space *vm,
2339 			    u64 start, u64 length)
2340 {
2341 }
2342 
2343 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2344 				  u64 start, u64 length)
2345 {
2346 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2347 	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2348 	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2349 	const gen8_pte_t scratch_pte = vm->scratch_pte;
2350 	gen8_pte_t __iomem *gtt_base =
2351 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2352 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2353 	int i;
2354 
2355 	if (WARN(num_entries > max_entries,
2356 		 "First entry = %d; Num entries = %d (max=%d)\n",
2357 		 first_entry, num_entries, max_entries))
2358 		num_entries = max_entries;
2359 
2360 	for (i = 0; i < num_entries; i++)
2361 		gen8_set_pte(&gtt_base[i], scratch_pte);
2362 }
2363 
2364 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2365 {
2366 	struct drm_i915_private *dev_priv = vm->i915;
2367 
2368 	/*
2369 	 * Make sure the internal GAM fifo has been cleared of all GTT
2370 	 * writes before exiting stop_machine(). This guarantees that
2371 	 * any aperture accesses waiting to start in another process
2372 	 * cannot back up behind the GTT writes causing a hang.
2373 	 * The register can be any arbitrary GAM register.
2374 	 */
2375 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2376 }
2377 
2378 struct insert_page {
2379 	struct i915_address_space *vm;
2380 	dma_addr_t addr;
2381 	u64 offset;
2382 	enum i915_cache_level level;
2383 };
2384 
2385 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2386 {
2387 	struct insert_page *arg = _arg;
2388 
2389 	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2390 	bxt_vtd_ggtt_wa(arg->vm);
2391 
2392 	return 0;
2393 }
2394 
2395 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2396 					  dma_addr_t addr,
2397 					  u64 offset,
2398 					  enum i915_cache_level level,
2399 					  u32 unused)
2400 {
2401 	struct insert_page arg = { vm, addr, offset, level };
2402 
2403 	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2404 }
2405 
2406 struct insert_entries {
2407 	struct i915_address_space *vm;
2408 	struct i915_vma *vma;
2409 	enum i915_cache_level level;
2410 	u32 flags;
2411 };
2412 
2413 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2414 {
2415 	struct insert_entries *arg = _arg;
2416 
2417 	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2418 	bxt_vtd_ggtt_wa(arg->vm);
2419 
2420 	return 0;
2421 }
2422 
2423 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2424 					     struct i915_vma *vma,
2425 					     enum i915_cache_level level,
2426 					     u32 flags)
2427 {
2428 	struct insert_entries arg = { vm, vma, level, flags };
2429 
2430 	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2431 }
2432 
2433 struct clear_range {
2434 	struct i915_address_space *vm;
2435 	u64 start;
2436 	u64 length;
2437 };
2438 
2439 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2440 {
2441 	struct clear_range *arg = _arg;
2442 
2443 	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2444 	bxt_vtd_ggtt_wa(arg->vm);
2445 
2446 	return 0;
2447 }
2448 
2449 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2450 					  u64 start,
2451 					  u64 length)
2452 {
2453 	struct clear_range arg = { vm, start, length };
2454 
2455 	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2456 }
2457 
2458 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2459 				  u64 start, u64 length)
2460 {
2461 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2462 	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2463 	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2464 	gen6_pte_t scratch_pte, __iomem *gtt_base =
2465 		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2466 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2467 	int i;
2468 
2469 	if (WARN(num_entries > max_entries,
2470 		 "First entry = %d; Num entries = %d (max=%d)\n",
2471 		 first_entry, num_entries, max_entries))
2472 		num_entries = max_entries;
2473 
2474 	scratch_pte = vm->scratch_pte;
2475 
2476 	for (i = 0; i < num_entries; i++)
2477 		iowrite32(scratch_pte, &gtt_base[i]);
2478 }
2479 
2480 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2481 				  dma_addr_t addr,
2482 				  u64 offset,
2483 				  enum i915_cache_level cache_level,
2484 				  u32 unused)
2485 {
2486 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2487 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2488 
2489 	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2490 }
2491 
2492 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2493 				     struct i915_vma *vma,
2494 				     enum i915_cache_level cache_level,
2495 				     u32 unused)
2496 {
2497 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2498 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2499 
2500 	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2501 				    flags);
2502 }
2503 
2504 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2505 				  u64 start, u64 length)
2506 {
2507 	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2508 }
2509 
2510 static int ggtt_bind_vma(struct i915_vma *vma,
2511 			 enum i915_cache_level cache_level,
2512 			 u32 flags)
2513 {
2514 	struct drm_i915_private *i915 = vma->vm->i915;
2515 	struct drm_i915_gem_object *obj = vma->obj;
2516 	intel_wakeref_t wakeref;
2517 	u32 pte_flags;
2518 
2519 	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2520 	pte_flags = 0;
2521 	if (i915_gem_object_is_readonly(obj))
2522 		pte_flags |= PTE_READ_ONLY;
2523 
2524 	with_intel_runtime_pm(i915, wakeref)
2525 		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2526 
2527 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2528 
2529 	/*
2530 	 * Without aliasing PPGTT there's no difference between
2531 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2532 	 * upgrade to both bound if we bind either to avoid double-binding.
2533 	 */
2534 	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2535 
2536 	return 0;
2537 }
2538 
2539 static void ggtt_unbind_vma(struct i915_vma *vma)
2540 {
2541 	struct drm_i915_private *i915 = vma->vm->i915;
2542 	intel_wakeref_t wakeref;
2543 
2544 	with_intel_runtime_pm(i915, wakeref)
2545 		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2546 }
2547 
2548 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2549 				 enum i915_cache_level cache_level,
2550 				 u32 flags)
2551 {
2552 	struct drm_i915_private *i915 = vma->vm->i915;
2553 	u32 pte_flags;
2554 	int ret;
2555 
2556 	/* Currently applicable only to VLV */
2557 	pte_flags = 0;
2558 	if (i915_gem_object_is_readonly(vma->obj))
2559 		pte_flags |= PTE_READ_ONLY;
2560 
2561 	if (flags & I915_VMA_LOCAL_BIND) {
2562 		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2563 
2564 		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2565 			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
2566 							   vma->node.start,
2567 							   vma->size);
2568 			if (ret)
2569 				return ret;
2570 		}
2571 
2572 		appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
2573 					  pte_flags);
2574 	}
2575 
2576 	if (flags & I915_VMA_GLOBAL_BIND) {
2577 		intel_wakeref_t wakeref;
2578 
2579 		with_intel_runtime_pm(i915, wakeref) {
2580 			vma->vm->insert_entries(vma->vm, vma,
2581 						cache_level, pte_flags);
2582 		}
2583 	}
2584 
2585 	return 0;
2586 }
2587 
2588 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2589 {
2590 	struct drm_i915_private *i915 = vma->vm->i915;
2591 
2592 	if (vma->flags & I915_VMA_GLOBAL_BIND) {
2593 		struct i915_address_space *vm = vma->vm;
2594 		intel_wakeref_t wakeref;
2595 
2596 		with_intel_runtime_pm(i915, wakeref)
2597 			vm->clear_range(vm, vma->node.start, vma->size);
2598 	}
2599 
2600 	if (vma->flags & I915_VMA_LOCAL_BIND) {
2601 		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
2602 
2603 		vm->clear_range(vm, vma->node.start, vma->size);
2604 	}
2605 }
2606 
2607 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2608 			       struct sg_table *pages)
2609 {
2610 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2611 	struct device *kdev = &dev_priv->drm.pdev->dev;
2612 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2613 
2614 	if (unlikely(ggtt->do_idle_maps)) {
2615 		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2616 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2617 			/* Wait a bit, in hopes it avoids the hang */
2618 			udelay(10);
2619 		}
2620 	}
2621 
2622 	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2623 }
2624 
2625 static int ggtt_set_pages(struct i915_vma *vma)
2626 {
2627 	int ret;
2628 
2629 	GEM_BUG_ON(vma->pages);
2630 
2631 	ret = i915_get_ggtt_vma_pages(vma);
2632 	if (ret)
2633 		return ret;
2634 
2635 	vma->page_sizes = vma->obj->mm.page_sizes;
2636 
2637 	return 0;
2638 }
2639 
2640 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2641 				  unsigned long color,
2642 				  u64 *start,
2643 				  u64 *end)
2644 {
2645 	if (node->allocated && node->color != color)
2646 		*start += I915_GTT_PAGE_SIZE;
2647 
	/* Also leave a space between any objects within the GTT and the
	 * unallocated reserved node placed after the GTT, i.e. we use the
	 * color adjustment to insert a guard page to prevent prefetches
	 * crossing over the GTT boundary.
	 */
2653 	node = list_next_entry(node, node_list);
2654 	if (node->color != color)
2655 		*end -= I915_GTT_PAGE_SIZE;
2656 }
2657 
2658 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2659 {
2660 	struct i915_ggtt *ggtt = &i915->ggtt;
2661 	struct i915_hw_ppgtt *ppgtt;
2662 	int err;
2663 
2664 	ppgtt = i915_ppgtt_create(i915);
2665 	if (IS_ERR(ppgtt))
2666 		return PTR_ERR(ppgtt);
2667 
2668 	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2669 		err = -ENODEV;
2670 		goto err_ppgtt;
2671 	}
2672 
2673 	/*
2674 	 * Note we only pre-allocate as far as the end of the global
2675 	 * GTT. On 48b / 4-level page-tables, the difference is very,
2676 	 * very significant! We have to preallocate as GVT/vgpu does
2677 	 * not like the page directory disappearing.
2678 	 */
2679 	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2680 	if (err)
2681 		goto err_ppgtt;
2682 
2683 	i915->mm.aliasing_ppgtt = ppgtt;
2684 
2685 	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2686 	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2687 
2688 	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2689 	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2690 
2691 	return 0;
2692 
2693 err_ppgtt:
2694 	i915_ppgtt_put(ppgtt);
2695 	return err;
2696 }
2697 
2698 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2699 {
2700 	struct i915_ggtt *ggtt = &i915->ggtt;
2701 	struct i915_hw_ppgtt *ppgtt;
2702 
2703 	ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2704 	if (!ppgtt)
2705 		return;
2706 
2707 	i915_ppgtt_put(ppgtt);
2708 
2709 	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
2710 	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2711 }
2712 
2713 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2714 {
	/* Let GEM manage all of the aperture.
2716 	 *
2717 	 * However, leave one page at the end still bound to the scratch page.
2718 	 * There are a number of places where the hardware apparently prefetches
2719 	 * past the end of the object, and we've seen multiple hangs with the
2720 	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2721 	 * aperture.  One page should be enough to keep any prefetching inside
2722 	 * of the aperture.
2723 	 */
2724 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2725 	unsigned long hole_start, hole_end;
2726 	struct drm_mm_node *entry;
2727 	int ret;
2728 
2729 	/*
2730 	 * GuC requires all resources that we're sharing with it to be placed in
2731 	 * non-WOPCM memory. If GuC is not present or not in use we still need a
2732 	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2733 	 * why.
2734 	 */
2735 	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2736 			       intel_guc_reserved_gtt_size(&dev_priv->guc));
2737 
2738 	ret = intel_vgt_balloon(dev_priv);
2739 	if (ret)
2740 		return ret;
2741 
2742 	/* Reserve a mappable slot for our lockless error capture */
2743 	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2744 					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2745 					  0, ggtt->mappable_end,
2746 					  DRM_MM_INSERT_LOW);
2747 	if (ret)
2748 		return ret;
2749 
2750 	/* Clear any non-preallocated blocks */
2751 	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2752 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2753 			      hole_start, hole_end);
2754 		ggtt->vm.clear_range(&ggtt->vm, hole_start,
2755 				     hole_end - hole_start);
2756 	}
2757 
2758 	/* And finally clear the reserved guard page */
2759 	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2760 
2761 	if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2762 		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2763 		if (ret)
2764 			goto err;
2765 	}
2766 
2767 	return 0;
2768 
2769 err:
2770 	drm_mm_remove_node(&ggtt->error_capture);
2771 	return ret;
2772 }
2773 
2774 /**
2775  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2776  * @dev_priv: i915 device
2777  */
2778 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2779 {
2780 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2781 	struct i915_vma *vma, *vn;
2782 	struct pagevec *pvec;
2783 
2784 	ggtt->vm.closed = true;
2785 
2786 	mutex_lock(&dev_priv->drm.struct_mutex);
2787 	i915_gem_fini_aliasing_ppgtt(dev_priv);
2788 
2789 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2790 		WARN_ON(i915_vma_unbind(vma));
2791 
2792 	if (drm_mm_node_allocated(&ggtt->error_capture))
2793 		drm_mm_remove_node(&ggtt->error_capture);
2794 
2795 	if (drm_mm_initialized(&ggtt->vm.mm)) {
2796 		intel_vgt_deballoon(dev_priv);
2797 		i915_address_space_fini(&ggtt->vm);
2798 	}
2799 
2800 	ggtt->vm.cleanup(&ggtt->vm);
2801 
2802 	pvec = &dev_priv->mm.wc_stash.pvec;
2803 	if (pvec->nr) {
2804 		set_pages_array_wb(pvec->pages, pvec->nr);
2805 		__pagevec_release(pvec);
2806 	}
2807 
2808 	mutex_unlock(&dev_priv->drm.struct_mutex);
2809 
2810 	arch_phys_wc_del(ggtt->mtrr);
2811 	io_mapping_fini(&ggtt->iomap);
2812 
2813 	i915_gem_cleanup_stolen(dev_priv);
2814 }
2815 
2816 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2817 {
2818 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2819 	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2820 	return snb_gmch_ctl << 20;
2821 }
2822 
2823 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2824 {
2825 	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2826 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2827 	if (bdw_gmch_ctl)
2828 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2829 
2830 #ifdef CONFIG_X86_32
2831 	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2832 	if (bdw_gmch_ctl > 4)
2833 		bdw_gmch_ctl = 4;
2834 #endif
2835 
2836 	return bdw_gmch_ctl << 20;
2837 }
2838 
2839 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2840 {
2841 	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2842 	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2843 
2844 	if (gmch_ctrl)
2845 		return 1 << (20 + gmch_ctrl);
2846 
2847 	return 0;
2848 }
2849 
2850 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2851 {
2852 	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2853 	struct pci_dev *pdev = dev_priv->drm.pdev;
2854 	phys_addr_t phys_addr;
2855 	int ret;
2856 
	/* For modern GENs the PTEs and register space are split in the BAR */
2858 	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2859 
2860 	/*
2861 	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2862 	 * will be dropped. For WC mappings in general we have 64 byte burst
2863 	 * writes when the WC buffer is flushed, so we can't use it, but have to
2864 	 * resort to an uncached mapping. The WC issue is easily caught by the
2865 	 * readback check when writing GTT PTE entries.
2866 	 */
2867 	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2868 		ggtt->gsm = ioremap_nocache(phys_addr, size);
2869 	else
2870 		ggtt->gsm = ioremap_wc(phys_addr, size);
2871 	if (!ggtt->gsm) {
2872 		DRM_ERROR("Failed to map the ggtt page table\n");
2873 		return -ENOMEM;
2874 	}
2875 
2876 	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2877 	if (ret) {
2878 		DRM_ERROR("Scratch setup failed\n");
2879 		/* iounmap will also get called at remove, but meh */
2880 		iounmap(ggtt->gsm);
2881 		return ret;
2882 	}
2883 
2884 	ggtt->vm.scratch_pte =
2885 		ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
2886 				    I915_CACHE_NONE, 0);
2887 
2888 	return 0;
2889 }
2890 
2891 static struct intel_ppat_entry *
2892 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
2893 {
2894 	struct intel_ppat_entry *entry = &ppat->entries[index];
2895 
2896 	GEM_BUG_ON(index >= ppat->max_entries);
2897 	GEM_BUG_ON(test_bit(index, ppat->used));
2898 
2899 	entry->ppat = ppat;
2900 	entry->value = value;
2901 	kref_init(&entry->ref);
2902 	set_bit(index, ppat->used);
2903 	set_bit(index, ppat->dirty);
2904 
2905 	return entry;
2906 }
2907 
2908 static void __free_ppat_entry(struct intel_ppat_entry *entry)
2909 {
2910 	struct intel_ppat *ppat = entry->ppat;
2911 	unsigned int index = entry - ppat->entries;
2912 
2913 	GEM_BUG_ON(index >= ppat->max_entries);
2914 	GEM_BUG_ON(!test_bit(index, ppat->used));
2915 
2916 	entry->value = ppat->clear_value;
2917 	clear_bit(index, ppat->used);
2918 	set_bit(index, ppat->dirty);
2919 }
2920 
2921 /**
2922  * intel_ppat_get - get a usable PPAT entry
2923  * @i915: i915 device instance
2924  * @value: the PPAT value required by the caller
2925  *
 * The function searches for an existing PPAT entry that matches the required
 * value. If a perfect match is found, the existing entry is used. If only a
 * partial match is found, it checks whether a free PPAT index is available:
 * if so, a new PPAT index is allocated for the required value and the HW is
 * updated; if not, the partially matching entry is used.
2932  */
2933 const struct intel_ppat_entry *
2934 intel_ppat_get(struct drm_i915_private *i915, u8 value)
2935 {
2936 	struct intel_ppat *ppat = &i915->ppat;
2937 	struct intel_ppat_entry *entry = NULL;
2938 	unsigned int scanned, best_score;
2939 	int i;
2940 
2941 	GEM_BUG_ON(!ppat->max_entries);
2942 
2943 	scanned = best_score = 0;
2944 	for_each_set_bit(i, ppat->used, ppat->max_entries) {
2945 		unsigned int score;
2946 
2947 		score = ppat->match(ppat->entries[i].value, value);
2948 		if (score > best_score) {
2949 			entry = &ppat->entries[i];
2950 			if (score == INTEL_PPAT_PERFECT_MATCH) {
2951 				kref_get(&entry->ref);
2952 				return entry;
2953 			}
2954 			best_score = score;
2955 		}
2956 		scanned++;
2957 	}
2958 
2959 	if (scanned == ppat->max_entries) {
2960 		if (!entry)
2961 			return ERR_PTR(-ENOSPC);
2962 
2963 		kref_get(&entry->ref);
2964 		return entry;
2965 	}
2966 
2967 	i = find_first_zero_bit(ppat->used, ppat->max_entries);
2968 	entry = __alloc_ppat_entry(ppat, i, value);
2969 	ppat->update_hw(i915);
2970 	return entry;
2971 }
2972 
2973 static void release_ppat(struct kref *kref)
2974 {
2975 	struct intel_ppat_entry *entry =
2976 		container_of(kref, struct intel_ppat_entry, ref);
2977 	struct drm_i915_private *i915 = entry->ppat->i915;
2978 
2979 	__free_ppat_entry(entry);
2980 	entry->ppat->update_hw(i915);
2981 }
2982 
2983 /**
 * intel_ppat_put - put back a PPAT entry obtained from intel_ppat_get()
 * @entry: an intel PPAT entry
 *
 * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
 * the entry was dynamically allocated, its reference count is decreased. Once
 * the reference count drops to zero, the PPAT index becomes free again.
2990  */
2991 void intel_ppat_put(const struct intel_ppat_entry *entry)
2992 {
2993 	struct intel_ppat *ppat = entry->ppat;
2994 	unsigned int index = entry - ppat->entries;
2995 
2996 	GEM_BUG_ON(!ppat->max_entries);
2997 
2998 	kref_put(&ppat->entries[index].ref, release_ppat);
2999 }
3000 
3001 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3002 {
3003 	struct intel_ppat *ppat = &dev_priv->ppat;
3004 	int i;
3005 
3006 	for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3007 		I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3008 		clear_bit(i, ppat->dirty);
3009 	}
3010 }
3011 
3012 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3013 {
3014 	struct intel_ppat *ppat = &dev_priv->ppat;
3015 	u64 pat = 0;
3016 	int i;
3017 
3018 	for (i = 0; i < ppat->max_entries; i++)
3019 		pat |= GEN8_PPAT(i, ppat->entries[i].value);
3020 
3021 	bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3022 
3023 	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3024 	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3025 }
3026 
3027 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3028 {
3029 	unsigned int score = 0;
3030 	enum {
3031 		AGE_MATCH = BIT(0),
3032 		TC_MATCH = BIT(1),
3033 		CA_MATCH = BIT(2),
3034 	};
3035 
3036 	/* Cache attribute has to be matched. */
3037 	if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3038 		return 0;
3039 
3040 	score |= CA_MATCH;
3041 
3042 	if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3043 		score |= TC_MATCH;
3044 
3045 	if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3046 		score |= AGE_MATCH;
3047 
3048 	if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3049 		return INTEL_PPAT_PERFECT_MATCH;
3050 
3051 	return score;
3052 }
3053 
3054 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3055 {
3056 	return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3057 		INTEL_PPAT_PERFECT_MATCH : 0;
3058 }
3059 
3060 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3061 {
3062 	ppat->max_entries = 8;
3063 	ppat->update_hw = cnl_private_pat_update_hw;
3064 	ppat->match = bdw_private_pat_match;
3065 	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3066 
3067 	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3068 	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3069 	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3070 	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3071 	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3072 	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3073 	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3074 	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3075 }
3076 
/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
3080 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3081 {
3082 	ppat->max_entries = 8;
3083 	ppat->update_hw = bdw_private_pat_update_hw;
3084 	ppat->match = bdw_private_pat_match;
3085 	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3086 
3087 	if (!HAS_PPGTT(ppat->i915)) {
3088 		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3089 		 * so RTL will always use the value corresponding to
3090 		 * pat_sel = 000".
3091 		 * So let's disable cache for GGTT to avoid screen corruptions.
3092 		 * MOCS still can be used though.
3093 		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3094 		 * before this patch, i.e. the same uncached + snooping access
3095 		 * like on gen6/7 seems to be in effect.
3096 		 * - So this just fixes blitter/render access. Again it looks
3097 		 * like it's not just uncached access, but uncached + snooping.
3098 		 * So we can still hold onto all our assumptions wrt cpu
3099 		 * clflushing on LLC machines.
3100 		 */
3101 		__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3102 		return;
3103 	}
3104 
3105 	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
3106 	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
3107 	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
3108 	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
3109 	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3110 	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3111 	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3112 	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3113 }
3114 
3115 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3116 {
3117 	ppat->max_entries = 8;
3118 	ppat->update_hw = bdw_private_pat_update_hw;
3119 	ppat->match = chv_private_pat_match;
3120 	ppat->clear_value = CHV_PPAT_SNOOP;
3121 
3122 	/*
3123 	 * Map WB on BDW to snooped on CHV.
3124 	 *
3125 	 * Only the snoop bit has meaning for CHV, the rest is
3126 	 * ignored.
3127 	 *
3128 	 * The hardware will never snoop for certain types of accesses:
3129 	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3130 	 * - PPGTT page tables
3131 	 * - some other special cycles
3132 	 *
3133 	 * As with BDW, we also need to consider the following for GT accesses:
3134 	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3135 	 * so RTL will always use the value corresponding to
3136 	 * pat_sel = 000".
3137 	 * Which means we must set the snoop bit in PAT entry 0
3138 	 * in order to keep the global status page working.
3139 	 */
3140 
3141 	__alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3142 	__alloc_ppat_entry(ppat, 1, 0);
3143 	__alloc_ppat_entry(ppat, 2, 0);
3144 	__alloc_ppat_entry(ppat, 3, 0);
3145 	__alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3146 	__alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3147 	__alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3148 	__alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3149 }
3150 
3151 static void gen6_gmch_remove(struct i915_address_space *vm)
3152 {
3153 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3154 
3155 	iounmap(ggtt->gsm);
3156 	cleanup_scratch_page(vm);
3157 }
3158 
3159 static void setup_private_pat(struct drm_i915_private *dev_priv)
3160 {
3161 	struct intel_ppat *ppat = &dev_priv->ppat;
3162 	int i;
3163 
3164 	ppat->i915 = dev_priv;
3165 
3166 	if (INTEL_GEN(dev_priv) >= 10)
3167 		cnl_setup_private_ppat(ppat);
3168 	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3169 		chv_setup_private_ppat(ppat);
3170 	else
3171 		bdw_setup_private_ppat(ppat);
3172 
3173 	GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3174 
3175 	for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3176 		ppat->entries[i].value = ppat->clear_value;
3177 		ppat->entries[i].ppat = ppat;
3178 		set_bit(i, ppat->dirty);
3179 	}
3180 
3181 	ppat->update_hw(dev_priv);
3182 }
3183 
3184 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3185 {
3186 	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3187 	struct pci_dev *pdev = dev_priv->drm.pdev;
3188 	unsigned int size;
3189 	u16 snb_gmch_ctl;
3190 	int err;
3191 
3192 	/* TODO: We're not aware of mappable constraints on gen8 yet */
3193 	ggtt->gmadr =
3194 		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3195 						 pci_resource_len(pdev, 2));
3196 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3197 
3198 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3199 	if (!err)
3200 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3201 	if (err)
3202 		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3203 
3204 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3205 	if (IS_CHERRYVIEW(dev_priv))
3206 		size = chv_get_total_gtt_size(snb_gmch_ctl);
3207 	else
3208 		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3209 
3210 	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3211 	ggtt->vm.cleanup = gen6_gmch_remove;
3212 	ggtt->vm.insert_page = gen8_ggtt_insert_page;
3213 	ggtt->vm.clear_range = nop_clear_range;
3214 	if (intel_scanout_needs_vtd_wa(dev_priv))
3215 		ggtt->vm.clear_range = gen8_ggtt_clear_range;
3216 
3217 	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3218 
3219 	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3220 	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3221 	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3222 		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3223 		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3224 		if (ggtt->vm.clear_range != nop_clear_range)
3225 			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3226 
3227 		/* Prevent recursively calling stop_machine() and deadlocks. */
3228 		dev_info(dev_priv->drm.dev,
3229 			 "Disabling error capture for VT-d workaround\n");
3230 		i915_disable_error_state(dev_priv, -ENODEV);
3231 	}
3232 
3233 	ggtt->invalidate = gen6_ggtt_invalidate;
3234 
3235 	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3236 	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3237 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3238 	ggtt->vm.vma_ops.clear_pages = clear_pages;
3239 
3240 	ggtt->vm.pte_encode = gen8_pte_encode;
3241 
3242 	setup_private_pat(dev_priv);
3243 
3244 	return ggtt_probe_common(ggtt, size);
3245 }
3246 
3247 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3248 {
3249 	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3250 	struct pci_dev *pdev = dev_priv->drm.pdev;
3251 	unsigned int size;
3252 	u16 snb_gmch_ctl;
3253 	int err;
3254 
3255 	ggtt->gmadr =
3256 		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3257 						 pci_resource_len(pdev, 2));
3258 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3259 
3260 	/* 64/512MB is the current min/max we actually know of, but this is just
3261 	 * a coarse sanity check.
3262 	 */
3263 	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3264 		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3265 		return -ENXIO;
3266 	}
3267 
3268 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3269 	if (!err)
3270 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3271 	if (err)
3272 		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3273 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3274 
3275 	size = gen6_get_total_gtt_size(snb_gmch_ctl);
3276 	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3277 
3278 	ggtt->vm.clear_range = gen6_ggtt_clear_range;
3279 	ggtt->vm.insert_page = gen6_ggtt_insert_page;
3280 	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3281 	ggtt->vm.cleanup = gen6_gmch_remove;
3282 
3283 	ggtt->invalidate = gen6_ggtt_invalidate;
3284 
3285 	if (HAS_EDRAM(dev_priv))
3286 		ggtt->vm.pte_encode = iris_pte_encode;
3287 	else if (IS_HASWELL(dev_priv))
3288 		ggtt->vm.pte_encode = hsw_pte_encode;
3289 	else if (IS_VALLEYVIEW(dev_priv))
3290 		ggtt->vm.pte_encode = byt_pte_encode;
3291 	else if (INTEL_GEN(dev_priv) >= 7)
3292 		ggtt->vm.pte_encode = ivb_pte_encode;
3293 	else
3294 		ggtt->vm.pte_encode = snb_pte_encode;
3295 
3296 	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3297 	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3298 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3299 	ggtt->vm.vma_ops.clear_pages = clear_pages;
3300 
3301 	return ggtt_probe_common(ggtt, size);
3302 }
3303 
3304 static void i915_gmch_remove(struct i915_address_space *vm)
3305 {
3306 	intel_gmch_remove();
3307 }
3308 
3309 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3310 {
3311 	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3312 	phys_addr_t gmadr_base;
3313 	int ret;
3314 
3315 	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3316 	if (!ret) {
3317 		DRM_ERROR("failed to set up gmch\n");
3318 		return -EIO;
3319 	}
3320 
3321 	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3322 
3323 	ggtt->gmadr =
3324 		(struct resource) DEFINE_RES_MEM(gmadr_base,
3325 						 ggtt->mappable_end);
3326 
3327 	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3328 	ggtt->vm.insert_page = i915_ggtt_insert_page;
3329 	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3330 	ggtt->vm.clear_range = i915_ggtt_clear_range;
3331 	ggtt->vm.cleanup = i915_gmch_remove;
3332 
3333 	ggtt->invalidate = gmch_ggtt_invalidate;
3334 
3335 	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3336 	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3337 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3338 	ggtt->vm.vma_ops.clear_pages = clear_pages;
3339 
3340 	if (unlikely(ggtt->do_idle_maps))
3341 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3342 
3343 	return 0;
3344 }
3345 
3346 /**
3347  * i915_ggtt_probe_hw - Probe GGTT hardware location
3348  * @dev_priv: i915 device
3349  */
3350 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3351 {
3352 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3353 	int ret;
3354 
3355 	ggtt->vm.i915 = dev_priv;
3356 	ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3357 
3358 	if (INTEL_GEN(dev_priv) <= 5)
3359 		ret = i915_gmch_probe(ggtt);
3360 	else if (INTEL_GEN(dev_priv) < 8)
3361 		ret = gen6_gmch_probe(ggtt);
3362 	else
3363 		ret = gen8_gmch_probe(ggtt);
3364 	if (ret)
3365 		return ret;
3366 
3367 	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3368 	 * This is easier than doing range restriction on the fly, as we
3369 	 * currently don't have any bits spare to pass in this upper
3370 	 * restriction!
3371 	 */
3372 	if (USES_GUC(dev_priv)) {
3373 		ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
3374 		ggtt->mappable_end =
3375 			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3376 	}
3377 
3378 	if ((ggtt->vm.total - 1) >> 32) {
3379 		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3380 			  " of address space! Found %lldM!\n",
3381 			  ggtt->vm.total >> 20);
3382 		ggtt->vm.total = 1ULL << 32;
3383 		ggtt->mappable_end =
3384 			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3385 	}
3386 
3387 	if (ggtt->mappable_end > ggtt->vm.total) {
3388 		DRM_ERROR("mappable aperture extends past end of GGTT,"
3389 			  " aperture=%pa, total=%llx\n",
3390 			  &ggtt->mappable_end, ggtt->vm.total);
3391 		ggtt->mappable_end = ggtt->vm.total;
3392 	}
3393 
3394 	/* GMADR is the PCI mmio aperture into the global GTT. */
3395 	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3396 	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3397 	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3398 			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3399 	if (intel_vtd_active())
3400 		DRM_INFO("VT-d active for gfx access\n");
3401 
3402 	return 0;
3403 }
3404 
3405 /**
3406  * i915_ggtt_init_hw - Initialize GGTT hardware
3407  * @dev_priv: i915 device
3408  */
3409 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3410 {
3411 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3412 	int ret;
3413 
3414 	stash_init(&dev_priv->mm.wc_stash);
3415 
3416 	/* Note that we use page colouring to enforce a guard page at the
3417 	 * end of the address space. This is required as the CS may prefetch
3418 	 * beyond the end of the batch buffer, across the page boundary,
3419 	 * and beyond the end of the GTT if we do not provide a guard.
3420 	 */
3421 	mutex_lock(&dev_priv->drm.struct_mutex);
3422 	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3423 
3424 	ggtt->vm.is_ggtt = true;
3425 
3426 	/* Only VLV supports read-only GGTT mappings */
3427 	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3428 
3429 	if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3430 		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3431 	mutex_unlock(&dev_priv->drm.struct_mutex);
3432 
3433 	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3434 				dev_priv->ggtt.gmadr.start,
3435 				dev_priv->ggtt.mappable_end)) {
3436 		ret = -EIO;
3437 		goto out_gtt_cleanup;
3438 	}
3439 
3440 	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3441 
3442 	/*
3443 	 * Initialise stolen early so that we may reserve preallocated
3444 	 * objects for the BIOS to KMS transition.
3445 	 */
3446 	ret = i915_gem_init_stolen(dev_priv);
3447 	if (ret)
3448 		goto out_gtt_cleanup;
3449 
3450 	return 0;
3451 
3452 out_gtt_cleanup:
3453 	ggtt->vm.cleanup(&ggtt->vm);
3454 	return ret;
3455 }
3456 
3457 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3458 {
3459 	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3460 		return -EIO;
3461 
3462 	return 0;
3463 }
3464 
3465 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3466 {
3467 	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3468 
3469 	i915->ggtt.invalidate = guc_ggtt_invalidate;
3470 
3471 	i915_ggtt_invalidate(i915);
3472 }
3473 
3474 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3475 {
3476 	/* XXX Temporary pardon for error unload */
3477 	if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
3478 		return;
3479 
3480 	/* We should only be called after i915_ggtt_enable_guc() */
3481 	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3482 
3483 	i915->ggtt.invalidate = gen6_ggtt_invalidate;
3484 
3485 	i915_ggtt_invalidate(i915);
3486 }
3487 
3488 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3489 {
3490 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3491 	struct i915_vma *vma, *vn;
3492 
3493 	i915_check_and_clear_faults(dev_priv);
3494 
3495 	mutex_lock(&ggtt->vm.mutex);
3496 
3497 	/* First fill our portion of the GTT with scratch pages */
3498 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3499 	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3500 
3501 	/* clflush objects bound into the GGTT and rebind them. */
3502 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3503 		struct drm_i915_gem_object *obj = vma->obj;
3504 
3505 		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3506 			continue;
3507 
3508 		mutex_unlock(&ggtt->vm.mutex);
3509 
3510 		if (!i915_vma_unbind(vma))
3511 			goto lock;
3512 
3513 		WARN_ON(i915_vma_bind(vma,
3514 				      obj ? obj->cache_level : 0,
3515 				      PIN_UPDATE));
3516 		if (obj)
3517 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3518 
3519 lock:
3520 		mutex_lock(&ggtt->vm.mutex);
3521 	}
3522 
3523 	ggtt->vm.closed = false;
3524 	i915_ggtt_invalidate(dev_priv);
3525 
3526 	mutex_unlock(&ggtt->vm.mutex);
3527 
3528 	if (INTEL_GEN(dev_priv) >= 8) {
3529 		struct intel_ppat *ppat = &dev_priv->ppat;
3530 
3531 		bitmap_set(ppat->dirty, 0, ppat->max_entries);
3532 		dev_priv->ppat.update_hw(dev_priv);
3533 		return;
3534 	}
3535 }
3536 
3537 static struct scatterlist *
3538 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3539 	     unsigned int width, unsigned int height,
3540 	     unsigned int stride,
3541 	     struct sg_table *st, struct scatterlist *sg)
3542 {
3543 	unsigned int column, row;
3544 	unsigned int src_idx;
3545 
3546 	for (column = 0; column < width; column++) {
3547 		src_idx = stride * (height - 1) + column + offset;
3548 		for (row = 0; row < height; row++) {
3549 			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * All we need are the DMA addresses.
			 */
3554 			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3555 			sg_dma_address(sg) =
3556 				i915_gem_object_get_dma_address(obj, src_idx);
3557 			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3558 			sg = sg_next(sg);
3559 			src_idx -= stride;
3560 		}
3561 	}
3562 
3563 	return sg;
3564 }
3565 
3566 static noinline struct sg_table *
3567 intel_rotate_pages(struct intel_rotation_info *rot_info,
3568 		   struct drm_i915_gem_object *obj)
3569 {
3570 	unsigned int size = intel_rotation_info_size(rot_info);
3571 	struct sg_table *st;
3572 	struct scatterlist *sg;
3573 	int ret = -ENOMEM;
3574 	int i;
3575 
3576 	/* Allocate target SG list. */
3577 	st = kmalloc(sizeof(*st), GFP_KERNEL);
3578 	if (!st)
3579 		goto err_st_alloc;
3580 
3581 	ret = sg_alloc_table(st, size, GFP_KERNEL);
3582 	if (ret)
3583 		goto err_sg_alloc;
3584 
3585 	st->nents = 0;
3586 	sg = st->sgl;
3587 
3588 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3589 		sg = rotate_pages(obj, rot_info->plane[i].offset,
3590 				  rot_info->plane[i].width, rot_info->plane[i].height,
3591 				  rot_info->plane[i].stride, st, sg);
3592 	}
3593 
3594 	return st;
3595 
3596 err_sg_alloc:
3597 	kfree(st);
3598 err_st_alloc:
3599 
3600 	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3601 			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3602 
3603 	return ERR_PTR(ret);
3604 }
3605 
3606 static noinline struct sg_table *
3607 intel_partial_pages(const struct i915_ggtt_view *view,
3608 		    struct drm_i915_gem_object *obj)
3609 {
3610 	struct sg_table *st;
3611 	struct scatterlist *sg, *iter;
3612 	unsigned int count = view->partial.size;
3613 	unsigned int offset;
3614 	int ret = -ENOMEM;
3615 
3616 	st = kmalloc(sizeof(*st), GFP_KERNEL);
3617 	if (!st)
3618 		goto err_st_alloc;
3619 
3620 	ret = sg_alloc_table(st, count, GFP_KERNEL);
3621 	if (ret)
3622 		goto err_sg_alloc;
3623 
3624 	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3625 	GEM_BUG_ON(!iter);
3626 
3627 	sg = st->sgl;
3628 	st->nents = 0;
3629 	do {
3630 		unsigned int len;
3631 
3632 		len = min(iter->length - (offset << PAGE_SHIFT),
3633 			  count << PAGE_SHIFT);
3634 		sg_set_page(sg, NULL, len, 0);
3635 		sg_dma_address(sg) =
3636 			sg_dma_address(iter) + (offset << PAGE_SHIFT);
3637 		sg_dma_len(sg) = len;
3638 
3639 		st->nents++;
3640 		count -= len >> PAGE_SHIFT;
3641 		if (count == 0) {
3642 			sg_mark_end(sg);
3643 			i915_sg_trim(st); /* Drop any unused tail entries. */
3644 
3645 			return st;
3646 		}
3647 
3648 		sg = __sg_next(sg);
3649 		iter = __sg_next(iter);
3650 		offset = 0;
3651 	} while (1);
3652 
3653 err_sg_alloc:
3654 	kfree(st);
3655 err_st_alloc:
3656 	return ERR_PTR(ret);
3657 }
3658 
3659 static int
3660 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3661 {
3662 	int ret;
3663 
3664 	/* The vma->pages are only valid within the lifespan of the borrowed
3665 	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3666 	 * must be the vma->pages. A simple rule is that vma->pages must only
3667 	 * be accessed when the obj->mm.pages are pinned.
3668 	 */
3669 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3670 
3671 	switch (vma->ggtt_view.type) {
3672 	default:
3673 		GEM_BUG_ON(vma->ggtt_view.type);
3674 		/* fall through */
3675 	case I915_GGTT_VIEW_NORMAL:
3676 		vma->pages = vma->obj->mm.pages;
3677 		return 0;
3678 
3679 	case I915_GGTT_VIEW_ROTATED:
3680 		vma->pages =
3681 			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3682 		break;
3683 
3684 	case I915_GGTT_VIEW_PARTIAL:
3685 		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3686 		break;
3687 	}
3688 
3689 	ret = 0;
3690 	if (IS_ERR(vma->pages)) {
3691 		ret = PTR_ERR(vma->pages);
3692 		vma->pages = NULL;
3693 		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3694 			  vma->ggtt_view.type, ret);
3695 	}
3696 	return ret;
3697 }
3698 
3699 /**
3700  * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3701  * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
3703  * @size: how much space to allocate inside the GTT,
3704  *        must be #I915_GTT_PAGE_SIZE aligned
3705  * @offset: where to insert inside the GTT,
3706  *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3707  *          (@offset + @size) must fit within the address space
3708  * @color: color to apply to node, if this node is not from a VMA,
3709  *         color must be #I915_COLOR_UNEVICTABLE
3710  * @flags: control search and eviction behaviour
3711  *
3712  * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3713  * the address space (using @size and @color). If the @node does not fit, it
3714  * tries to evict any overlapping nodes from the GTT, including any
3715  * neighbouring nodes if the colors do not match (to ensure guard pages between
3716  * differing domains). See i915_gem_evict_for_node() for the gory details
3717  * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3718  * evicting active overlapping objects, and any overlapping node that is pinned
3719  * or marked as unevictable will also result in failure.
3720  *
3721  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3722  * asked to wait for eviction and interrupted.
3723  */
3724 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3725 			 struct drm_mm_node *node,
3726 			 u64 size, u64 offset, unsigned long color,
3727 			 unsigned int flags)
3728 {
3729 	int err;
3730 
3731 	GEM_BUG_ON(!size);
3732 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3733 	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3734 	GEM_BUG_ON(range_overflows(offset, size, vm->total));
3735 	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3736 	GEM_BUG_ON(drm_mm_node_allocated(node));
3737 
3738 	node->size = size;
3739 	node->start = offset;
3740 	node->color = color;
3741 
3742 	err = drm_mm_reserve_node(&vm->mm, node);
3743 	if (err != -ENOSPC)
3744 		return err;
3745 
3746 	if (flags & PIN_NOEVICT)
3747 		return -ENOSPC;
3748 
3749 	err = i915_gem_evict_for_node(vm, node, flags);
3750 	if (err == 0)
3751 		err = drm_mm_reserve_node(&vm->mm, node);
3752 
3753 	return err;
3754 }
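
/*
 * Usage sketch (illustrative only, not part of this file): reserving a node
 * at a fixed offset in the global GTT. The node, size, offset and flags here
 * are hypothetical, and @node is assumed to outlive the reservation.
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, node,
 *				   SZ_64K,		     // size, page aligned
 *				   0,			     // offset, start of GGTT
 *				   I915_COLOR_UNEVICTABLE,   // node not backed by a VMA
 *				   PIN_NOEVICT);	     // fail rather than evict
 *	if (err)
 *		return err;
 */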
3755 
3756 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3757 {
3758 	u64 range, addr;
3759 
3760 	GEM_BUG_ON(range_overflows(start, len, end));
3761 	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3762 
3763 	range = round_down(end - len, align) - round_up(start, align);
3764 	if (range) {
3765 		if (sizeof(unsigned long) == sizeof(u64)) {
3766 			addr = get_random_long();
3767 		} else {
3768 			addr = get_random_int();
3769 			if (range > U32_MAX) {
3770 				addr <<= 32;
3771 				addr |= get_random_int();
3772 			}
3773 		}
3774 		div64_u64_rem(addr, range, &addr);
3775 		start += addr;
3776 	}
3777 
3778 	return round_up(start, align);
3779 }
3780 
3781 /**
3782  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3783  * @vm: the &struct i915_address_space
3784  * @node: the &struct drm_mm_node (typically i915_vma.node)
3785  * @size: how much space to allocate inside the GTT,
3786  *        must be #I915_GTT_PAGE_SIZE aligned
3787  * @alignment: required alignment of starting offset, may be 0 but
3788  *             if specified, this must be a power-of-two and at least
3789  *             #I915_GTT_MIN_ALIGNMENT
3790  * @color: color to apply to node
3791  * @start: start of any range restriction inside GTT (0 for all),
3792  *         must be #I915_GTT_PAGE_SIZE aligned
3793  * @end: end of any range restriction inside GTT (U64_MAX for all),
3794  *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3795  * @flags: control search and eviction behaviour
3796  *
3797  * i915_gem_gtt_insert() first searches for an available hole into which
3798  * it can insert the node. The hole address is aligned to @alignment and
3799  * its @size must then fit entirely within the [@start, @end] bounds. The
3800  * nodes on either side of the hole must match @color, or else a guard page
3801  * will be inserted between the two nodes (or the node evicted). If no
3802  * suitable hole is found, a victim is first selected at random and tested
3803  * for eviction; failing that, the LRU list of objects within the GTT
3804  * is scanned to find the first set of replacement nodes to create the hole.
3805  * Those old overlapping nodes are evicted from the GTT (and so must be
3806  * rebound before any future use). Any node that is currently pinned cannot
3807  * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3808  * active and #PIN_NONBLOCK is specified, that node is also skipped when
3809  * searching for an eviction candidate. See i915_gem_evict_something() for
3810  * the gory details on the eviction algorithm.
3811  *
3812  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3813  * asked to wait for eviction and interrupted.
3814  */
3815 int i915_gem_gtt_insert(struct i915_address_space *vm,
3816 			struct drm_mm_node *node,
3817 			u64 size, u64 alignment, unsigned long color,
3818 			u64 start, u64 end, unsigned int flags)
3819 {
3820 	enum drm_mm_insert_mode mode;
3821 	u64 offset;
3822 	int err;
3823 
3824 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
3825 	GEM_BUG_ON(!size);
3826 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3827 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3828 	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3829 	GEM_BUG_ON(start >= end);
3830 	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3831 	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3832 	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3833 	GEM_BUG_ON(drm_mm_node_allocated(node));
3834 
3835 	if (unlikely(range_overflows(start, size, end)))
3836 		return -ENOSPC;
3837 
3838 	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3839 		return -ENOSPC;
3840 
3841 	mode = DRM_MM_INSERT_BEST;
3842 	if (flags & PIN_HIGH)
3843 		mode = DRM_MM_INSERT_HIGHEST;
3844 	if (flags & PIN_MAPPABLE)
3845 		mode = DRM_MM_INSERT_LOW;
3846 
3847 	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3848 	 * so we know that we always have a minimum alignment of 4096.
3849 	 * The drm_mm range manager is optimised to return results
3850 	 * with zero alignment, so where possible use the optimal
3851 	 * path.
3852 	 */
3853 	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3854 	if (alignment <= I915_GTT_MIN_ALIGNMENT)
3855 		alignment = 0;
3856 
3857 	err = drm_mm_insert_node_in_range(&vm->mm, node,
3858 					  size, alignment, color,
3859 					  start, end, mode);
3860 	if (err != -ENOSPC)
3861 		return err;
3862 
3863 	if (mode & DRM_MM_INSERT_ONCE) {
3864 		err = drm_mm_insert_node_in_range(&vm->mm, node,
3865 						  size, alignment, color,
3866 						  start, end,
3867 						  DRM_MM_INSERT_BEST);
3868 		if (err != -ENOSPC)
3869 			return err;
3870 	}
3871 
3872 	if (flags & PIN_NOEVICT)
3873 		return -ENOSPC;
3874 
3875 	/* No free space, pick a slot at random.
3876 	 *
3877 	 * There is a pathological case here using a GTT shared between
3878 	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3879 	 *
3880 	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3881 	 *         (64k objects)             (448k objects)
3882 	 *
3883 	 * Now imagine that the eviction LRU is ordered top-down (just because
3884 	 * pathology meets real life), and that we need to evict an object to
3885 	 * make room inside the aperture. The eviction scan then has to walk
3886 	 * the 448k list before it finds one within range. And now imagine that
3887 	 * it has to search for a new hole between every byte inside the memcpy,
3888 	 * for several simultaneous clients.
3889 	 *
3890 	 * On a full-ppgtt system, if we have run out of available space, there
3891 	 * will be lots and lots of objects in the eviction list! Again,
3892 	 * searching that LRU list may be slow if we are also applying any
3893 	 * range restrictions (e.g. restriction to low 4GiB) and so, for
3894  * simplicity and similarity between the different GTT types, try the single
3895 	 * random replacement first.
3896 	 */
3897 	offset = random_offset(start, end,
3898 			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3899 	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3900 	if (err != -ENOSPC)
3901 		return err;
3902 
3903 	/* Randomly selected placement is pinned, do a search */
3904 	err = i915_gem_evict_something(vm, size, alignment, color,
3905 				       start, end, flags);
3906 	if (err)
3907 		return err;
3908 
3909 	return drm_mm_insert_node_in_range(&vm->mm, node,
3910 					   size, alignment, color,
3911 					   start, end, DRM_MM_INSERT_EVICT);
3912 }
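
/*
 * Usage sketch (illustrative only, not part of this file): searching for any
 * 2 MiB hole inside the mappable aperture, preferring low addresses so the
 * result stays CPU visible. The node and range here are hypothetical, and
 * struct_mutex must be held as asserted above.
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, node,
 *				  SZ_2M,		    // size
 *				  0,			    // no extra alignment
 *				  I915_COLOR_UNEVICTABLE,   // colour
 *				  0, ggtt->mappable_end,    // range restriction
 *				  PIN_MAPPABLE);	    // bottom-up search
 *	if (err)
 *		return err;
 */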
3913 
3914 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3915 #include "selftests/mock_gtt.c"
3916 #include "selftests/i915_gem_gtt.c"
3917 #endif
3918