/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>

#include "i915_gem_request.h"

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

/* gen6-hsw use PTE bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
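
/*
 * Worked example (illustrative only, not a hardware definition): with
 * GEN6_GTT_ADDR_ENCODE(), a page address of 0x1_0000_0000 (bit 32 set)
 * encodes to 0x1_0000_0010, i.e. physical address bit 32 is mirrored into
 * PTE bit 4, which is how a 40b address is squeezed into the 32b gen6 PTE
 * alongside the valid/cache flag bits defined above.
 */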

#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
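
/*
 * Worked example (illustrative): HSW_CACHEABILITY_CONTROL(0xb) expands to
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) = 0x6 | 0x800 = 0x806, i.e.
 * PTE[3:1] = 0b011 and PTE[11] = 1, which is the HSW_WB_ELLC_LLC_AGE0
 * encoding.
 */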

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is wider than needed on 32b platforms (which have only
 * 4 PDPEs), but the extra bits have no effect on 32b page tables.
 */
#define GEN8_PDPE_MASK			0x1ff
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ? \
				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
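
/*
 * Illustrative sketch of how these macros compose (the actual PPAT table the
 * driver programs lives in i915_gem_gtt.c): each GEN8_PPAT() entry occupies
 * one byte of the 64b register, so e.g.
 *
 *	u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *		  GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 *
 * places a WB/LLC entry at index 0 and a WC/LLCELLC entry at index 1.
 */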

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	struct {
		/* tiles */
		unsigned int width, height, stride, offset;
	} plane[2];
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotated;
	} params;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding the object into, or
 * after unbinding it from, the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct sg_table *pages;
	void __iomem *iomap;
	u64 size;

	unsigned int flags;
	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits.
	 */
#define I915_VMA_PIN_MASK 0xf
#define I915_VMA_PIN_OVERFLOW	BIT(5)

	/** Flags and address space this VMA is bound to */
#define I915_VMA_GLOBAL_BIND	BIT(6)
#define I915_VMA_LOCAL_BIND	BIT(7)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)

#define I915_VMA_GGTT	BIT(8)
#define I915_VMA_CLOSED BIT(9)

	unsigned int active;
	struct i915_gem_active last_read[I915_NUM_ENGINES];

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also what is assumed
	 * in GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
};

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_GGTT;
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_CLOSED;
}

static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
	return vma->active;
}

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return i915_vma_get_active(vma);
}

static inline void i915_vma_set_active(struct i915_vma *vma,
				       unsigned int engine)
{
	vma->active |= BIT(engine);
}

static inline void i915_vma_clear_active(struct i915_vma *vma,
					 unsigned int engine)
{
	vma->active &= ~BIT(engine);
}

static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
					      unsigned int engine)
{
	return vma->active & BIT(engine);
}

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin.
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 start;		/* Start offset always 0 for dri2 */
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */

	bool closed;

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/**
	 * List of vma that have been unbound.
	 *
	 * A reference is not held on the buffer while on this list.
	 */
	struct list_head unbound_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    uint64_t offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

#define i915_is_ggtt(V) (!(V)->file)

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
	size_t stolen_reserved_base;
	size_t stolen_reserved_size;
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not aligned to the pde boundary
 * (1 << GEN6_PDE_SHIFT), the macro will round down and up as needed.
 * Start=0 and length=2G effectively iterates over every PDE in the system.
 * The macro modifies ALL its parameters except 'pd', so each of the other
 * parameters should preferably be a simple variable, or at most an lvalue
 * with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen6_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)
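
/*
 * Illustrative use (a sketch, assuming a struct i915_hw_ppgtt *ppgtt and a
 * byte range start/length; not lifted verbatim from the driver):
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
 *		touch_some_ptes(pt, gen6_pte_index(start),
 *				gen6_pte_count(start, length));
 *
 * Inside the body, gen6_pte_index(start) and gen6_pte_count(start, length)
 * give the first PTE of 'pt' in the range and how many PTEs of 'pt' it
 * covers; the macro then clips start/length past this pde before the next
 * pass. touch_some_ptes() is a made-up placeholder.
 */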

#define gen6_for_all_pdes(pt, pd, iter)					\
	for (iter = 0;							\
	     iter < I915_PDES &&					\
		(pt = (pd)->page_table[iter], true);			\
	     ++iter)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
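
/*
 * Worked example (illustrative): i915_pte_count(0x1000, 0x3000, GEN6_PDE_SHIFT)
 * returns 3: the range [0x1000, 0x4000) stays within a single gen6 page table
 * (each covers 1 << GEN6_PDE_SHIFT bytes) and spans PTE indices 1, 2 and 3.
 */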

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde from start until
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) &&		\
		(pd = (pdp)->page_directory[iter], true);		\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);				\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
		(pdp = (pml4)->pdps[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}
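
/*
 * Worked example (illustrative): for the 48b address
 * addr = (1ULL << 39) | (2ULL << 30) | (3ULL << 21) | (4ULL << 12),
 * the helpers above give gen8_pml4e_index(addr) == 1, gen8_pdpe_index(addr)
 * == 2, gen8_pde_index(addr) == 3 and gen8_pte_index(addr) == 4, matching
 * the PML4E | PDPE | PDE | PTE | offset layout documented next to the
 * GEN8_*_SHIFT definitions.
 */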

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
					struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
		     const struct i915_ggtt_view *b)
{
	if (WARN_ON(!a || !b))
		return false;

	if (a->type != b->type)
		return false;
	if (a->type != I915_GGTT_VIEW_NORMAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));
	return true;
}

/* Flags used by pin/bind & friends. */
#define PIN_NONBLOCK		BIT(0)
#define PIN_MAPPABLE		BIT(1)
#define PIN_ZONE_4G		BIT(2)

#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE		BIT(8)

#define PIN_HIGH		BIT(9)
#define PIN_OFFSET_BIAS		BIT(10)
#define PIN_OFFSET_FIXED	BIT(11)
#define PIN_OFFSET_MASK		(~4095)

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags);
static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	/* Pin early to prevent the shrinker/eviction logic from destroying
	 * our vma as we insert and bind.
	 */
	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
		return 0;

	return __i915_vma_do_pin(vma, size, alignment, flags);
}

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	vma->flags++;
	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	vma->flags--;
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}
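
/*
 * Illustrative use of the pin helpers (a sketch; the size/alignment/flags
 * below are only an example): pin a vma into the CPU-mappable part of the
 * GGTT, use it, then drop the pin:
 *
 *	int err = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (err)
 *		return err;
 *	... access the object through vma->node.start ...
 *	i915_vma_unpin(vma);
 */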
68120dfbde4SChris Wilson 
6828ef8561fSChris Wilson /**
6838ef8561fSChris Wilson  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
6848ef8561fSChris Wilson  * @vma: VMA to iomap
6858ef8561fSChris Wilson  *
6868ef8561fSChris Wilson  * The passed in VMA has to be pinned in the global GTT mappable region.
6878ef8561fSChris Wilson  * An extra pinning of the VMA is acquired for the return iomapping,
6888ef8561fSChris Wilson  * the caller must call i915_vma_unpin_iomap to relinquish the pinning
6898ef8561fSChris Wilson  * after the iomapping is no longer required.
6908ef8561fSChris Wilson  *
6918ef8561fSChris Wilson  * Callers must hold the struct_mutex.
6928ef8561fSChris Wilson  *
6938ef8561fSChris Wilson  * Returns a valid iomapped pointer or ERR_PTR.
6948ef8561fSChris Wilson  */
6958ef8561fSChris Wilson void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
696406ea8d2SChris Wilson #define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
6978ef8561fSChris Wilson 
6988ef8561fSChris Wilson /**
6998ef8561fSChris Wilson  * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
7008ef8561fSChris Wilson  * @vma: VMA to unpin
7018ef8561fSChris Wilson  *
7028ef8561fSChris Wilson  * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
7038ef8561fSChris Wilson  *
7048ef8561fSChris Wilson  * Callers must hold the struct_mutex. This function is only valid to be
7058ef8561fSChris Wilson  * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
7068ef8561fSChris Wilson  */
7078ef8561fSChris Wilson static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
7088ef8561fSChris Wilson {
7098ef8561fSChris Wilson 	lockdep_assert_held(&vma->vm->dev->struct_mutex);
7108ef8561fSChris Wilson 	GEM_BUG_ON(vma->iomap == NULL);
71120dfbde4SChris Wilson 	i915_vma_unpin(vma);
7128ef8561fSChris Wilson }
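
/*
 * Illustrative pairing of the iomap helpers (a sketch; memcpy_toio() stands
 * in for whatever access the caller actually needs):
 *
 *	void __iomem *ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy_toio(ptr + offset, src, len);
 *	i915_vma_unpin_iomap(vma);
 */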

#endif