/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

struct drm_i915_file_private;

typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
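/*
 * For example (illustrative numbers only): with 4KiB pages, a 2GB global GTT
 * covers 2GB >> 12 = 524288 pages, so gtt_total_entries() reports 524288
 * global PTEs.
 */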

#define I915_PPGTT_PT_ENTRIES		(PAGE_SIZE / sizeof(gen6_gtt_pte_t))
/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
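/*
 * Worked example (illustrative only, assuming a 40-bit physical address):
 * for addr = 0x1234567000, GEN6_PTE_ADDR_ENCODE() keeps bits 31:12 in place
 * and folds address bits 39:32 (0x12) into PTE bits 11:4. Stored into a
 * 32-bit gen6_gtt_pte_t, a cached, valid PTE for that page is:
 *
 *	GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID
 *		= 0x34567120 | (2 << 1) | (1 << 0) = 0x34567125
 */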

#define GEN6_PPGTT_PD_ENTRIES		512
#define GEN6_PD_SIZE			(GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
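/*
 * Worked example (illustrative only): HSW_CACHEABILITY_CONTROL() splits the
 * 4-bit cacheability index across the PTE. For 0xb (HSW_WB_ELLC_LLC_AGE0) the
 * low three bits land in PTE bits 3:1 and the fourth bit lands in bit 11:
 *
 *	HSW_CACHEABILITY_CONTROL(0xb) = ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8)
 *				      = 0x006 | 0x800 = 0x806
 */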

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 */
#define GEN8_PDPE_SHIFT			30
#define GEN8_PDPE_MASK			0x3
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPS		4
#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
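/*
 * Illustrative sketch (hypothetical local variables, not driver API): the
 * legacy gen8 layout above splits a 32-bit GPU address into the three
 * page-table indices plus the page offset:
 *
 *	pdpe   = (gpu_addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK; // bits 31:30
 *	pde    = (gpu_addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK;   // bits 29:21
 *	pte    = (gpu_addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;   // bits 20:12
 *	offset = gpu_addr & (PAGE_SIZE - 1);                     // bits 11:0
 */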

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
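/*
 * Illustrative sketch (example entries only, not the table the driver
 * actually programs): GEN8_PPAT() places one 8-bit PPAT entry into byte 'i'
 * of the 64-bit PPAT register value, so a full table is built by OR-ing
 * entries together:
 *
 *	u64 ppat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *		   GEN8_PPAT(1, GEN8_PPAT_WC) |
 *		   GEN8_PPAT(2, GEN8_PPAT_WT) |
 *		   GEN8_PPAT(3, GEN8_PPAT_UC);
 */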

enum i915_cache_level;
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
#define LOCAL_BIND	(1<<1)
#define PTE_READ_ONLY	(1<<2)
	unsigned int bound : 4;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start;		/* Start offset always 0 for dri2 */
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 flags); /* Create a valid PTE */
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
};
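/*
 * Illustrative sketch (simplified, hypothetical name): a typical unbind_vma()
 * implementation points the VMA's PTE range back at the address space's
 * scratch page via the clear_range() hook, matching the comment on
 * unbind_vma above:
 *
 *	static void example_unbind_vma(struct i915_vma *vma)
 *	{
 *		vma->vm->clear_range(vma->vm, vma->node.start,
 *				     vma->obj->base.size, true);
 *	}
 */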

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			  size_t *stolen, phys_addr_t *mappable_base,
			  unsigned long *mappable_end);
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned num_pd_entries;
	unsigned num_pd_pages; /* gen8+ */
	union {
		struct page **pt_pages;
		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
	};
	struct page *pd_pages;
	union {
		uint32_t pd_offset;
		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
	};
	union {
		dma_addr_t *pt_dma_addr;
		dma_addr_t *gen8_pt_dma_addr[4];
	};

	struct drm_i915_file_private *file_priv;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct intel_engine_cs *ring);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
					struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

#endif