/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
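/*
 * Worked example (illustrative, not part of the original header): for a
 * 40-bit physical address such as 0x01_2345_6000, addr >> 28 moves address
 * bits 39:32 down to bits 11:4 and the 0xff0 mask keeps exactly those bits,
 * so the value becomes 0x23456010 once truncated to the 32-bit gen6_pte_t
 * (the 0x010 carries physical address bit 32).
 */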
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)

#define GEN12_GGTT_PTE_LM	BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
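/*
 * Worked example (illustrative, not part of the original header):
 * HSW_CACHEABILITY_CONTROL(0x8) keeps no low bits (0x8 & 0x7 == 0) and
 * shifts the fourth bit up by 8, yielding 0x800 (PTE bit 11 set), while
 * HSW_CACHEABILITY_CONTROL(0x2) yields 0x4 (PTE bits 3:1 == 010b).
 */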
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
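/*
 * Illustrative decomposition (not part of the original header): for the
 * 48b layout the per-level indices of an address can be derived as
 *	pml4e = (addr >> 39) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pte   = (addr >> 12) & 0x1ff;
 * with each level indexing one of 512 (I915_PDES) entries.
 */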
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
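/*
 * Usage sketch (illustrative, not part of the original header; assumes
 * struct sgt_iter and __for_each_sgt_daddr() from i915_scatterlist.h):
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	for_each_sgt_daddr(daddr, iter, sgt)
 *		write_pte(vm, daddr);	// hypothetical per-page helper
 */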

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
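/*
 * Illustrative note (not part of the original header): px_base() resolves
 * at compile time to the backing GEM object for whichever page-table type
 * it is handed, e.g. px_base(pd) for a struct i915_page_directory *pd
 * expands to pd->pt.base, while px_base(obj) for a GEM object is the
 * identity.
 */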

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
};
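/*
 * Note (an assumption based on how the stash is consumed elsewhere in gt/,
 * not stated in this header): pt[0] is believed to chain preallocated page
 * tables and pt[1] page directories, linked through the ->stash member of
 * the union in struct i915_page_table.
 */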

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTEs to point at a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
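/*
 * Worked example (illustrative, not part of the original header): a 4GiB
 * address space has total == 1ULL << 32, so (total - 1) >> 32 == 0 and
 * three levels suffice, whereas a 48b/256TiB space yields a non-zero
 * result and selects the 4-level layout.
 */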

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
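/*
 * Illustrative example (not part of the original header): with gen6's
 * GEN6_PDE_SHIFT of 22, NUM_PTE(22) == 1 << 10 == 1024 == GEN6_PTES, so
 * i915_pte_index() returns bits 21:12 of the address, i.e. the slot
 * within a single page table.
 */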

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
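/*
 * Worked example (illustrative, not part of the original header): with
 * pde_shift == 22, addr == 0x3ff000 and length == 0x2000 straddle a 4MiB
 * page-table boundary, so only the one PTE left before the boundary
 * (1024 - 1023) is counted; the rest of the range is left for the
 * caller's next iteration.
 */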

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
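/*
 * Illustrative example (not part of the original header):
 * fill32_px(pt, 0xdeadbeef) replicates the 32-bit value into both halves
 * of each u64, writing 0xdeadbeefdeadbeef across the whole backing page,
 * e.g. to splat a 32-bit scratch PTE with the 64-bit fill helper.
 */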

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);
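/*
 * Usage sketch (illustrative, not part of the original header; mirrors how
 * callers such as the vma bind path are believed to drive the stash API,
 * with error handling elided):
 *
 *	struct i915_vm_pt_stash stash = {};
 *
 *	err = i915_vm_alloc_pt_stash(vm, &stash, size);	// preallocate
 *	if (!err)
 *		err = i915_vm_map_pt_stash(vm, &stash);	// map for CPU access
 *	if (!err)
 *		vm->allocate_va_range(vm, &stash, start, size);
 *	i915_vm_free_pt_stash(vm, &stash);		// release leftovers
 */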

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

#endif