/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2003-2018 Intel Corporation
 */

#ifndef _INTEL_GPU_COMMANDS_H_
#define _INTEL_GPU_COMMANDS_H_

#include <linux/bitops.h>

/*
 * Target address alignments required for GPU access e.g.
 * MI_STORE_DWORD_IMM.
 */
#define alignof_dword 4
#define alignof_qword 8

/*
 * Instruction field definitions used by the command parser
 */
#define INSTR_CLIENT_SHIFT      29
#define   INSTR_MI_CLIENT       0x0
#define   INSTR_BC_CLIENT       0x2
#define   INSTR_GSC_CLIENT      0x2 /* MTL+ */
#define   INSTR_RC_CLIENT       0x3
#define INSTR_SUBCLIENT_SHIFT   27
#define INSTR_SUBCLIENT_MASK    0x18000000
#define   INSTR_MEDIA_SUBCLIENT 0x2
#define INSTR_26_TO_24_MASK	0x7000000
#define   INSTR_26_TO_24_SHIFT	24

#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT)

/*
 * Memory interface instructions used by the kernel
 */
#define MI_INSTR(opcode, flags) \
	(__INSTR(INSTR_MI_CLIENT) | (opcode) << 23 | (flags))
/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
#define  MI_GLOBAL_GTT    (1<<22)

#define MI_NOOP			MI_INSTR(0, 0)
#define MI_SET_PREDICATE	MI_INSTR(0x01, 0)
#define   MI_SET_PREDICATE_DISABLE	(0 << 0)
#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define MI_FLUSH		MI_INSTR(0x04, 0)
#define   MI_READ_FLUSH		(1 << 0)
#define   MI_EXE_FLUSH		(1 << 1)
#define   MI_NO_WRITE_FLUSH	(1 << 2)
#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
#define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
#define   MI_ARB_ENABLE			(1<<0)
#define   MI_ARB_DISABLE		(0<<0)
#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
#define   MI_SUSPEND_FLUSH_EN	(1<<0)
#define MI_SET_APPID		MI_INSTR(0x0e, 0)
#define   MI_SET_APPID_SESSION_ID(x)	((x) << 0)
#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
#define   MI_OVERLAY_CONTINUE	(0x0<<21)
#define   MI_OVERLAY_ON		(0x1<<21)
#define   MI_OVERLAY_OFF	(0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
/* IVB has funny definitions for which plane to flip. */
#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
/* SKL ones */
#define   MI_DISPLAY_FLIP_SKL_PLANE_1_A	(0 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_1_B	(1 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_1_C	(2 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_2_A	(4 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_2_B	(5 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_2_C	(6 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_3_A	(7 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_3_B	(8 << 8)
#define   MI_DISPLAY_FLIP_SKL_PLANE_3_C	(9 << 8)
#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6, gen7 */
#define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
#define   MI_SEMAPHORE_UPDATE	    (1<<21)
#define   MI_SEMAPHORE_COMPARE	    (1<<20)
#define   MI_SEMAPHORE_REGISTER	    (1<<18)
#define   MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
#define   MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
#define   MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
#define   MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
#define   MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
#define   MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
#define   MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
#define   MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
#define   MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
#define   MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
#define   MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
#define   MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
#define   MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define   MI_SEMAPHORE_SYNC_MASK    (3<<16)
#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
#define   MI_MM_SPACE_GTT		(1<<8)
#define   MI_MM_SPACE_PHYSICAL		(0<<8)
#define   MI_SAVE_EXT_STATE_EN		(1<<3)
#define   MI_RESTORE_EXT_STATE_EN	(1<<2)
#define   MI_FORCE_RESTORE		(1<<1)
#define   MI_RESTORE_INHIBIT		(1<<0)
#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
#define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
#define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
#define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_WAIT_TOKEN	MI_INSTR(0x1c, 3) /* GEN12+ */
#define   MI_SEMAPHORE_REGISTER_POLL	(1 << 16)
#define   MI_SEMAPHORE_POLL		(1 << 15)
#define   MI_SEMAPHORE_SAD_GT_SDD	(0 << 12)
#define   MI_SEMAPHORE_SAD_GTE_SDD	(1 << 12)
#define   MI_SEMAPHORE_SAD_LT_SDD	(2 << 12)
#define   MI_SEMAPHORE_SAD_LTE_SDD	(3 << 12)
#define   MI_SEMAPHORE_SAD_EQ_SDD	(4 << 12)
#define   MI_SEMAPHORE_SAD_NEQ_SDD	(5 << 12)
#define   MI_SEMAPHORE_TOKEN_MASK	REG_GENMASK(9, 5)
#define   MI_SEMAPHORE_TOKEN_SHIFT	5
#define MI_STORE_DATA_IMM	MI_INSTR(0x20, 0)
#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4	MI_INSTR(0x20, 2)
#define MI_STORE_QWORD_IMM_GEN8 (MI_INSTR(0x20, 3) | REG_BIT(21))
#define   MI_MEM_VIRTUAL	(1 << 22) /* 945,g33,965 */
#define   MI_USE_GGTT		(1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
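
/*
 * Illustrative sketch only: emitting a GGTT-based MI_STORE_DWORD_IMM on
 * gen8+ style hardware.  Assumes the caller has reserved four dwords of
 * ring/batch space and that gtt_addr honours the alignof_dword requirement
 * noted at the top of this file.
 */
static inline u32 *__example_emit_store_dword(u32 *cs, u64 gtt_addr, u32 value)
{
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = (u32)gtt_addr;		/* address bits [31:0] */
	*cs++ = (u32)(gtt_addr >> 32);	/* address bits [63:32] */
	*cs++ = value;			/* immediate data to store */

	return cs;
}
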
#define MI_ATOMIC		MI_INSTR(0x2f, 1)
#define MI_ATOMIC_INLINE	(MI_INSTR(0x2f, 9) | MI_ATOMIC_INLINE_DATA)
#define   MI_ATOMIC_GLOBAL_GTT		(1 << 22)
#define   MI_ATOMIC_INLINE_DATA		(1 << 18)
#define   MI_ATOMIC_CS_STALL		(1 << 17)
#define   MI_ATOMIC_MOVE		(0x4 << 8)

/*
 * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
 * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
 *   simply ignores the register load under certain conditions.
 * - One can actually load arbitrarily many arbitrary registers: Simply issue x
 *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
 */
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
#define   MI_LRI_LRM_CS_MMIO		REG_BIT(19)
#define   MI_LRI_MMIO_REMAP_EN		REG_BIT(17)
#define   MI_LRI_FORCE_POSTED		(1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
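
/*
 * Illustrative sketch only: emitting MI_LOAD_REGISTER_IMM as described in the
 * comment above - an MI_NOOP first, then one header dword followed by
 * register-offset/value pairs.  Assumes the caller has reserved
 * 2 + 2 * count dwords and keeps count within the documented limits.
 */
static inline u32 *__example_emit_lri(u32 *cs, u32 count,
				      const u32 *regs, const u32 *values)
{
	u32 i;

	*cs++ = MI_NOOP;
	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (i = 0; i < count; i++) {
		*cs++ = regs[i];	/* MMIO offset of the register */
		*cs++ = values[i];	/* value to load into it */
	}

	return cs;
}
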
#define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8   MI_INSTR(0x24, 2)
#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
#define   MI_FLUSH_DW_PROTECTED_MEM_EN	(1 << 22)
#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
#define   MI_INVALIDATE_TLB		(1<<18)
#define   MI_FLUSH_DW_CCS		(1<<16)
#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
#define   MI_FLUSH_DW_OP_MASK		(3<<14)
#define   MI_FLUSH_DW_LLC		(1<<9)
#define   MI_FLUSH_DW_NOTIFY		(1<<8)
#define   MI_INVALIDATE_BSD		(1<<7)
#define   MI_FLUSH_DW_USE_GTT		(1<<2)
#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
#define MI_LOAD_REGISTER_MEM	   MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
#define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 1)
#define   MI_LRR_SOURCE_CS_MMIO		REG_BIT(18)
#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
#define   MI_BATCH_NON_SECURE		(1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
#define   MI_BATCH_NON_SECURE_I965	(1<<8)
#define   MI_BATCH_PPGTT_HSW		(1<<8)
#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
#define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
#define   MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
#define   MI_BATCH_PREDICATE         REG_BIT(15) /* HSW+ on RCS only */

#define MI_OPCODE(x)		(((x) >> 23) & 0x3f)
#define IS_MI_LRI_CMD(x)	(MI_OPCODE(x) == MI_OPCODE(MI_INSTR(0x22, 0)))
#define MI_LRI_LEN(x)		(((x) & 0xff) + 1)
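
/*
 * Illustrative sketch only: decoding an LRI header with the helpers above.
 * MI_LRI_LEN() yields the number of payload dwords, i.e. two per
 * register/value pair, so half of it is the register count.
 */
static inline u32 __example_lri_reg_count(u32 header)
{
	return IS_MI_LRI_CMD(header) ? MI_LRI_LEN(header) / 2 : 0;
}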

/*
 * 3D instructions used by the kernel
 */
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))

#define GEN9_MEDIA_POOL_STATE     ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
#define   GEN9_MEDIA_POOL_ENABLE  (1 << 31)
#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define   SC_UPDATE_SCISSOR       (0x1<<1)
#define   SC_ENABLE_MASK          (0x1<<0)
#define   SC_ENABLE               (0x1<<0)
#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define   SCI_YMIN_MASK      (0xffff<<16)
#define   SCI_XMIN_MASK      (0xffff<<0)
#define   SCI_YMAX_MASK      (0xffff<<16)
#define   SCI_XMAX_MASK      (0xffff<<0)
#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)

#define XY_CTRL_SURF_INSTR_SIZE		5
#define MI_FLUSH_DW_SIZE		3
#define XY_CTRL_SURF_COPY_BLT		((2 << 29) | (0x48 << 22) | 3)
#define   SRC_ACCESS_TYPE_SHIFT		21
#define   DST_ACCESS_TYPE_SHIFT		20
#define   CCS_SIZE_MASK			0x3FF
#define   CCS_SIZE_SHIFT		8
#define   XY_CTRL_SURF_MOCS_MASK	GENMASK(31, 25)
#define   NUM_CCS_BYTES_PER_BLOCK	256
#define   NUM_BYTES_PER_CCS_BYTE	256
#define   NUM_CCS_BLKS_PER_XFER		1024
#define   INDIRECT_ACCESS		0
#define   DIRECT_ACCESS			1
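
/*
 * Illustrative sketch only, assuming the usual flat-CCS layout: each byte of
 * CCS metadata covers NUM_BYTES_PER_CCS_BYTE bytes of the main surface, and
 * XY_CTRL_SURF_COPY_BLT moves that metadata in NUM_CCS_BYTES_PER_BLOCK byte
 * blocks, at most NUM_CCS_BLKS_PER_XFER blocks per command.  DIV_ROUND_UP is
 * the usual kernel round-up helper.
 */
static inline u32 __example_ccs_blocks(u32 main_surface_bytes)
{
	u32 ccs_bytes = DIV_ROUND_UP(main_surface_bytes, NUM_BYTES_PER_CCS_BYTE);

	return DIV_ROUND_UP(ccs_bytes, NUM_CCS_BYTES_PER_BLOCK);
}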

#define COLOR_BLT_CMD			(2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD		(2 << 29 | 0x50 << 22)
#define XY_FAST_COLOR_BLT_CMD		(2 << 29 | 0x44 << 22)
#define   XY_FAST_COLOR_BLT_DEPTH_32	(2 << 19)
#define   XY_FAST_COLOR_BLT_DW		16
#define   XY_FAST_COLOR_BLT_MOCS_MASK	GENMASK(27, 21)
#define   XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31

#define   XY_FAST_COPY_BLT_D0_SRC_TILING_MASK     REG_GENMASK(21, 20)
#define   XY_FAST_COPY_BLT_D0_DST_TILING_MASK     REG_GENMASK(14, 13)
#define   XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(mode)  \
	REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_SRC_TILING_MASK, mode)
#define   XY_FAST_COPY_BLT_D0_DST_TILE_MODE(mode)  \
	REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_DST_TILING_MASK, mode)
#define     LINEAR				0
#define     TILE_X				0x1
#define     XMAJOR				0x1
#define     YMAJOR				0x2
#define     TILE_64			0x3
#define   XY_FAST_COPY_BLT_D1_SRC_TILE4	REG_BIT(31)
#define   XY_FAST_COPY_BLT_D1_DST_TILE4	REG_BIT(30)
#define BLIT_CCTL_SRC_MOCS_MASK  REG_GENMASK(6, 0)
#define BLIT_CCTL_DST_MOCS_MASK  REG_GENMASK(14, 8)
/* Note:  MOCS value = (index << 1) */
#define BLIT_CCTL_SRC_MOCS(idx) \
	REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (idx) << 1)
#define BLIT_CCTL_DST_MOCS(idx) \
	REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (idx) << 1)

#define SRC_COPY_BLT_CMD		(2 << 29 | 0x43 << 22)
#define GEN9_XY_FAST_COPY_BLT_CMD	(2 << 29 | 0x42 << 22)
#define XY_SRC_COPY_BLT_CMD		(2 << 29 | 0x53 << 22)
#define XY_MONO_SRC_COPY_IMM_BLT	(2 << 29 | 0x71 << 22 | 5)
#define   BLT_WRITE_A			(2<<20)
#define   BLT_WRITE_RGB			(1<<20)
#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
#define   BLT_DEPTH_8			(0<<24)
#define   BLT_DEPTH_16_565		(1<<24)
#define   BLT_DEPTH_16_1555		(2<<24)
#define   BLT_DEPTH_32			(3<<24)
#define   BLT_ROP_SRC_COPY		(0xcc<<16)
#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define   ASYNC_FLIP                (1<<22)
#define   DISPLAY_PLANE_A           (0<<20)
#define   DISPLAY_PLANE_B           (1<<20)
#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
#define   PIPE_CONTROL_COMMAND_CACHE_INVALIDATE		(1<<29) /* gen11+ */
#define   PIPE_CONTROL_TILE_CACHE_FLUSH			(1<<28) /* gen11+ */
#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
#define   PIPE_CONTROL_AMFS_FLUSH			(1<<25) /* gen12+ */
#define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
#define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
#define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
#define   PIPE_CONTROL_CS_STALL				(1<<20)
#define   PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET		(1<<19)
#define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18)
#define   PIPE_CONTROL_PSD_SYNC				(1<<17) /* gen11+ */
#define   PIPE_CONTROL_MEDIA_STATE_CLEAR		(1<<16)
#define   PIPE_CONTROL_WRITE_TIMESTAMP			(3<<14)
#define   PIPE_CONTROL_QW_WRITE				(1<<14)
#define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
#define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
#define   PIPE_CONTROL_CCS_FLUSH			(1<<13) /* MTL+ */
#define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on ILK */
#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
#define   PIPE_CONTROL0_HDC_PIPELINE_FLUSH		REG_BIT(9)  /* gen12 */
#define   PIPE_CONTROL_NOTIFY				(1<<8)
#define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
#define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
#define   PIPE_CONTROL_STALL_AT_SCOREBOARD		(1<<1)
#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH		(1<<0)
#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */

/*
 * 3D-related flags that can't be set on _engines_ that lack access to the 3D
 * pipeline (i.e., CCS engines).
 */
#define PIPE_CONTROL_3D_ENGINE_FLAGS ( \
		PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
		PIPE_CONTROL_TILE_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_STALL | \
		PIPE_CONTROL_STALL_AT_SCOREBOARD | \
		PIPE_CONTROL_PSD_SYNC | \
		PIPE_CONTROL_AMFS_FLUSH | \
		PIPE_CONTROL_VF_CACHE_INVALIDATE | \
		PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)

/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
		PIPE_CONTROL_3D_ENGINE_FLAGS | \
		PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
		PIPE_CONTROL_FLUSH_ENABLE | \
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
		PIPE_CONTROL_DC_FLUSH_ENABLE)
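
/*
 * Illustrative sketch only: before emitting a PIPE_CONTROL on an engine that
 * has no access to the 3D pipeline (e.g. a compute-only engine), the 3D-only
 * flags above have to be masked out, otherwise the command is invalid.
 */
static inline u32 __example_sanitise_pipe_control_flags(u32 flags, bool has_3d_pipe)
{
	if (!has_3d_pipe)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	return flags;
}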

#define MI_MATH(x)			MI_INSTR(0x1a, (x) - 1)
#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
/* Opcodes for MI_MATH_INSTR */
#define   MI_MATH_NOOP			MI_MATH_INSTR(0x000, 0x0, 0x0)
#define   MI_MATH_LOAD(op1, op2)	MI_MATH_INSTR(0x080, op1, op2)
#define   MI_MATH_LOADINV(op1, op2)	MI_MATH_INSTR(0x480, op1, op2)
#define   MI_MATH_LOAD0(op1)		MI_MATH_INSTR(0x081, op1, 0x0)
#define   MI_MATH_LOAD1(op1)		MI_MATH_INSTR(0x481, op1, 0x0)
#define   MI_MATH_ADD			MI_MATH_INSTR(0x100, 0x0, 0x0)
#define   MI_MATH_SUB			MI_MATH_INSTR(0x101, 0x0, 0x0)
#define   MI_MATH_AND			MI_MATH_INSTR(0x102, 0x0, 0x0)
#define   MI_MATH_OR			MI_MATH_INSTR(0x103, 0x0, 0x0)
#define   MI_MATH_XOR			MI_MATH_INSTR(0x104, 0x0, 0x0)
#define   MI_MATH_STORE(op1, op2)	MI_MATH_INSTR(0x180, op1, op2)
#define   MI_MATH_STOREINV(op1, op2)	MI_MATH_INSTR(0x580, op1, op2)
/* Registers used as operands in MI_MATH_INSTR */
#define   MI_MATH_REG(x)		(x)
#define   MI_MATH_REG_SRCA		0x20
#define   MI_MATH_REG_SRCB		0x21
#define   MI_MATH_REG_ACCU		0x31
#define   MI_MATH_REG_ZF		0x32
#define   MI_MATH_REG_CF		0x33
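
/*
 * Illustrative sketch only: a minimal MI_MATH program built from the opcodes
 * above.  It adds general purpose registers GPR0 and GPR1 and writes the
 * result back to GPR0 via the accumulator.  Assumes the caller has reserved
 * five dwords.
 */
static inline u32 *__example_emit_gpr_add(u32 *cs)
{
	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(0), MI_MATH_REG_ACCU);

	return cs;
}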

/*
 * Media instructions used by the kernel
 */
#define MEDIA_INSTR(pipe, op, sub_op, flags) \
	(__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \
	(op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags))

#define MFX_WAIT				MEDIA_INSTR(1, 0, 0, 0)
#define  MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG	REG_BIT(8)
#define  MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG	REG_BIT(9)

#define CRYPTO_KEY_EXCHANGE			MEDIA_INSTR(2, 6, 9, 0)

/*
 * Commands used only by the command parser
 */
#define MI_SET_PREDICATE        MI_INSTR(0x01, 0)
#define MI_ARB_CHECK            MI_INSTR(0x05, 0)
#define MI_RS_CONTROL           MI_INSTR(0x06, 0)
#define MI_URB_ATOMIC_ALLOC     MI_INSTR(0x09, 0)
#define MI_PREDICATE            MI_INSTR(0x0C, 0)
#define MI_RS_CONTEXT           MI_INSTR(0x0F, 0)
#define MI_TOPOLOGY_FILTER      MI_INSTR(0x0D, 0)
#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
#define MI_URB_CLEAR            MI_INSTR(0x19, 0)
#define MI_UPDATE_GTT           MI_INSTR(0x23, 0)
#define MI_CLFLUSH              MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
#define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
#define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM        MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
#define  MI_DO_COMPARE		REG_BIT(21)

#define STATE_BASE_ADDRESS \
	((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
#define BASE_ADDRESS_MODIFY		REG_BIT(0)
#define PIPELINE_SELECT \
	((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
#define PIPELINE_SELECT_MEDIA	       REG_BIT(0)
#define GFX_OP_3DSTATE_VF_STATISTICS \
	((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
#define MEDIA_VFE_STATE \
	((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define  MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
	((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
#define MEDIA_OBJECT \
	((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
#define GFX_OP_3DSTATE_SO_DECL_LIST \
	((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))

#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))

#define COLOR_BLT     ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT  ((0x2<<29)|(0x43<<22))

#define GSC_INSTR(opcode, data, flags) \
	(__INSTR(INSTR_GSC_CLIENT) | (opcode) << 22 | (data) << 9 | (flags))

#define GSC_FW_LOAD GSC_INSTR(1, 0, 2)
#define   HECI1_FW_LIMIT_VALID (1 << 31)

#define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6)

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
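
/*
 * Example: with bit 47 set, 0x0000800000000000 becomes 0xffff800000000000 in
 * canonical form; gen8_noncanonical_addr() undoes this by clearing bits
 * [63:48] again.
 */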

static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
{
	*cs++ = MI_BATCH_BUFFER_START | flags;
	*cs++ = addr;

	return cs;
}

#endif /* _INTEL_GPU_COMMANDS_H_ */