/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2003-2018 Intel Corporation
 */

#ifndef _INTEL_GPU_COMMANDS_H_
#define _INTEL_GPU_COMMANDS_H_

#include <linux/bitops.h>

/*
 * Target address alignments required for GPU access e.g.
 * MI_STORE_DWORD_IMM.
 */
#define alignof_dword 4
#define alignof_qword 8

/*
 * Instruction field definitions used by the command parser
 */
#define INSTR_CLIENT_SHIFT 29
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
#define INSTR_26_TO_24_MASK 0x7000000
#define INSTR_26_TO_24_SHIFT 24

#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT)

/*
 * Memory interface instructions used by the kernel
 */
#define MI_INSTR(opcode, flags) \
	(__INSTR(INSTR_MI_CLIENT) | (opcode) << 23 | (flags))
/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
#define MI_GLOBAL_GTT (1<<22)

#define MI_NOOP MI_INSTR(0, 0)
#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
#define MI_SET_PREDICATE_DISABLE (0 << 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define MI_FLUSH MI_INSTR(0x04, 0)
#define MI_READ_FLUSH (1 << 0)
#define MI_EXE_FLUSH (1 << 1)
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
#define MI_ARB_ENABLE (1<<0)
#define MI_ARB_DISABLE (0<<0)
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_SET_APPID MI_INSTR(0x0e, 0)
#define MI_SET_APPID_SESSION_ID(x) ((x) << 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
/* IVB has funny definitions for which plane to flip. */
#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
/* SKL ones */
#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define MI_SEMAPHORE_SYNC_MASK (3<<16)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
#define MI_SAVE_EXT_STATE_EN (1<<3)
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
#define MI_SEMAPHORE_TOKEN_SHIFT 5
#define MI_STORE_DATA_IMM MI_INSTR(0x20, 0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_STORE_QWORD_IMM_GEN8 (MI_INSTR(0x20, 3) | REG_BIT(21))
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
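
/*
 * Illustrative sketch (not part of the driver): emit an MI_STORE_DWORD_IMM
 * to a GGTT address, showing how the MI_INSTR() header above combines with
 * the GGTT select bit (bit 22). The address must honour alignof_dword; the
 * two-dword address split assumes a gen8+ style 48-bit layout. The helper
 * name and the open-coded 32-bit splits are assumptions of this sketch.
 */
static inline u32 *__example_emit_store_dword_imm(u32 *cs, u64 gtt_offset,
						  u32 value)
{
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = (u32)gtt_offset;		/* address, low dword */
	*cs++ = (u32)(gtt_offset >> 32);	/* address, high dword */
	*cs++ = value;				/* immediate data */

	return cs;
}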

#define MI_ATOMIC MI_INSTR(0x2f, 1)
#define MI_ATOMIC_INLINE (MI_INSTR(0x2f, 9) | MI_ATOMIC_INLINE_DATA)
#define MI_ATOMIC_GLOBAL_GTT (1 << 22)
#define MI_ATOMIC_INLINE_DATA (1 << 18)
#define MI_ATOMIC_CS_STALL (1 << 17)
#define MI_ATOMIC_MOVE (0x4 << 8)

/*
 * Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
 * - Always issue an MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
 *   simply ignores the register load under certain conditions.
 * - One can actually load arbitrarily many arbitrary registers: simply issue
 *   x address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
 */
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
#define MI_LRI_MMIO_REMAP_EN REG_BIT(17)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
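
/*
 * Illustrative sketch (not part of the driver): load a single register by
 * immediate, with the MI_NOOP the comment above calls for in front of it.
 * @reg is assumed to already be a raw MMIO offset; the helper name is an
 * assumption of this sketch.
 */
static inline u32 *__example_emit_lri(u32 *cs, u32 reg, u32 value)
{
	*cs++ = MI_NOOP;
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = reg;
	*cs++ = value;

	return cs;
}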

#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22)
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_CCS (1<<16)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
#define MI_FLUSH_DW_OP_MASK (3<<14)
#define MI_FLUSH_DW_LLC (1<<9)
#define MI_FLUSH_DW_NOTIFY (1<<8)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
#define MI_LRR_SOURCE_CS_MMIO REG_BIT(18)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_PPGTT_HSW (1<<8)
#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only */
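
/*
 * Illustrative sketch (not part of the driver): a gen8+ counterpart to
 * __gen6_emit_bb_start() at the bottom of this file, starting a batch at a
 * 48-bit address. Treating MI_BATCH_NON_SECURE_I965 (bit 8) as the gen8+
 * address-space/privilege selector is an assumption of this sketch, as are
 * the helper name and the open-coded 32-bit splits.
 */
static inline u32 *__example_emit_bb_start_gen8(u32 *cs, u64 offset)
{
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | MI_BATCH_NON_SECURE_I965;
	*cs++ = (u32)offset;		/* batch address, low dword */
	*cs++ = (u32)(offset >> 32);	/* batch address, high dword */

	return cs;
}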

/*
 * 3D instructions used by the kernel
 */
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))

#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)

#define XY_CTRL_SURF_INSTR_SIZE 5
#define MI_FLUSH_DW_SIZE 3
#define XY_CTRL_SURF_COPY_BLT ((2 << 29) | (0x48 << 22) | 3)
#define SRC_ACCESS_TYPE_SHIFT 21
#define DST_ACCESS_TYPE_SHIFT 20
#define CCS_SIZE_MASK 0x3FF
#define CCS_SIZE_SHIFT 8
#define XY_CTRL_SURF_MOCS_MASK GENMASK(31, 25)
#define NUM_CCS_BYTES_PER_BLOCK 256
#define NUM_BYTES_PER_CCS_BYTE 256
#define NUM_CCS_BLKS_PER_XFER 1024
#define INDIRECT_ACCESS 0
#define DIRECT_ACCESS 1

#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
#define XY_FAST_COLOR_BLT_CMD (2 << 29 | 0x44 << 22)
#define XY_FAST_COLOR_BLT_DEPTH_32 (2 << 19)
#define XY_FAST_COLOR_BLT_DW 16
#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21)
#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31

#define XY_FAST_COPY_BLT_D0_SRC_TILING_MASK REG_GENMASK(21, 20)
#define XY_FAST_COPY_BLT_D0_DST_TILING_MASK REG_GENMASK(14, 13)
#define XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(mode) \
	REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_SRC_TILING_MASK, mode)
#define XY_FAST_COPY_BLT_D0_DST_TILE_MODE(mode) \
	REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_DST_TILING_MASK, mode)
#define LINEAR 0
#define TILE_X 0x1
#define XMAJOR 0x1
#define YMAJOR 0x2
#define TILE_64 0x3
#define XY_FAST_COPY_BLT_D1_SRC_TILE4 REG_BIT(31)
#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
/* Note: MOCS value = (index << 1) */
#define BLIT_CCTL_SRC_MOCS(idx) \
	REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (idx) << 1)
#define BLIT_CCTL_DST_MOCS(idx) \
	REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (idx) << 1)

#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
#define XY_MONO_SRC_COPY_IMM_BLT (2 << 29 | 0x71 << 22 | 5)
#define BLT_WRITE_A (2<<20)
#define BLT_WRITE_RGB (1<<20)
#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
#define BLT_DEPTH_8 (0<<24)
#define BLT_DEPTH_16_565 (1<<24)
#define BLT_DEPTH_16_1555 (2<<24)
#define BLT_DEPTH_32 (3<<24)
#define BLT_ROP_SRC_COPY (0xcc<<16)
#define BLT_ROP_COLOR_COPY (0xf0<<16)
#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) /* gen11+ */
#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28) /* gen11+ */
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_AMFS_FLUSH (1<<25) /* gen12+ */
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET (1<<19)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_PSD_SYNC (1<<17) /* gen11+ */
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
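
/*
 * Illustrative sketch (not part of the driver): a six-dword, gen8+ style
 * PIPE_CONTROL that stalls the command streamer and posts a qword write of
 * @value to a GGTT address. The particular flag combination, the helper name
 * and the fixed six-dword length are assumptions of this sketch, not a
 * recommendation.
 */
static inline u32 *__example_emit_pipe_control(u32 *cs, u32 gtt_offset,
					       u32 value)
{
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE |
		PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;	/* post-sync address, low dword */
	*cs++ = 0;		/* post-sync address, high dword */
	*cs++ = value;		/* immediate data, low dword */
	*cs++ = 0;		/* immediate data, high dword */

	return cs;
}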

/*
 * 3D-related flags that can't be set on _engines_ that lack access to the 3D
 * pipeline (i.e., CCS engines).
 */
#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
		PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
		PIPE_CONTROL_TILE_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_STALL | \
		PIPE_CONTROL_STALL_AT_SCOREBOARD | \
		PIPE_CONTROL_PSD_SYNC | \
		PIPE_CONTROL_AMFS_FLUSH | \
		PIPE_CONTROL_VF_CACHE_INVALIDATE | \
		PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)

/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
		PIPE_CONTROL_3D_ENGINE_FLAGS | \
		PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
		PIPE_CONTROL_FLUSH_ENABLE | \
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
		PIPE_CONTROL_DC_FLUSH_ENABLE)

#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
/* Opcodes for MI_MATH_INSTR */
#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1, 0x0)
#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1, 0x0)
#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
/* Registers used as operands in MI_MATH_INSTR */
#define MI_MATH_REG(x) (x)
#define MI_MATH_REG_SRCA 0x20
#define MI_MATH_REG_SRCB 0x21
#define MI_MATH_REG_ACCU 0x31
#define MI_MATH_REG_ZF 0x32
#define MI_MATH_REG_CF 0x33
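
/*
 * Illustrative sketch (not part of the driver): drive the command streamer
 * ALU via MI_MATH to add general purpose registers GPR0 and GPR1 and write
 * the result back to GPR0. MI_MATH(x) takes the number of ALU instruction
 * dwords that follow the header; the helper name is an assumption of this
 * sketch.
 */
static inline u32 *__example_emit_alu_add(u32 *cs)
{
	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(0), MI_MATH_REG_ACCU);

	return cs;
}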

/*
 * Media instructions used by the kernel
 */
#define MEDIA_INSTR(pipe, op, sub_op, flags) \
	(__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \
	 (op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags))

#define MFX_WAIT MEDIA_INSTR(1, 0, 0, 0)
#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8)
#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9)

#define CRYPTO_KEY_EXCHANGE MEDIA_INSTR(2, 6, 9, 0)

/*
 * Commands used only by the command parser
 */
#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
#define MI_ARB_CHECK MI_INSTR(0x05, 0)
#define MI_RS_CONTROL MI_INSTR(0x06, 0)
#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
#define MI_PREDICATE MI_INSTR(0x0C, 0)
#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
#define MI_URB_CLEAR MI_INSTR(0x19, 0)
#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)

#define STATE_BASE_ADDRESS \
	((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
#define BASE_ADDRESS_MODIFY REG_BIT(0)
#define PIPELINE_SELECT \
	((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
#define PIPELINE_SELECT_MEDIA REG_BIT(0)
#define GFX_OP_3DSTATE_VF_STATISTICS \
	((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
#define MEDIA_VFE_STATE \
	((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
	((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
#define MEDIA_OBJECT \
	((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
#define GFX_OP_3DSTATE_SO_DECL_LIST \
	((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))

#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))

#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}

/* Emit a two-dword MI_BATCH_BUFFER_START; callers OR in MI_BATCH_* flags. */
static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
{
	*cs++ = MI_BATCH_BUFFER_START | flags;
	*cs++ = addr;

	return cs;
}

#endif /* _INTEL_GPU_COMMANDS_H_ */