/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"

/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser may
 * allow operations that the hardware would noop when it determines they are
 * safe, and it then submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of
 * privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for
 * both normal and drm master processes).
 *
 * Third, commands which access privileged memory (e.g. GGTT, HWS page, etc.).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only
 * a few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses
 * in scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain
 * only those commands required by the parser. This generally works because
 * command opcode ranges have standard command length encodings, so the parser
 * can easily skip commands it does not need to check. This is implemented via
 * a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the
 * standard length encoding for their opcode range, primarily amongst the MI_*
 * commands. To handle this, the parser provides a way to define explicit
 * "skip" entries in the per-ring command tables.
 *
 * Other command table entries map fairly directly to the high level
 * categories mentioned above: rejected, master-only, register whitelist. The
 * parser implements a number of checks, including the privileged memory
 * checks, via a general bitmasking mechanism.
 */
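
/*
 * For example (illustrative, drawing on the tables below): a batch may use
 * MI_LOAD_REGISTER_IMM to write a whitelisted register such as
 * GEN7_SO_WRITE_OFFSET(0) and still run as "secure", whereas MI_UPDATE_GTT
 * is always rejected because it touches privileged (GGTT) memory.
 */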

#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

#define CMD(op, opm, f, lm, fl, ...)				\
	{							\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
		.cmd = { (op), (opm) },				\
		.length = { (lm) },				\
		__VA_ARGS__					\
	}

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
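
/*
 * As a sketch of the expansion: CMD(MI_NOOP, SMI, F, 1, S) produces
 *
 *	{
 *		.flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *		.cmd = { MI_NOOP, STD_MI_OPCODE_MASK },
 *		.length = { 1 },
 *	}
 *
 * i.e. a fixed-length, one-dword command that the parser simply skips.
 */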

/*            Command                          Mask   Fixed Len   Action
	      ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
	CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MI_BATCH_BUFFER_START requires some special handling. It's not
	 * really a 'skip' action but it doesn't seem like it's worth adding
	 * a new action. See i915_parse_cmds().
	 */
	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};

static const struct drm_i915_cmd_descriptor render_cmds[] = {
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 1,
			.mask = MI_REPORT_PERF_COUNT_GGTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
	CMD(  MEDIA_VFE_STATE,			S3D,   !F,  0xFFFF, B,
	      .bits = {{
			.offset = 2,
			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
			.expected = 0,
	      }},						       ),
	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 1,
			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_STORE_DATA_INDEX),
			.expected = 0,
			.condition_offset = 1,
			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};

static const struct drm_i915_cmd_descriptor video_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MFX_WAIT doesn't fit the way we handle length for most commands.
	 * It has a length field but it uses a non-standard length bias.
	 * It is always 1 dword though, so just treat it as fixed length.
	 */
	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};

static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor blt_cmds[] = {
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};

static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M

static const struct drm_i915_cmd_table gen7_render_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr.  If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
	i915_reg_t addr;
	u32 mask;
	u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(_reg, ...) \
	{ .addr = (_reg), __VA_ARGS__ }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(_reg) \
	{ .addr = _reg }, \
	{ .addr = _reg ## _UDW }

#define REG64_IDX(_reg, idx) \
	{ .addr = _reg(idx) }, \
	{ .addr = _reg ## _UDW(idx) }
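
/*
 * For instance, REG64(PS_DEPTH_COUNT) expands to the two 32-bit entries
 *
 *	{ .addr = PS_DEPTH_COUNT },
 *	{ .addr = PS_DEPTH_COUNT_UDW },
 *
 * covering the lower and upper halves of the 64-bit register.
 */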

static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
	REG64(GPGPU_THREADS_DISPATCHED),
	REG64(HS_INVOCATION_COUNT),
	REG64(DS_INVOCATION_COUNT),
	REG64(IA_VERTICES_COUNT),
	REG64(IA_PRIMITIVES_COUNT),
	REG64(VS_INVOCATION_COUNT),
	REG64(GS_INVOCATION_COUNT),
	REG64(GS_PRIMITIVES_COUNT),
	REG64(CL_INVOCATION_COUNT),
	REG64(CL_PRIMITIVES_COUNT),
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
	REG64(MI_PREDICATE_SRC0),
	REG64(MI_PREDICATE_SRC1),
	REG32(GEN7_3DPRIM_END_OFFSET),
	REG32(GEN7_3DPRIM_START_VERTEX),
	REG32(GEN7_3DPRIM_VERTEX_COUNT),
	REG32(GEN7_3DPRIM_INSTANCE_COUNT),
	REG32(GEN7_3DPRIM_START_INSTANCE),
	REG32(GEN7_3DPRIM_BASE_VERTEX),
	REG32(GEN7_GPGPU_DISPATCHDIMX),
	REG32(GEN7_GPGPU_DISPATCHDIMY),
	REG32(GEN7_GPGPU_DISPATCHDIMZ),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
	REG32(GEN7_SO_WRITE_OFFSET(0)),
	REG32(GEN7_SO_WRITE_OFFSET(1)),
	REG32(GEN7_SO_WRITE_OFFSET(2)),
	REG32(GEN7_SO_WRITE_OFFSET(3)),
	REG32(GEN7_L3SQCREG1),
	REG32(GEN7_L3CNTLREG2),
	REG32(GEN7_L3CNTLREG3),
	REG32(HSW_SCRATCH1,
	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
	      .value = 0),
	REG32(HSW_ROW_CHICKEN3,
	      .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
			HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	      .value = 0),
};

static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
	REG32(BCS_SWCTRL),
};

static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
	REG32(FORCEWAKE_MT),
	REG32(DERRMR),
	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};

static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
	REG32(FORCEWAKE_MT),
	REG32(DERRMR),
};

#undef REG64_IDX
#undef REG64
#undef REG32

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT) {
			if (op == 6)
				return 0xFFFF;
			else
				return 0xFFF;
		} else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}
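
/*
 * A worked example of the default length decoding (a sketch; the actual
 * decode happens in i915_parse_cmds()): for an MI-client header the mask
 * above is 0x3F, so a header whose low 6 bits hold 3 decodes to
 * 3 + LENGTH_BIAS = 5 dwords total.
 */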

static bool validate_cmds_sorted(struct intel_engine_cs *ring,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
	int i;
	bool ret = true;

	if (!cmd_tables || cmd_table_count == 0)
		return true;

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];
		u32 previous = 0;
		int j;

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			u32 curr = desc->cmd.value & desc->cmd.mask;

			if (curr < previous) {
				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					  ring->id, i, j, curr, previous);
				ret = false;
			}

			previous = curr;
		}
	}

	return ret;
}

static bool check_sorted(int ring_id,
			 const struct drm_i915_reg_descriptor *reg_table,
			 int reg_count)
{
	int i;
	u32 previous = 0;
	bool ret = true;

	for (i = 0; i < reg_count; i++) {
		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);

		if (curr < previous) {
			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
				  ring_id, i, curr, previous);
			ret = false;
		}

		previous = curr;
	}

	return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
		check_sorted(ring->id, ring->master_reg_table,
			     ring->master_reg_count);
}

struct cmd_node {
	const struct drm_i915_cmd_descriptor *desc;
	struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
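
/*
 * To illustrate the trade-off: CMD_HASH_MASK keeps only bits 31:23, so the
 * variable GGTT/PPGTT bits of an MI command never perturb its bucket, while
 * two 3D commands whose opcodes differ only below bit 23 share a bucket and
 * are disambiguated by the full cmd.mask comparison in find_cmd_in_table().
 */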

static int init_hash_table(struct intel_engine_cs *ring,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
	int i, j;

	hash_init(ring->cmd_hash);

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			struct cmd_node *desc_node =
				kmalloc(sizeof(*desc_node), GFP_KERNEL);

			if (!desc_node)
				return -ENOMEM;

			desc_node->desc = desc;
			hash_add(ring->cmd_hash, &desc_node->node,
				 desc->cmd.value & CMD_HASH_MASK);
		}
	}

	return 0;
}

static void fini_hash_table(struct intel_engine_cs *ring)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
	int i;

	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
		hash_del(&desc_node->node);
		kfree(desc_node);
	}
}

/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
	int ret;

	if (!IS_GEN7(ring->dev))
		return 0;

	switch (ring->id) {
	case RCS:
		if (IS_HASWELL(ring->dev)) {
			cmd_tables = hsw_render_ring_cmds;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmds);
		} else {
			cmd_tables = gen7_render_cmds;
			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
		}

		ring->reg_table = gen7_render_regs;
		ring->reg_count = ARRAY_SIZE(gen7_render_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VCS:
		cmd_tables = gen7_video_cmds;
		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case BCS:
		if (IS_HASWELL(ring->dev)) {
			cmd_tables = hsw_blt_ring_cmds;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
		} else {
			cmd_tables = gen7_blt_cmds;
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
		}

		ring->reg_table = gen7_blt_regs;
		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		break;
	case VECS:
		cmd_tables = hsw_vebox_cmds;
		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
		/* VECS can use the same length_mask function as VCS */
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
			  ring->id);
		BUG();
	}

	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
	BUG_ON(!validate_regs_sorted(ring));

	WARN_ON(!hash_empty(ring->cmd_hash));

	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
	if (ret) {
		DRM_ERROR("CMD: cmd_parser_init failed!\n");
		fini_hash_table(ring);
		return ret;
	}

	ring->needs_cmd_parser = true;

	return 0;
}

/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return;

	fini_hash_table(ring);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;

	hash_for_each_possible(ring->cmd_hash, desc_node, node,
			       cmd_header & CMD_HASH_MASK) {
		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
		u32 masked_cmd = desc->cmd.mask & cmd_header;
		u32 masked_value = desc->cmd.value & desc->cmd.mask;

		if (masked_cmd == masked_value)
			return desc;
	}

	return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
	 u32 cmd_header,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	const struct drm_i915_cmd_descriptor *desc;
	u32 mask;

	desc = find_cmd_in_table(ring, cmd_header);
	if (desc)
		return desc;

	mask = ring->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

	BUG_ON(!default_desc);
	default_desc->flags = CMD_DESC_SKIP;
	default_desc->length.mask = mask;

	return default_desc;
}

static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
	 int count, u32 addr)
{
	if (table) {
		int i;

		for (i = 0; i < count; i++) {
			if (i915_mmio_reg_offset(table[i].addr) == addr)
				return &table[i];
		}
	}

	return NULL;
}

static u32 *vmap_batch(struct drm_i915_gem_object *obj,
		       unsigned start, unsigned len)
{
	int i;
	void *addr = NULL;
	struct sg_page_iter sg_iter;
	int first_page = start >> PAGE_SHIFT;
	int last_page = (start + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int npages = last_page - first_page;
	struct page **pages;

	pages = drm_malloc_ab(npages, sizeof(*pages));
	if (pages == NULL) {
		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
		goto finish;
	}

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
		pages[i++] = sg_page_iter_page(&sg_iter);
		if (i == npages)
			break;
	}

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	if (addr == NULL) {
		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
		goto finish;
	}

finish:
	if (pages)
		drm_free_large(pages);
	return (u32 *)addr;
}

/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
		       struct drm_i915_gem_object *src_obj,
		       u32 batch_start_offset,
		       u32 batch_len)
{
	int needs_clflush = 0;
	void *src_base, *src;
	void *dst = NULL;
	int ret;

	if (batch_len > dest_obj->base.size ||
	    batch_len + batch_start_offset > src_obj->base.size)
		return ERR_PTR(-E2BIG);

	if (WARN_ON(dest_obj->pages_pin_count == 0))
		return ERR_PTR(-ENODEV);

	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
		return ERR_PTR(ret);
	}

	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
	if (!src_base) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
		ret = -ENOMEM;
		goto unpin_src;
	}

	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
		goto unmap_src;
	}

	dst = vmap_batch(dest_obj, 0, batch_len);
	if (!dst) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
		ret = -ENOMEM;
		goto unmap_src;
	}

	src = src_base + offset_in_page(batch_start_offset);
	if (needs_clflush)
		drm_clflush_virt_range(src, batch_len);

	memcpy(dst, src, batch_len);

unmap_src:
	vunmap(src_base);
unpin_src:
	i915_gem_object_unpin_pages(src_obj);

	return ret ? ERR_PTR(ret) : dst;
}

/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return false;

	if (!USES_PPGTT(ring->dev))
		return false;

	return (i915.enable_cmd_parser == 1);
}

static bool check_cmd(const struct intel_engine_cs *ring,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd, u32 length,
		      const bool is_master,
		      bool *oacontrol_set)
{
	if (desc->flags & CMD_DESC_REJECT) {
		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
		return false;
	}

	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
				 *cmd);
		return false;
	}

	if (desc->flags & CMD_DESC_REGISTER) {
		/*
		 * Get the distance between individual register offset
		 * fields if the command can perform more than one
		 * access at a time.
		 */
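		/*
		 * E.g. for MI_LOAD_REGISTER_IMM(1), the common_cmds entry
		 * above uses .reg.step = 2: register offsets sit at dwords
		 * 1, 3, 5, ... of the command, with the value to be written
		 * immediately after each offset.
		 */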
		const u32 step = desc->reg.step ? desc->reg.step : length;
		u32 offset;

		for (offset = desc->reg.offset; offset < length;
		     offset += step) {
			const u32 reg_addr = cmd[offset] & desc->reg.mask;
			const struct drm_i915_reg_descriptor *reg =
				find_reg(ring->reg_table, ring->reg_count,
					 reg_addr);

			if (!reg && is_master)
				reg = find_reg(ring->master_reg_table,
					       ring->master_reg_count,
					       reg_addr);

			if (!reg) {
				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
						 reg_addr, *cmd, ring->id);
				return false;
			}

			/*
			 * OACONTROL requires some special handling for
			 * writes. We want to make sure that any batch which
			 * enables OA also disables it before the end of the
			 * batch. The goal is to prevent one process from
			 * snooping on the perf data from another process. To do
			 * that, we need to check the value that will be written
			 * to the register. Hence, limit OACONTROL writes to
			 * only MI_LOAD_REGISTER_IMM commands.
			 */
			if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
					*oacontrol_set = (cmd[offset + 1] != 0);
			}

			/*
			 * Check the value written to the register against the
			 * allowed mask/value pair given in the whitelist entry.
			 */
			if (reg->mask) {
				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
							 reg_addr);
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
				    (offset + 2 > length ||
				     (cmd[offset + 1] & reg->mask) != reg->value)) {
					DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
							 reg_addr);
					return false;
				}
			}
		}
	}

	if (desc->flags & CMD_DESC_BITMASK) {
		int i;

		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
			u32 dword;

			if (desc->bits[i].mask == 0)
				break;

			if (desc->bits[i].condition_mask != 0) {
				u32 offset =
					desc->bits[i].condition_offset;
				u32 condition = cmd[offset] &
					desc->bits[i].condition_mask;

				if (condition == 0)
					continue;
			}

			dword = cmd[desc->bits[i].offset] &
				desc->bits[i].mask;

			if (dword != desc->bits[i].expected) {
				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
						 *cmd,
						 desc->bits[i].mask,
						 desc->bits[i].expected,
						 dword, ring->id);
				return false;
			}
		}
	}

	return true;
}
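
/*
 * As an example of the bitmask mechanism above: the MI_FLUSH_DW descriptors
 * only check the MI_FLUSH_DW_USE_GTT bit in dword 1 when the
 * MI_FLUSH_DW_OP_MASK field in dword 0 is non-zero (i.e. when a post-sync
 * write is requested); otherwise that conditional check is skipped.
 */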

#define LENGTH_BIAS 2

/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @shadow_batch_obj: copy of the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @batch_len: length of the commands in batch_obj
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
		    u32 batch_len,
		    bool is_master)
{
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
	int ret = 0;

	batch_base = copy_batch(shadow_batch_obj, batch_obj,
				batch_start_offset, batch_len);
	if (IS_ERR(batch_base)) {
		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
		return PTR_ERR(batch_base);
	}

	/*
	 * We use the batch length as size because the shadow object is as
	 * large or larger, and the parse below never looks past batch_end,
	 * so any extra space in the shadow object is left unexamined.
	 * Parsing should be faster in some cases this way.
	 */
	batch_end = batch_base + (batch_len / sizeof(*batch_end));

	cmd = batch_base;
	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
		u32 length;

		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(ring, *cmd, &default_desc);
		if (!desc) {
			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		/*
		 * If the batch buffer contains a chained batch, return an
		 * error that tells the caller to abort and dispatch the
		 * workload as a non-secure batch.
		 */
		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
			ret = -EACCES;
			break;
		}

		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

		if ((batch_end - cmd) < length) {
			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
					 *cmd,
					 length,
					 batch_end - cmd);
			ret = -EINVAL;
			break;
		}

		if (!check_cmd(ring, desc, cmd, length, is_master,
			       &oacontrol_set)) {
			ret = -EINVAL;
			break;
		}

		cmd += length;
	}

	if (oacontrol_set) {
		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
		ret = -EINVAL;
	}

	if (cmd >= batch_end) {
		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
		ret = -EINVAL;
	}

	vunmap(batch_base);

	return ret;
}

/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
{
	/*
	 * Command parser version history
	 *
	 * 1. Initial version. Checks batches and reports violations, but leaves
	 *    hardware parsing enabled (so does not allow new use cases).
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
	 * 5. GPGPU dispatch compute indirect registers.
	 */
	return 5;
}