1 /*
2  * Copyright 2010 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include "drmP.h"
29 #include "radeon.h"
30 #include "evergreend.h"
31 #include "evergreen_reg_safe.h"
32 #include "cayman_reg_safe.h"
33 
34 #define MAX(a,b)                   (((a)>(b))?(a):(b))
35 #define MIN(a,b)                   (((a)<(b))?(a):(b))
36 
37 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
38 					  struct radeon_cs_reloc **cs_reloc);
39 
40 struct evergreen_cs_track {
41 	u32			group_size;
42 	u32			nbanks;
43 	u32			npipes;
44 	u32			row_size;
45 	/* value we track */
46 	u32			nsamples;		/* unused */
47 	struct radeon_bo	*cb_color_bo[12];
48 	u32			cb_color_bo_offset[12];
49 	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
50 	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
51 	u32			cb_color_info[12];
52 	u32			cb_color_view[12];
53 	u32			cb_color_pitch[12];
54 	u32			cb_color_slice[12];
55 	u32			cb_color_attrib[12];
56 	u32			cb_color_cmask_slice[8];/* unused */
57 	u32			cb_color_fmask_slice[8];/* unused */
58 	u32			cb_target_mask;
59 	u32			cb_shader_mask; /* unused */
60 	u32			vgt_strmout_config;
61 	u32			vgt_strmout_buffer_config;
62 	struct radeon_bo	*vgt_strmout_bo[4];
63 	u32			vgt_strmout_bo_offset[4];
64 	u32			vgt_strmout_size[4];
65 	u32			db_depth_control;
66 	u32			db_depth_view;
67 	u32			db_depth_slice;
68 	u32			db_depth_size;
69 	u32			db_z_info;
70 	u32			db_z_read_offset;
71 	u32			db_z_write_offset;
72 	struct radeon_bo	*db_z_read_bo;
73 	struct radeon_bo	*db_z_write_bo;
74 	u32			db_s_info;
75 	u32			db_s_read_offset;
76 	u32			db_s_write_offset;
77 	struct radeon_bo	*db_s_read_bo;
78 	struct radeon_bo	*db_s_write_bo;
79 	bool			sx_misc_kill_all_prims;
80 	bool			cb_dirty;
81 	bool			db_dirty;
82 	bool			streamout_dirty;
83 	u32			htile_offset;
84 	u32			htile_surface;
85 	struct radeon_bo	*htile_bo;
86 };
87 
88 static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
89 {
90 	if (tiling_flags & RADEON_TILING_MACRO)
91 		return ARRAY_2D_TILED_THIN1;
92 	else if (tiling_flags & RADEON_TILING_MICRO)
93 		return ARRAY_1D_TILED_THIN1;
94 	else
95 		return ARRAY_LINEAR_GENERAL;
96 }
97 
98 static u32 evergreen_cs_get_num_banks(u32 nbanks)
99 {
100 	switch (nbanks) {
101 	case 2:
102 		return ADDR_SURF_2_BANK;
103 	case 4:
104 		return ADDR_SURF_4_BANK;
105 	case 8:
106 	default:
107 		return ADDR_SURF_8_BANK;
108 	case 16:
109 		return ADDR_SURF_16_BANK;
110 	}
111 }
112 
113 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
114 {
115 	int i;
116 
117 	for (i = 0; i < 8; i++) {
118 		track->cb_color_fmask_bo[i] = NULL;
119 		track->cb_color_cmask_bo[i] = NULL;
120 		track->cb_color_cmask_slice[i] = 0;
121 		track->cb_color_fmask_slice[i] = 0;
122 	}
123 
124 	for (i = 0; i < 12; i++) {
125 		track->cb_color_bo[i] = NULL;
126 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
127 		track->cb_color_info[i] = 0;
128 		track->cb_color_view[i] = 0xFFFFFFFF;
129 		track->cb_color_pitch[i] = 0;
130 		track->cb_color_slice[i] = 0;
131 	}
132 	track->cb_target_mask = 0xFFFFFFFF;
133 	track->cb_shader_mask = 0xFFFFFFFF;
134 	track->cb_dirty = true;
135 
136 	track->db_depth_view = 0xFFFFC000;
137 	track->db_depth_size = 0xFFFFFFFF;
138 	track->db_depth_control = 0xFFFFFFFF;
139 	track->db_z_info = 0xFFFFFFFF;
140 	track->db_z_read_offset = 0xFFFFFFFF;
141 	track->db_z_write_offset = 0xFFFFFFFF;
142 	track->db_z_read_bo = NULL;
143 	track->db_z_write_bo = NULL;
144 	track->db_s_info = 0xFFFFFFFF;
145 	track->db_s_read_offset = 0xFFFFFFFF;
146 	track->db_s_write_offset = 0xFFFFFFFF;
147 	track->db_s_read_bo = NULL;
148 	track->db_s_write_bo = NULL;
149 	track->db_dirty = true;
150 	track->htile_bo = NULL;
151 	track->htile_offset = 0xFFFFFFFF;
152 	track->htile_surface = 0;
153 
154 	for (i = 0; i < 4; i++) {
155 		track->vgt_strmout_size[i] = 0;
156 		track->vgt_strmout_bo[i] = NULL;
157 		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
158 	}
159 	track->streamout_dirty = true;
160 	track->sx_misc_kill_all_prims = false;
161 }
162 
163 struct eg_surface {
164 	/* value gathered from cs */
165 	unsigned	nbx;
166 	unsigned	nby;
167 	unsigned	format;
168 	unsigned	mode;
169 	unsigned	nbanks;
170 	unsigned	bankw;
171 	unsigned	bankh;
172 	unsigned	tsplit;
173 	unsigned	mtilea;
174 	unsigned	nsamples;
175 	/* output value */
176 	unsigned	bpe;
177 	unsigned	layer_size;
178 	unsigned	palign;
179 	unsigned	halign;
180 	unsigned long	base_align;
181 };
182 
183 static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
184 					  struct eg_surface *surf,
185 					  const char *prefix)
186 {
187 	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
188 	surf->base_align = surf->bpe;
189 	surf->palign = 1;
190 	surf->halign = 1;
191 	return 0;
192 }
193 
194 static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
195 						  struct eg_surface *surf,
196 						  const char *prefix)
197 {
198 	struct evergreen_cs_track *track = p->track;
199 	unsigned palign;
200 
201 	palign = MAX(64, track->group_size / surf->bpe);
202 	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
203 	surf->base_align = track->group_size;
204 	surf->palign = palign;
205 	surf->halign = 1;
206 	if (surf->nbx & (palign - 1)) {
207 		if (prefix) {
208 			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
209 				 __func__, __LINE__, prefix, surf->nbx, palign);
210 		}
211 		return -EINVAL;
212 	}
213 	return 0;
214 }
215 
216 static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
217 				      struct eg_surface *surf,
218 				      const char *prefix)
219 {
220 	struct evergreen_cs_track *track = p->track;
221 	unsigned palign;
222 
223 	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
224 	palign = MAX(8, palign);
225 	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
226 	surf->base_align = track->group_size;
227 	surf->palign = palign;
228 	surf->halign = 8;
229 	if ((surf->nbx & (palign - 1))) {
230 		if (prefix) {
231 			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
232 				 __func__, __LINE__, prefix, surf->nbx, palign,
233 				 track->group_size, surf->bpe, surf->nsamples);
234 		}
235 		return -EINVAL;
236 	}
237 	if ((surf->nby & (8 - 1))) {
238 		if (prefix) {
239 			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
240 				 __func__, __LINE__, prefix, surf->nby);
241 		}
242 		return -EINVAL;
243 	}
244 	return 0;
245 }
246 
247 static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
248 				      struct eg_surface *surf,
249 				      const char *prefix)
250 {
251 	struct evergreen_cs_track *track = p->track;
252 	unsigned palign, halign, tileb, slice_pt;
253 
254 	tileb = 64 * surf->bpe * surf->nsamples;
255 	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
256 	palign = MAX(8, palign);
257 	slice_pt = 1;
258 	if (tileb > surf->tsplit) {
259 		slice_pt = tileb / surf->tsplit;
260 	}
261 	tileb = tileb / slice_pt;
262 	/* macro tile width & height */
263 	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
264 	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
265 	surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
266 	surf->base_align = (palign / 8) * (halign / 8) * tileb;
267 	surf->palign = palign;
268 	surf->halign = halign;
269 
270 	if ((surf->nbx & (palign - 1))) {
271 		if (prefix) {
272 			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
273 				 __func__, __LINE__, prefix, surf->nbx, palign);
274 		}
275 		return -EINVAL;
276 	}
277 	if ((surf->nby & (halign - 1))) {
278 		if (prefix) {
279 			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
280 				 __func__, __LINE__, prefix, surf->nby, halign);
281 		}
282 		return -EINVAL;
283 	}
284 
285 	return 0;
286 }
287 
288 static int evergreen_surface_check(struct radeon_cs_parser *p,
289 				   struct eg_surface *surf,
290 				   const char *prefix)
291 {
292 	/* some common value computed here */
293 	surf->bpe = r600_fmt_get_blocksize(surf->format);
294 
295 	switch (surf->mode) {
296 	case ARRAY_LINEAR_GENERAL:
297 		return evergreen_surface_check_linear(p, surf, prefix);
298 	case ARRAY_LINEAR_ALIGNED:
299 		return evergreen_surface_check_linear_aligned(p, surf, prefix);
300 	case ARRAY_1D_TILED_THIN1:
301 		return evergreen_surface_check_1d(p, surf, prefix);
302 	case ARRAY_2D_TILED_THIN1:
303 		return evergreen_surface_check_2d(p, surf, prefix);
304 	default:
305 		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
306 				__func__, __LINE__, prefix, surf->mode);
307 		return -EINVAL;
308 	}
309 	return -EINVAL;
310 }
311 
312 static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
313 					      struct eg_surface *surf,
314 					      const char *prefix)
315 {
316 	switch (surf->mode) {
317 	case ARRAY_2D_TILED_THIN1:
318 		break;
319 	case ARRAY_LINEAR_GENERAL:
320 	case ARRAY_LINEAR_ALIGNED:
321 	case ARRAY_1D_TILED_THIN1:
322 		return 0;
323 	default:
324 		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
325 				__func__, __LINE__, prefix, surf->mode);
326 		return -EINVAL;
327 	}
328 
329 	switch (surf->nbanks) {
330 	case 0: surf->nbanks = 2; break;
331 	case 1: surf->nbanks = 4; break;
332 	case 2: surf->nbanks = 8; break;
333 	case 3: surf->nbanks = 16; break;
334 	default:
335 		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
336 			 __func__, __LINE__, prefix, surf->nbanks);
337 		return -EINVAL;
338 	}
339 	switch (surf->bankw) {
340 	case 0: surf->bankw = 1; break;
341 	case 1: surf->bankw = 2; break;
342 	case 2: surf->bankw = 4; break;
343 	case 3: surf->bankw = 8; break;
344 	default:
345 		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
346 			 __func__, __LINE__, prefix, surf->bankw);
347 		return -EINVAL;
348 	}
349 	switch (surf->bankh) {
350 	case 0: surf->bankh = 1; break;
351 	case 1: surf->bankh = 2; break;
352 	case 2: surf->bankh = 4; break;
353 	case 3: surf->bankh = 8; break;
354 	default:
355 		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
356 			 __func__, __LINE__, prefix, surf->bankh);
357 		return -EINVAL;
358 	}
359 	switch (surf->mtilea) {
360 	case 0: surf->mtilea = 1; break;
361 	case 1: surf->mtilea = 2; break;
362 	case 2: surf->mtilea = 4; break;
363 	case 3: surf->mtilea = 8; break;
364 	default:
365 		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
366 			 __func__, __LINE__, prefix, surf->mtilea);
367 		return -EINVAL;
368 	}
369 	switch (surf->tsplit) {
370 	case 0: surf->tsplit = 64; break;
371 	case 1: surf->tsplit = 128; break;
372 	case 2: surf->tsplit = 256; break;
373 	case 3: surf->tsplit = 512; break;
374 	case 4: surf->tsplit = 1024; break;
375 	case 5: surf->tsplit = 2048; break;
376 	case 6: surf->tsplit = 4096; break;
377 	default:
378 		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
379 			 __func__, __LINE__, prefix, surf->tsplit);
380 		return -EINVAL;
381 	}
382 	return 0;
383 }
384 
385 static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
386 {
387 	struct evergreen_cs_track *track = p->track;
388 	struct eg_surface surf;
389 	unsigned pitch, slice, mslice;
390 	unsigned long offset;
391 	int r;
392 
393 	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
394 	pitch = track->cb_color_pitch[id];
395 	slice = track->cb_color_slice[id];
396 	surf.nbx = (pitch + 1) * 8;
397 	surf.nby = ((slice + 1) * 64) / surf.nbx;
398 	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
399 	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
400 	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
401 	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
402 	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
403 	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
404 	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
405 	surf.nsamples = 1;
406 
407 	if (!r600_fmt_is_valid_color(surf.format)) {
408 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
409 			 __func__, __LINE__, surf.format,
410 			id, track->cb_color_info[id]);
411 		return -EINVAL;
412 	}
413 
414 	r = evergreen_surface_value_conv_check(p, &surf, "cb");
415 	if (r) {
416 		return r;
417 	}
418 
419 	r = evergreen_surface_check(p, &surf, "cb");
420 	if (r) {
421 		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
422 			 __func__, __LINE__, id, track->cb_color_pitch[id],
423 			 track->cb_color_slice[id], track->cb_color_attrib[id],
424 			 track->cb_color_info[id]);
425 		return r;
426 	}
427 
428 	offset = track->cb_color_bo_offset[id] << 8;
429 	if (offset & (surf.base_align - 1)) {
430 		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
431 			 __func__, __LINE__, id, offset, surf.base_align);
432 		return -EINVAL;
433 	}
434 
435 	offset += surf.layer_size * mslice;
436 	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
437 		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
438 			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
439 			 __func__, __LINE__, id, surf.layer_size,
440 			track->cb_color_bo_offset[id] << 8, mslice,
441 			radeon_bo_size(track->cb_color_bo[id]), slice);
442 		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
443 			 __func__, __LINE__, surf.nbx, surf.nby,
444 			surf.mode, surf.bpe, surf.nsamples,
445 			surf.bankw, surf.bankh,
446 			surf.tsplit, surf.mtilea);
447 		return -EINVAL;
448 	}
449 
450 	return 0;
451 }
452 
453 static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
454 						unsigned nbx, unsigned nby)
455 {
456 	struct evergreen_cs_track *track = p->track;
457 	unsigned long size;
458 
459 	if (track->htile_bo == NULL) {
460 		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
461 				__func__, __LINE__, track->db_z_info);
462 		return -EINVAL;
463 	}
464 
465 	if (G_028ABC_LINEAR(track->htile_surface)) {
466 		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
467 		nbx = round_up(nbx, 16 * 8);
468 		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
469 		nby = round_up(nby, track->npipes * 8);
470 	} else {
471 		switch (track->npipes) {
472 		case 8:
473 			nbx = round_up(nbx, 64 * 8);
474 			nby = round_up(nby, 64 * 8);
475 			break;
476 		case 4:
477 			nbx = round_up(nbx, 64 * 8);
478 			nby = round_up(nby, 32 * 8);
479 			break;
480 		case 2:
481 			nbx = round_up(nbx, 32 * 8);
482 			nby = round_up(nby, 32 * 8);
483 			break;
484 		case 1:
485 			nbx = round_up(nbx, 32 * 8);
486 			nby = round_up(nby, 16 * 8);
487 			break;
488 		default:
489 			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
490 					__func__, __LINE__, track->npipes);
491 			return -EINVAL;
492 		}
493 	}
494 	/* compute number of htile */
495 	nbx = nbx / 8;
496 	nby = nby / 8;
497 	size = nbx * nby * 4;
498 	size += track->htile_offset;
499 
500 	if (size > radeon_bo_size(track->htile_bo)) {
501 		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
502 				__func__, __LINE__, radeon_bo_size(track->htile_bo),
503 				size, nbx, nby);
504 		return -EINVAL;
505 	}
506 	return 0;
507 }
508 
509 static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
510 {
511 	struct evergreen_cs_track *track = p->track;
512 	struct eg_surface surf;
513 	unsigned pitch, slice, mslice;
514 	unsigned long offset;
515 	int r;
516 
517 	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
518 	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
519 	slice = track->db_depth_slice;
520 	surf.nbx = (pitch + 1) * 8;
521 	surf.nby = ((slice + 1) * 64) / surf.nbx;
522 	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
523 	surf.format = G_028044_FORMAT(track->db_s_info);
524 	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
525 	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
526 	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
527 	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
528 	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
529 	surf.nsamples = 1;
530 
531 	if (surf.format != 1) {
532 		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
533 			 __func__, __LINE__, surf.format);
534 		return -EINVAL;
535 	}
536 	/* replace by color format so we can use same code */
537 	surf.format = V_028C70_COLOR_8;
538 
539 	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
540 	if (r) {
541 		return r;
542 	}
543 
544 	r = evergreen_surface_check(p, &surf, NULL);
545 	if (r) {
546 		/* old userspace doesn't compute proper depth/stencil alignment
547 		 * check that alignment against a bigger byte per elements and
548 		 * only report if that alignment is wrong too.
549 		 */
550 		surf.format = V_028C70_COLOR_8_8_8_8;
551 		r = evergreen_surface_check(p, &surf, "stencil");
552 		if (r) {
553 			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
554 				 __func__, __LINE__, track->db_depth_size,
555 				 track->db_depth_slice, track->db_s_info, track->db_z_info);
556 		}
557 		return r;
558 	}
559 
560 	offset = track->db_s_read_offset << 8;
561 	if (offset & (surf.base_align - 1)) {
562 		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
563 			 __func__, __LINE__, offset, surf.base_align);
564 		return -EINVAL;
565 	}
566 	offset += surf.layer_size * mslice;
567 	if (offset > radeon_bo_size(track->db_s_read_bo)) {
568 		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
569 			 "offset %ld, max layer %d, bo size %ld)\n",
570 			 __func__, __LINE__, surf.layer_size,
571 			(unsigned long)track->db_s_read_offset << 8, mslice,
572 			radeon_bo_size(track->db_s_read_bo));
573 		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
574 			 __func__, __LINE__, track->db_depth_size,
575 			 track->db_depth_slice, track->db_s_info, track->db_z_info);
576 		return -EINVAL;
577 	}
578 
579 	offset = track->db_s_write_offset << 8;
580 	if (offset & (surf.base_align - 1)) {
581 		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
582 			 __func__, __LINE__, offset, surf.base_align);
583 		return -EINVAL;
584 	}
585 	offset += surf.layer_size * mslice;
586 	if (offset > radeon_bo_size(track->db_s_write_bo)) {
587 		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
588 			 "offset %ld, max layer %d, bo size %ld)\n",
589 			 __func__, __LINE__, surf.layer_size,
590 			(unsigned long)track->db_s_write_offset << 8, mslice,
591 			radeon_bo_size(track->db_s_write_bo));
592 		return -EINVAL;
593 	}
594 
595 	/* hyperz */
596 	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
597 		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
598 		if (r) {
599 			return r;
600 		}
601 	}
602 
603 	return 0;
604 }
605 
606 static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
607 {
608 	struct evergreen_cs_track *track = p->track;
609 	struct eg_surface surf;
610 	unsigned pitch, slice, mslice;
611 	unsigned long offset;
612 	int r;
613 
614 	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
615 	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
616 	slice = track->db_depth_slice;
617 	surf.nbx = (pitch + 1) * 8;
618 	surf.nby = ((slice + 1) * 64) / surf.nbx;
619 	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
620 	surf.format = G_028040_FORMAT(track->db_z_info);
621 	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
622 	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
623 	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
624 	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
625 	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
626 	surf.nsamples = 1;
627 
628 	switch (surf.format) {
629 	case V_028040_Z_16:
630 		surf.format = V_028C70_COLOR_16;
631 		break;
632 	case V_028040_Z_24:
633 	case V_028040_Z_32_FLOAT:
634 		surf.format = V_028C70_COLOR_8_8_8_8;
635 		break;
636 	default:
637 		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
638 			 __func__, __LINE__, surf.format);
639 		return -EINVAL;
640 	}
641 
642 	r = evergreen_surface_value_conv_check(p, &surf, "depth");
643 	if (r) {
644 		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
645 			 __func__, __LINE__, track->db_depth_size,
646 			 track->db_depth_slice, track->db_z_info);
647 		return r;
648 	}
649 
650 	r = evergreen_surface_check(p, &surf, "depth");
651 	if (r) {
652 		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
653 			 __func__, __LINE__, track->db_depth_size,
654 			 track->db_depth_slice, track->db_z_info);
655 		return r;
656 	}
657 
658 	offset = track->db_z_read_offset << 8;
659 	if (offset & (surf.base_align - 1)) {
660 		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
661 			 __func__, __LINE__, offset, surf.base_align);
662 		return -EINVAL;
663 	}
664 	offset += surf.layer_size * mslice;
665 	if (offset > radeon_bo_size(track->db_z_read_bo)) {
666 		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
667 			 "offset %ld, max layer %d, bo size %ld)\n",
668 			 __func__, __LINE__, surf.layer_size,
669 			(unsigned long)track->db_z_read_offset << 8, mslice,
670 			radeon_bo_size(track->db_z_read_bo));
671 		return -EINVAL;
672 	}
673 
674 	offset = track->db_z_write_offset << 8;
675 	if (offset & (surf.base_align - 1)) {
676 		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
677 			 __func__, __LINE__, offset, surf.base_align);
678 		return -EINVAL;
679 	}
680 	offset += surf.layer_size * mslice;
681 	if (offset > radeon_bo_size(track->db_z_write_bo)) {
682 		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
683 			 "offset %ld, max layer %d, bo size %ld)\n",
684 			 __func__, __LINE__, surf.layer_size,
685 			(unsigned long)track->db_z_write_offset << 8, mslice,
686 			radeon_bo_size(track->db_z_write_bo));
687 		return -EINVAL;
688 	}
689 
690 	/* hyperz */
691 	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
692 		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
693 		if (r) {
694 			return r;
695 		}
696 	}
697 
698 	return 0;
699 }
700 
701 static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
702 					       struct radeon_bo *texture,
703 					       struct radeon_bo *mipmap,
704 					       unsigned idx)
705 {
706 	struct eg_surface surf;
707 	unsigned long toffset, moffset;
708 	unsigned dim, llevel, mslice, width, height, depth, i;
709 	u32 texdw[8];
710 	int r;
711 
712 	texdw[0] = radeon_get_ib_value(p, idx + 0);
713 	texdw[1] = radeon_get_ib_value(p, idx + 1);
714 	texdw[2] = radeon_get_ib_value(p, idx + 2);
715 	texdw[3] = radeon_get_ib_value(p, idx + 3);
716 	texdw[4] = radeon_get_ib_value(p, idx + 4);
717 	texdw[5] = radeon_get_ib_value(p, idx + 5);
718 	texdw[6] = radeon_get_ib_value(p, idx + 6);
719 	texdw[7] = radeon_get_ib_value(p, idx + 7);
720 	dim = G_030000_DIM(texdw[0]);
721 	llevel = G_030014_LAST_LEVEL(texdw[5]);
722 	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
723 	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
724 	height =  G_030004_TEX_HEIGHT(texdw[1]) + 1;
725 	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
726 	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
727 	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
728 	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
729 	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
730 	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
731 	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
732 	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
733 	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
734 	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
735 	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
736 	surf.nsamples = 1;
737 	toffset = texdw[2] << 8;
738 	moffset = texdw[3] << 8;
739 
740 	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
741 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
742 			 __func__, __LINE__, surf.format);
743 		return -EINVAL;
744 	}
745 	switch (dim) {
746 	case V_030000_SQ_TEX_DIM_1D:
747 	case V_030000_SQ_TEX_DIM_2D:
748 	case V_030000_SQ_TEX_DIM_CUBEMAP:
749 	case V_030000_SQ_TEX_DIM_1D_ARRAY:
750 	case V_030000_SQ_TEX_DIM_2D_ARRAY:
751 		depth = 1;
752 	case V_030000_SQ_TEX_DIM_3D:
753 		break;
754 	default:
755 		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
756 			 __func__, __LINE__, dim);
757 		return -EINVAL;
758 	}
759 
760 	r = evergreen_surface_value_conv_check(p, &surf, "texture");
761 	if (r) {
762 		return r;
763 	}
764 
765 	/* align height */
766 	evergreen_surface_check(p, &surf, NULL);
767 	surf.nby = ALIGN(surf.nby, surf.halign);
768 
769 	r = evergreen_surface_check(p, &surf, "texture");
770 	if (r) {
771 		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
772 			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
773 			 texdw[5], texdw[6], texdw[7]);
774 		return r;
775 	}
776 
777 	/* check texture size */
778 	if (toffset & (surf.base_align - 1)) {
779 		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
780 			 __func__, __LINE__, toffset, surf.base_align);
781 		return -EINVAL;
782 	}
783 	if (moffset & (surf.base_align - 1)) {
784 		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
785 			 __func__, __LINE__, moffset, surf.base_align);
786 		return -EINVAL;
787 	}
788 	if (dim == SQ_TEX_DIM_3D) {
789 		toffset += surf.layer_size * depth;
790 	} else {
791 		toffset += surf.layer_size * mslice;
792 	}
793 	if (toffset > radeon_bo_size(texture)) {
794 		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
795 			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
796 			 __func__, __LINE__, surf.layer_size,
797 			(unsigned long)texdw[2] << 8, mslice,
798 			depth, radeon_bo_size(texture),
799 			surf.nbx, surf.nby);
800 		return -EINVAL;
801 	}
802 
803 	/* check mipmap size */
804 	for (i = 1; i <= llevel; i++) {
805 		unsigned w, h, d;
806 
807 		w = r600_mip_minify(width, i);
808 		h = r600_mip_minify(height, i);
809 		d = r600_mip_minify(depth, i);
810 		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
811 		surf.nby = r600_fmt_get_nblocksy(surf.format, h);
812 
813 		switch (surf.mode) {
814 		case ARRAY_2D_TILED_THIN1:
815 			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
816 				surf.mode = ARRAY_1D_TILED_THIN1;
817 			}
818 			/* recompute alignment */
819 			evergreen_surface_check(p, &surf, NULL);
820 			break;
821 		case ARRAY_LINEAR_GENERAL:
822 		case ARRAY_LINEAR_ALIGNED:
823 		case ARRAY_1D_TILED_THIN1:
824 			break;
825 		default:
826 			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
827 				 __func__, __LINE__, surf.mode);
828 			return -EINVAL;
829 		}
830 		surf.nbx = ALIGN(surf.nbx, surf.palign);
831 		surf.nby = ALIGN(surf.nby, surf.halign);
832 
833 		r = evergreen_surface_check(p, &surf, "mipmap");
834 		if (r) {
835 			return r;
836 		}
837 
838 		if (dim == SQ_TEX_DIM_3D) {
839 			moffset += surf.layer_size * d;
840 		} else {
841 			moffset += surf.layer_size * mslice;
842 		}
843 		if (moffset > radeon_bo_size(mipmap)) {
844 			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
845 					"offset %ld, coffset %ld, max layer %d, depth %d, "
846 					"bo size %ld) level0 (%d %d %d)\n",
847 					__func__, __LINE__, i, surf.layer_size,
848 					(unsigned long)texdw[3] << 8, moffset, mslice,
849 					d, radeon_bo_size(mipmap),
850 					width, height, depth);
851 			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
852 				 __func__, __LINE__, surf.nbx, surf.nby,
853 				surf.mode, surf.bpe, surf.nsamples,
854 				surf.bankw, surf.bankh,
855 				surf.tsplit, surf.mtilea);
856 			return -EINVAL;
857 		}
858 	}
859 
860 	return 0;
861 }
862 
863 static int evergreen_cs_track_check(struct radeon_cs_parser *p)
864 {
865 	struct evergreen_cs_track *track = p->track;
866 	unsigned tmp, i;
867 	int r;
868 	unsigned buffer_mask = 0;
869 
870 	/* check streamout */
871 	if (track->streamout_dirty && track->vgt_strmout_config) {
872 		for (i = 0; i < 4; i++) {
873 			if (track->vgt_strmout_config & (1 << i)) {
874 				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
875 			}
876 		}
877 
878 		for (i = 0; i < 4; i++) {
879 			if (buffer_mask & (1 << i)) {
880 				if (track->vgt_strmout_bo[i]) {
881 					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
882 							(u64)track->vgt_strmout_size[i];
883 					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
884 						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
885 							  i, offset,
886 							  radeon_bo_size(track->vgt_strmout_bo[i]));
887 						return -EINVAL;
888 					}
889 				} else {
890 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
891 					return -EINVAL;
892 				}
893 			}
894 		}
895 		track->streamout_dirty = false;
896 	}
897 
898 	if (track->sx_misc_kill_all_prims)
899 		return 0;
900 
901 	/* check that we have a cb for each enabled target
902 	 */
903 	if (track->cb_dirty) {
904 		tmp = track->cb_target_mask;
905 		for (i = 0; i < 8; i++) {
906 			if ((tmp >> (i * 4)) & 0xF) {
907 				/* at least one component is enabled */
908 				if (track->cb_color_bo[i] == NULL) {
909 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
910 						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
911 					return -EINVAL;
912 				}
913 				/* check cb */
914 				r = evergreen_cs_track_validate_cb(p, i);
915 				if (r) {
916 					return r;
917 				}
918 			}
919 		}
920 		track->cb_dirty = false;
921 	}
922 
923 	if (track->db_dirty) {
924 		/* Check stencil buffer */
925 		if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
926 			r = evergreen_cs_track_validate_stencil(p);
927 			if (r)
928 				return r;
929 		}
930 		/* Check depth buffer */
931 		if (G_028800_Z_ENABLE(track->db_depth_control)) {
932 			r = evergreen_cs_track_validate_depth(p);
933 			if (r)
934 				return r;
935 		}
936 		track->db_dirty = false;
937 	}
938 
939 	return 0;
940 }
941 
942 /**
943  * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
944  * @parser:	parser structure holding parsing context.
945  * @pkt:	where to store packet informations
946  *
947  * Assume that chunk_ib_index is properly set. Will return -EINVAL
948  * if packet is bigger than remaining ib size. or if packets is unknown.
949  **/
950 int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
951 			      struct radeon_cs_packet *pkt,
952 			      unsigned idx)
953 {
954 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
955 	uint32_t header;
956 
957 	if (idx >= ib_chunk->length_dw) {
958 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
959 			  idx, ib_chunk->length_dw);
960 		return -EINVAL;
961 	}
962 	header = radeon_get_ib_value(p, idx);
963 	pkt->idx = idx;
964 	pkt->type = CP_PACKET_GET_TYPE(header);
965 	pkt->count = CP_PACKET_GET_COUNT(header);
966 	pkt->one_reg_wr = 0;
967 	switch (pkt->type) {
968 	case PACKET_TYPE0:
969 		pkt->reg = CP_PACKET0_GET_REG(header);
970 		break;
971 	case PACKET_TYPE3:
972 		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
973 		break;
974 	case PACKET_TYPE2:
975 		pkt->count = -1;
976 		break;
977 	default:
978 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
979 		return -EINVAL;
980 	}
981 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
982 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
983 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
984 		return -EINVAL;
985 	}
986 	return 0;
987 }
988 
989 /**
990  * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
991  * @parser:		parser structure holding parsing context.
992  * @data:		pointer to relocation data
993  * @offset_start:	starting offset
994  * @offset_mask:	offset mask (to align start offset on)
995  * @reloc:		reloc informations
996  *
997  * Check next packet is relocation packet3, do bo validation and compute
998  * GPU offset using the provided start.
999  **/
1000 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
1001 					  struct radeon_cs_reloc **cs_reloc)
1002 {
1003 	struct radeon_cs_chunk *relocs_chunk;
1004 	struct radeon_cs_packet p3reloc;
1005 	unsigned idx;
1006 	int r;
1007 
1008 	if (p->chunk_relocs_idx == -1) {
1009 		DRM_ERROR("No relocation chunk !\n");
1010 		return -EINVAL;
1011 	}
1012 	*cs_reloc = NULL;
1013 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1014 	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
1015 	if (r) {
1016 		return r;
1017 	}
1018 	p->idx += p3reloc.count + 2;
1019 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
1020 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
1021 			  p3reloc.idx);
1022 		return -EINVAL;
1023 	}
1024 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1025 	if (idx >= relocs_chunk->length_dw) {
1026 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1027 			  idx, relocs_chunk->length_dw);
1028 		return -EINVAL;
1029 	}
1030 	/* FIXME: we assume reloc size is 4 dwords */
1031 	*cs_reloc = p->relocs_ptr[(idx / 4)];
1032 	return 0;
1033 }
1034 
1035 /**
1036  * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
1037  * @parser:		parser structure holding parsing context.
1038  *
1039  * Userspace sends a special sequence for VLINE waits.
1040  * PACKET0 - VLINE_START_END + value
1041  * PACKET3 - WAIT_REG_MEM poll vline status reg
1042  * RELOC (P3) - crtc_id in reloc.
1043  *
1044  * This function parses this and relocates the VLINE START END
1045  * and WAIT_REG_MEM packets to the correct crtc.
1046  * It also detects a switched off crtc and nulls out the
1047  * wait in that case.
1048  */
1049 static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
1050 {
1051 	struct drm_mode_object *obj;
1052 	struct drm_crtc *crtc;
1053 	struct radeon_crtc *radeon_crtc;
1054 	struct radeon_cs_packet p3reloc, wait_reg_mem;
1055 	int crtc_id;
1056 	int r;
1057 	uint32_t header, h_idx, reg, wait_reg_mem_info;
1058 	volatile uint32_t *ib;
1059 
1060 	ib = p->ib->ptr;
1061 
1062 	/* parse the WAIT_REG_MEM */
1063 	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
1064 	if (r)
1065 		return r;
1066 
1067 	/* check its a WAIT_REG_MEM */
1068 	if (wait_reg_mem.type != PACKET_TYPE3 ||
1069 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
1070 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
1071 		return -EINVAL;
1072 	}
1073 
1074 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
1075 	/* bit 4 is reg (0) or mem (1) */
1076 	if (wait_reg_mem_info & 0x10) {
1077 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
1078 		return -EINVAL;
1079 	}
1080 	/* waiting for value to be equal */
1081 	if ((wait_reg_mem_info & 0x7) != 0x3) {
1082 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
1083 		return -EINVAL;
1084 	}
1085 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
1086 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
1087 		return -EINVAL;
1088 	}
1089 
1090 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
1091 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
1092 		return -EINVAL;
1093 	}
1094 
1095 	/* jump over the NOP */
1096 	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
1097 	if (r)
1098 		return r;
1099 
1100 	h_idx = p->idx - 2;
1101 	p->idx += wait_reg_mem.count + 2;
1102 	p->idx += p3reloc.count + 2;
1103 
1104 	header = radeon_get_ib_value(p, h_idx);
1105 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
1106 	reg = CP_PACKET0_GET_REG(header);
1107 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1108 	if (!obj) {
1109 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
1110 		return -EINVAL;
1111 	}
1112 	crtc = obj_to_crtc(obj);
1113 	radeon_crtc = to_radeon_crtc(crtc);
1114 	crtc_id = radeon_crtc->crtc_id;
1115 
1116 	if (!crtc->enabled) {
1117 		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
1118 		ib[h_idx + 2] = PACKET2(0);
1119 		ib[h_idx + 3] = PACKET2(0);
1120 		ib[h_idx + 4] = PACKET2(0);
1121 		ib[h_idx + 5] = PACKET2(0);
1122 		ib[h_idx + 6] = PACKET2(0);
1123 		ib[h_idx + 7] = PACKET2(0);
1124 		ib[h_idx + 8] = PACKET2(0);
1125 	} else {
1126 		switch (reg) {
1127 		case EVERGREEN_VLINE_START_END:
1128 			header &= ~R600_CP_PACKET0_REG_MASK;
1129 			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
1130 			ib[h_idx] = header;
1131 			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
1132 			break;
1133 		default:
1134 			DRM_ERROR("unknown crtc reloc\n");
1135 			return -EINVAL;
1136 		}
1137 	}
1138 	return 0;
1139 }
1140 
1141 static int evergreen_packet0_check(struct radeon_cs_parser *p,
1142 				   struct radeon_cs_packet *pkt,
1143 				   unsigned idx, unsigned reg)
1144 {
1145 	int r;
1146 
1147 	switch (reg) {
1148 	case EVERGREEN_VLINE_START_END:
1149 		r = evergreen_cs_packet_parse_vline(p);
1150 		if (r) {
1151 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1152 					idx, reg);
1153 			return r;
1154 		}
1155 		break;
1156 	default:
1157 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1158 		       reg, idx);
1159 		return -EINVAL;
1160 	}
1161 	return 0;
1162 }
1163 
1164 static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
1165 				      struct radeon_cs_packet *pkt)
1166 {
1167 	unsigned reg, i;
1168 	unsigned idx;
1169 	int r;
1170 
1171 	idx = pkt->idx + 1;
1172 	reg = pkt->reg;
1173 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
1174 		r = evergreen_packet0_check(p, pkt, idx, reg);
1175 		if (r) {
1176 			return r;
1177 		}
1178 	}
1179 	return 0;
1180 }
1181 
1182 /**
1183  * evergreen_cs_check_reg() - check if register is authorized or not
1184  * @parser: parser structure holding parsing context
1185  * @reg: register we are testing
1186  * @idx: index into the cs buffer
1187  *
1188  * This function will test against evergreen_reg_safe_bm and return 0
1189  * if register is safe. If register is not flag as safe this function
1190  * will test it against a list of register needind special handling.
1191  */
1192 static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1193 {
1194 	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
1195 	struct radeon_cs_reloc *reloc;
1196 	u32 last_reg;
1197 	u32 m, i, tmp, *ib;
1198 	int r;
1199 
1200 	if (p->rdev->family >= CHIP_CAYMAN)
1201 		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
1202 	else
1203 		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
1204 
1205 	i = (reg >> 7);
1206 	if (i >= last_reg) {
1207 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1208 		return -EINVAL;
1209 	}
1210 	m = 1 << ((reg >> 2) & 31);
1211 	if (p->rdev->family >= CHIP_CAYMAN) {
1212 		if (!(cayman_reg_safe_bm[i] & m))
1213 			return 0;
1214 	} else {
1215 		if (!(evergreen_reg_safe_bm[i] & m))
1216 			return 0;
1217 	}
1218 	ib = p->ib->ptr;
1219 	switch (reg) {
1220 	/* force following reg to 0 in an attempt to disable out buffer
1221 	 * which will need us to better understand how it works to perform
1222 	 * security check on it (Jerome)
1223 	 */
1224 	case SQ_ESGS_RING_SIZE:
1225 	case SQ_GSVS_RING_SIZE:
1226 	case SQ_ESTMP_RING_SIZE:
1227 	case SQ_GSTMP_RING_SIZE:
1228 	case SQ_HSTMP_RING_SIZE:
1229 	case SQ_LSTMP_RING_SIZE:
1230 	case SQ_PSTMP_RING_SIZE:
1231 	case SQ_VSTMP_RING_SIZE:
1232 	case SQ_ESGS_RING_ITEMSIZE:
1233 	case SQ_ESTMP_RING_ITEMSIZE:
1234 	case SQ_GSTMP_RING_ITEMSIZE:
1235 	case SQ_GSVS_RING_ITEMSIZE:
1236 	case SQ_GS_VERT_ITEMSIZE:
1237 	case SQ_GS_VERT_ITEMSIZE_1:
1238 	case SQ_GS_VERT_ITEMSIZE_2:
1239 	case SQ_GS_VERT_ITEMSIZE_3:
1240 	case SQ_GSVS_RING_OFFSET_1:
1241 	case SQ_GSVS_RING_OFFSET_2:
1242 	case SQ_GSVS_RING_OFFSET_3:
1243 	case SQ_HSTMP_RING_ITEMSIZE:
1244 	case SQ_LSTMP_RING_ITEMSIZE:
1245 	case SQ_PSTMP_RING_ITEMSIZE:
1246 	case SQ_VSTMP_RING_ITEMSIZE:
1247 	case VGT_TF_RING_SIZE:
1248 		/* get value to populate the IB don't remove */
1249 		/*tmp =radeon_get_ib_value(p, idx);
1250 		  ib[idx] = 0;*/
1251 		break;
1252 	case SQ_ESGS_RING_BASE:
1253 	case SQ_GSVS_RING_BASE:
1254 	case SQ_ESTMP_RING_BASE:
1255 	case SQ_GSTMP_RING_BASE:
1256 	case SQ_HSTMP_RING_BASE:
1257 	case SQ_LSTMP_RING_BASE:
1258 	case SQ_PSTMP_RING_BASE:
1259 	case SQ_VSTMP_RING_BASE:
1260 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1261 		if (r) {
1262 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1263 					"0x%04X\n", reg);
1264 			return -EINVAL;
1265 		}
1266 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1267 		break;
1268 	case DB_DEPTH_CONTROL:
1269 		track->db_depth_control = radeon_get_ib_value(p, idx);
1270 		track->db_dirty = true;
1271 		break;
1272 	case CAYMAN_DB_EQAA:
1273 		if (p->rdev->family < CHIP_CAYMAN) {
1274 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1275 				 "0x%04X\n", reg);
1276 			return -EINVAL;
1277 		}
1278 		break;
1279 	case CAYMAN_DB_DEPTH_INFO:
1280 		if (p->rdev->family < CHIP_CAYMAN) {
1281 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1282 				 "0x%04X\n", reg);
1283 			return -EINVAL;
1284 		}
1285 		break;
1286 	case DB_Z_INFO:
1287 		track->db_z_info = radeon_get_ib_value(p, idx);
1288 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1289 			r = evergreen_cs_packet_next_reloc(p, &reloc);
1290 			if (r) {
1291 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1292 						"0x%04X\n", reg);
1293 				return -EINVAL;
1294 			}
1295 			ib[idx] &= ~Z_ARRAY_MODE(0xf);
1296 			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
1297 			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1298 			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1299 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1300 				unsigned bankw, bankh, mtaspect, tile_split;
1301 
1302 				evergreen_tiling_fields(reloc->lobj.tiling_flags,
1303 							&bankw, &bankh, &mtaspect,
1304 							&tile_split);
1305 				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1306 				ib[idx] |= DB_TILE_SPLIT(tile_split) |
1307 						DB_BANK_WIDTH(bankw) |
1308 						DB_BANK_HEIGHT(bankh) |
1309 						DB_MACRO_TILE_ASPECT(mtaspect);
1310 			}
1311 		}
1312 		track->db_dirty = true;
1313 		break;
1314 	case DB_STENCIL_INFO:
1315 		track->db_s_info = radeon_get_ib_value(p, idx);
1316 		track->db_dirty = true;
1317 		break;
1318 	case DB_DEPTH_VIEW:
1319 		track->db_depth_view = radeon_get_ib_value(p, idx);
1320 		track->db_dirty = true;
1321 		break;
1322 	case DB_DEPTH_SIZE:
1323 		track->db_depth_size = radeon_get_ib_value(p, idx);
1324 		track->db_dirty = true;
1325 		break;
1326 	case R_02805C_DB_DEPTH_SLICE:
1327 		track->db_depth_slice = radeon_get_ib_value(p, idx);
1328 		track->db_dirty = true;
1329 		break;
1330 	case DB_Z_READ_BASE:
1331 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1332 		if (r) {
1333 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1334 					"0x%04X\n", reg);
1335 			return -EINVAL;
1336 		}
1337 		track->db_z_read_offset = radeon_get_ib_value(p, idx);
1338 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1339 		track->db_z_read_bo = reloc->robj;
1340 		track->db_dirty = true;
1341 		break;
1342 	case DB_Z_WRITE_BASE:
1343 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1344 		if (r) {
1345 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1346 					"0x%04X\n", reg);
1347 			return -EINVAL;
1348 		}
1349 		track->db_z_write_offset = radeon_get_ib_value(p, idx);
1350 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1351 		track->db_z_write_bo = reloc->robj;
1352 		track->db_dirty = true;
1353 		break;
1354 	case DB_STENCIL_READ_BASE:
1355 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1356 		if (r) {
1357 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1358 					"0x%04X\n", reg);
1359 			return -EINVAL;
1360 		}
1361 		track->db_s_read_offset = radeon_get_ib_value(p, idx);
1362 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1363 		track->db_s_read_bo = reloc->robj;
1364 		track->db_dirty = true;
1365 		break;
1366 	case DB_STENCIL_WRITE_BASE:
1367 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1368 		if (r) {
1369 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1370 					"0x%04X\n", reg);
1371 			return -EINVAL;
1372 		}
1373 		track->db_s_write_offset = radeon_get_ib_value(p, idx);
1374 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1375 		track->db_s_write_bo = reloc->robj;
1376 		track->db_dirty = true;
1377 		break;
1378 	case VGT_STRMOUT_CONFIG:
1379 		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
1380 		track->streamout_dirty = true;
1381 		break;
1382 	case VGT_STRMOUT_BUFFER_CONFIG:
1383 		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
1384 		track->streamout_dirty = true;
1385 		break;
1386 	case VGT_STRMOUT_BUFFER_BASE_0:
1387 	case VGT_STRMOUT_BUFFER_BASE_1:
1388 	case VGT_STRMOUT_BUFFER_BASE_2:
1389 	case VGT_STRMOUT_BUFFER_BASE_3:
1390 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1391 		if (r) {
1392 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1393 					"0x%04X\n", reg);
1394 			return -EINVAL;
1395 		}
1396 		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1397 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1398 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1399 		track->vgt_strmout_bo[tmp] = reloc->robj;
1400 		track->streamout_dirty = true;
1401 		break;
1402 	case VGT_STRMOUT_BUFFER_SIZE_0:
1403 	case VGT_STRMOUT_BUFFER_SIZE_1:
1404 	case VGT_STRMOUT_BUFFER_SIZE_2:
1405 	case VGT_STRMOUT_BUFFER_SIZE_3:
1406 		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1407 		/* size in register is DWs, convert to bytes */
1408 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1409 		track->streamout_dirty = true;
1410 		break;
1411 	case CP_COHER_BASE:
1412 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1413 		if (r) {
1414 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1415 					"0x%04X\n", reg);
1416 			return -EINVAL;
1417 		}
1418 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1419 	case CB_TARGET_MASK:
1420 		track->cb_target_mask = radeon_get_ib_value(p, idx);
1421 		track->cb_dirty = true;
1422 		break;
1423 	case CB_SHADER_MASK:
1424 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
1425 		track->cb_dirty = true;
1426 		break;
1427 	case PA_SC_AA_CONFIG:
1428 		if (p->rdev->family >= CHIP_CAYMAN) {
1429 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1430 				 "0x%04X\n", reg);
1431 			return -EINVAL;
1432 		}
1433 		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
1434 		track->nsamples = 1 << tmp;
1435 		break;
1436 	case CAYMAN_PA_SC_AA_CONFIG:
1437 		if (p->rdev->family < CHIP_CAYMAN) {
1438 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1439 				 "0x%04X\n", reg);
1440 			return -EINVAL;
1441 		}
1442 		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
1443 		track->nsamples = 1 << tmp;
1444 		break;
1445 	case CB_COLOR0_VIEW:
1446 	case CB_COLOR1_VIEW:
1447 	case CB_COLOR2_VIEW:
1448 	case CB_COLOR3_VIEW:
1449 	case CB_COLOR4_VIEW:
1450 	case CB_COLOR5_VIEW:
1451 	case CB_COLOR6_VIEW:
1452 	case CB_COLOR7_VIEW:
1453 		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
1454 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1455 		track->cb_dirty = true;
1456 		break;
1457 	case CB_COLOR8_VIEW:
1458 	case CB_COLOR9_VIEW:
1459 	case CB_COLOR10_VIEW:
1460 	case CB_COLOR11_VIEW:
1461 		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
1462 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1463 		track->cb_dirty = true;
1464 		break;
1465 	case CB_COLOR0_INFO:
1466 	case CB_COLOR1_INFO:
1467 	case CB_COLOR2_INFO:
1468 	case CB_COLOR3_INFO:
1469 	case CB_COLOR4_INFO:
1470 	case CB_COLOR5_INFO:
1471 	case CB_COLOR6_INFO:
1472 	case CB_COLOR7_INFO:
1473 		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
1474 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1475 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1476 			r = evergreen_cs_packet_next_reloc(p, &reloc);
1477 			if (r) {
1478 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1479 						"0x%04X\n", reg);
1480 				return -EINVAL;
1481 			}
1482 			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1483 			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1484 		}
1485 		track->cb_dirty = true;
1486 		break;
1487 	case CB_COLOR8_INFO:
1488 	case CB_COLOR9_INFO:
1489 	case CB_COLOR10_INFO:
1490 	case CB_COLOR11_INFO:
1491 		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
1492 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1493 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1494 			r = evergreen_cs_packet_next_reloc(p, &reloc);
1495 			if (r) {
1496 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1497 						"0x%04X\n", reg);
1498 				return -EINVAL;
1499 			}
1500 			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1501 			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1502 		}
1503 		track->cb_dirty = true;
1504 		break;
1505 	case CB_COLOR0_PITCH:
1506 	case CB_COLOR1_PITCH:
1507 	case CB_COLOR2_PITCH:
1508 	case CB_COLOR3_PITCH:
1509 	case CB_COLOR4_PITCH:
1510 	case CB_COLOR5_PITCH:
1511 	case CB_COLOR6_PITCH:
1512 	case CB_COLOR7_PITCH:
1513 		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
1514 		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
1515 		track->cb_dirty = true;
1516 		break;
1517 	case CB_COLOR8_PITCH:
1518 	case CB_COLOR9_PITCH:
1519 	case CB_COLOR10_PITCH:
1520 	case CB_COLOR11_PITCH:
1521 		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
1522 		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
1523 		track->cb_dirty = true;
1524 		break;
1525 	case CB_COLOR0_SLICE:
1526 	case CB_COLOR1_SLICE:
1527 	case CB_COLOR2_SLICE:
1528 	case CB_COLOR3_SLICE:
1529 	case CB_COLOR4_SLICE:
1530 	case CB_COLOR5_SLICE:
1531 	case CB_COLOR6_SLICE:
1532 	case CB_COLOR7_SLICE:
1533 		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
1534 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1535 		track->cb_dirty = true;
1536 		break;
1537 	case CB_COLOR8_SLICE:
1538 	case CB_COLOR9_SLICE:
1539 	case CB_COLOR10_SLICE:
1540 	case CB_COLOR11_SLICE:
1541 		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
1542 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1543 		track->cb_dirty = true;
1544 		break;
1545 	case CB_COLOR0_ATTRIB:
1546 	case CB_COLOR1_ATTRIB:
1547 	case CB_COLOR2_ATTRIB:
1548 	case CB_COLOR3_ATTRIB:
1549 	case CB_COLOR4_ATTRIB:
1550 	case CB_COLOR5_ATTRIB:
1551 	case CB_COLOR6_ATTRIB:
1552 	case CB_COLOR7_ATTRIB:
1553 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1554 		if (r) {
1555 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1556 					"0x%04X\n", reg);
1557 			return -EINVAL;
1558 		}
1559 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1560 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1561 				unsigned bankw, bankh, mtaspect, tile_split;
1562 
1563 				evergreen_tiling_fields(reloc->lobj.tiling_flags,
1564 							&bankw, &bankh, &mtaspect,
1565 							&tile_split);
1566 				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1567 				ib[idx] |= CB_TILE_SPLIT(tile_split) |
1568 					   CB_BANK_WIDTH(bankw) |
1569 					   CB_BANK_HEIGHT(bankh) |
1570 					   CB_MACRO_TILE_ASPECT(mtaspect);
1571 			}
1572 		}
1573 		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
1574 		track->cb_color_attrib[tmp] = ib[idx];
1575 		track->cb_dirty = true;
1576 		break;
1577 	case CB_COLOR8_ATTRIB:
1578 	case CB_COLOR9_ATTRIB:
1579 	case CB_COLOR10_ATTRIB:
1580 	case CB_COLOR11_ATTRIB:
1581 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1582 		if (r) {
1583 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1584 					"0x%04X\n", reg);
1585 			return -EINVAL;
1586 		}
1587 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1588 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1589 				unsigned bankw, bankh, mtaspect, tile_split;
1590 
1591 				evergreen_tiling_fields(reloc->lobj.tiling_flags,
1592 							&bankw, &bankh, &mtaspect,
1593 							&tile_split);
1594 				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1595 				ib[idx] |= CB_TILE_SPLIT(tile_split) |
1596 					   CB_BANK_WIDTH(bankw) |
1597 					   CB_BANK_HEIGHT(bankh) |
1598 					   CB_MACRO_TILE_ASPECT(mtaspect);
1599 			}
1600 		}
1601 		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
1602 		track->cb_color_attrib[tmp] = ib[idx];
1603 		track->cb_dirty = true;
1604 		break;
1605 	case CB_COLOR0_FMASK:
1606 	case CB_COLOR1_FMASK:
1607 	case CB_COLOR2_FMASK:
1608 	case CB_COLOR3_FMASK:
1609 	case CB_COLOR4_FMASK:
1610 	case CB_COLOR5_FMASK:
1611 	case CB_COLOR6_FMASK:
1612 	case CB_COLOR7_FMASK:
1613 		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
1614 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1615 		if (r) {
1616 			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1617 			return -EINVAL;
1618 		}
1619 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1620 		track->cb_color_fmask_bo[tmp] = reloc->robj;
1621 		break;
1622 	case CB_COLOR0_CMASK:
1623 	case CB_COLOR1_CMASK:
1624 	case CB_COLOR2_CMASK:
1625 	case CB_COLOR3_CMASK:
1626 	case CB_COLOR4_CMASK:
1627 	case CB_COLOR5_CMASK:
1628 	case CB_COLOR6_CMASK:
1629 	case CB_COLOR7_CMASK:
1630 		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
1631 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1632 		if (r) {
1633 			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1634 			return -EINVAL;
1635 		}
1636 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1637 		track->cb_color_cmask_bo[tmp] = reloc->robj;
1638 		break;
1639 	case CB_COLOR0_FMASK_SLICE:
1640 	case CB_COLOR1_FMASK_SLICE:
1641 	case CB_COLOR2_FMASK_SLICE:
1642 	case CB_COLOR3_FMASK_SLICE:
1643 	case CB_COLOR4_FMASK_SLICE:
1644 	case CB_COLOR5_FMASK_SLICE:
1645 	case CB_COLOR6_FMASK_SLICE:
1646 	case CB_COLOR7_FMASK_SLICE:
1647 		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
1648 		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
1649 		break;
1650 	case CB_COLOR0_CMASK_SLICE:
1651 	case CB_COLOR1_CMASK_SLICE:
1652 	case CB_COLOR2_CMASK_SLICE:
1653 	case CB_COLOR3_CMASK_SLICE:
1654 	case CB_COLOR4_CMASK_SLICE:
1655 	case CB_COLOR5_CMASK_SLICE:
1656 	case CB_COLOR6_CMASK_SLICE:
1657 	case CB_COLOR7_CMASK_SLICE:
1658 		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
1659 		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
1660 		break;
1661 	case CB_COLOR0_BASE:
1662 	case CB_COLOR1_BASE:
1663 	case CB_COLOR2_BASE:
1664 	case CB_COLOR3_BASE:
1665 	case CB_COLOR4_BASE:
1666 	case CB_COLOR5_BASE:
1667 	case CB_COLOR6_BASE:
1668 	case CB_COLOR7_BASE:
1669 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1670 		if (r) {
1671 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1672 					"0x%04X\n", reg);
1673 			return -EINVAL;
1674 		}
1675 		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
1676 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1677 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1678 		track->cb_color_bo[tmp] = reloc->robj;
1679 		track->cb_dirty = true;
1680 		break;
1681 	case CB_COLOR8_BASE:
1682 	case CB_COLOR9_BASE:
1683 	case CB_COLOR10_BASE:
1684 	case CB_COLOR11_BASE:
1685 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1686 		if (r) {
1687 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1688 					"0x%04X\n", reg);
1689 			return -EINVAL;
1690 		}
1691 		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
1692 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1693 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1694 		track->cb_color_bo[tmp] = reloc->robj;
1695 		track->cb_dirty = true;
1696 		break;
1697 	case DB_HTILE_DATA_BASE:
1698 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1699 		if (r) {
1700 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1701 					"0x%04X\n", reg);
1702 			return -EINVAL;
1703 		}
1704 		track->htile_offset = radeon_get_ib_value(p, idx);
1705 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1706 		track->htile_bo = reloc->robj;
1707 		track->db_dirty = true;
1708 		break;
1709 	case DB_HTILE_SURFACE:
1710 		/* 8x8 only */
1711 		track->htile_surface = radeon_get_ib_value(p, idx);
1712 		track->db_dirty = true;
1713 		break;
1714 	case CB_IMMED0_BASE:
1715 	case CB_IMMED1_BASE:
1716 	case CB_IMMED2_BASE:
1717 	case CB_IMMED3_BASE:
1718 	case CB_IMMED4_BASE:
1719 	case CB_IMMED5_BASE:
1720 	case CB_IMMED6_BASE:
1721 	case CB_IMMED7_BASE:
1722 	case CB_IMMED8_BASE:
1723 	case CB_IMMED9_BASE:
1724 	case CB_IMMED10_BASE:
1725 	case CB_IMMED11_BASE:
1726 	case SQ_PGM_START_FS:
1727 	case SQ_PGM_START_ES:
1728 	case SQ_PGM_START_VS:
1729 	case SQ_PGM_START_GS:
1730 	case SQ_PGM_START_PS:
1731 	case SQ_PGM_START_HS:
1732 	case SQ_PGM_START_LS:
1733 	case SQ_CONST_MEM_BASE:
1734 	case SQ_ALU_CONST_CACHE_GS_0:
1735 	case SQ_ALU_CONST_CACHE_GS_1:
1736 	case SQ_ALU_CONST_CACHE_GS_2:
1737 	case SQ_ALU_CONST_CACHE_GS_3:
1738 	case SQ_ALU_CONST_CACHE_GS_4:
1739 	case SQ_ALU_CONST_CACHE_GS_5:
1740 	case SQ_ALU_CONST_CACHE_GS_6:
1741 	case SQ_ALU_CONST_CACHE_GS_7:
1742 	case SQ_ALU_CONST_CACHE_GS_8:
1743 	case SQ_ALU_CONST_CACHE_GS_9:
1744 	case SQ_ALU_CONST_CACHE_GS_10:
1745 	case SQ_ALU_CONST_CACHE_GS_11:
1746 	case SQ_ALU_CONST_CACHE_GS_12:
1747 	case SQ_ALU_CONST_CACHE_GS_13:
1748 	case SQ_ALU_CONST_CACHE_GS_14:
1749 	case SQ_ALU_CONST_CACHE_GS_15:
1750 	case SQ_ALU_CONST_CACHE_PS_0:
1751 	case SQ_ALU_CONST_CACHE_PS_1:
1752 	case SQ_ALU_CONST_CACHE_PS_2:
1753 	case SQ_ALU_CONST_CACHE_PS_3:
1754 	case SQ_ALU_CONST_CACHE_PS_4:
1755 	case SQ_ALU_CONST_CACHE_PS_5:
1756 	case SQ_ALU_CONST_CACHE_PS_6:
1757 	case SQ_ALU_CONST_CACHE_PS_7:
1758 	case SQ_ALU_CONST_CACHE_PS_8:
1759 	case SQ_ALU_CONST_CACHE_PS_9:
1760 	case SQ_ALU_CONST_CACHE_PS_10:
1761 	case SQ_ALU_CONST_CACHE_PS_11:
1762 	case SQ_ALU_CONST_CACHE_PS_12:
1763 	case SQ_ALU_CONST_CACHE_PS_13:
1764 	case SQ_ALU_CONST_CACHE_PS_14:
1765 	case SQ_ALU_CONST_CACHE_PS_15:
1766 	case SQ_ALU_CONST_CACHE_VS_0:
1767 	case SQ_ALU_CONST_CACHE_VS_1:
1768 	case SQ_ALU_CONST_CACHE_VS_2:
1769 	case SQ_ALU_CONST_CACHE_VS_3:
1770 	case SQ_ALU_CONST_CACHE_VS_4:
1771 	case SQ_ALU_CONST_CACHE_VS_5:
1772 	case SQ_ALU_CONST_CACHE_VS_6:
1773 	case SQ_ALU_CONST_CACHE_VS_7:
1774 	case SQ_ALU_CONST_CACHE_VS_8:
1775 	case SQ_ALU_CONST_CACHE_VS_9:
1776 	case SQ_ALU_CONST_CACHE_VS_10:
1777 	case SQ_ALU_CONST_CACHE_VS_11:
1778 	case SQ_ALU_CONST_CACHE_VS_12:
1779 	case SQ_ALU_CONST_CACHE_VS_13:
1780 	case SQ_ALU_CONST_CACHE_VS_14:
1781 	case SQ_ALU_CONST_CACHE_VS_15:
1782 	case SQ_ALU_CONST_CACHE_HS_0:
1783 	case SQ_ALU_CONST_CACHE_HS_1:
1784 	case SQ_ALU_CONST_CACHE_HS_2:
1785 	case SQ_ALU_CONST_CACHE_HS_3:
1786 	case SQ_ALU_CONST_CACHE_HS_4:
1787 	case SQ_ALU_CONST_CACHE_HS_5:
1788 	case SQ_ALU_CONST_CACHE_HS_6:
1789 	case SQ_ALU_CONST_CACHE_HS_7:
1790 	case SQ_ALU_CONST_CACHE_HS_8:
1791 	case SQ_ALU_CONST_CACHE_HS_9:
1792 	case SQ_ALU_CONST_CACHE_HS_10:
1793 	case SQ_ALU_CONST_CACHE_HS_11:
1794 	case SQ_ALU_CONST_CACHE_HS_12:
1795 	case SQ_ALU_CONST_CACHE_HS_13:
1796 	case SQ_ALU_CONST_CACHE_HS_14:
1797 	case SQ_ALU_CONST_CACHE_HS_15:
1798 	case SQ_ALU_CONST_CACHE_LS_0:
1799 	case SQ_ALU_CONST_CACHE_LS_1:
1800 	case SQ_ALU_CONST_CACHE_LS_2:
1801 	case SQ_ALU_CONST_CACHE_LS_3:
1802 	case SQ_ALU_CONST_CACHE_LS_4:
1803 	case SQ_ALU_CONST_CACHE_LS_5:
1804 	case SQ_ALU_CONST_CACHE_LS_6:
1805 	case SQ_ALU_CONST_CACHE_LS_7:
1806 	case SQ_ALU_CONST_CACHE_LS_8:
1807 	case SQ_ALU_CONST_CACHE_LS_9:
1808 	case SQ_ALU_CONST_CACHE_LS_10:
1809 	case SQ_ALU_CONST_CACHE_LS_11:
1810 	case SQ_ALU_CONST_CACHE_LS_12:
1811 	case SQ_ALU_CONST_CACHE_LS_13:
1812 	case SQ_ALU_CONST_CACHE_LS_14:
1813 	case SQ_ALU_CONST_CACHE_LS_15:
1814 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1815 		if (r) {
1816 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1817 					"0x%04X\n", reg);
1818 			return -EINVAL;
1819 		}
1820 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1821 		break;
1822 	case SX_MEMORY_EXPORT_BASE:
1823 		if (p->rdev->family >= CHIP_CAYMAN) {
1824 			dev_warn(p->dev, "bad SET_CONFIG_REG "
1825 				 "0x%04X\n", reg);
1826 			return -EINVAL;
1827 		}
1828 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1829 		if (r) {
1830 			dev_warn(p->dev, "bad SET_CONFIG_REG "
1831 					"0x%04X\n", reg);
1832 			return -EINVAL;
1833 		}
1834 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1835 		break;
1836 	case CAYMAN_SX_SCATTER_EXPORT_BASE:
1837 		if (p->rdev->family < CHIP_CAYMAN) {
1838 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1839 				 "0x%04X\n", reg);
1840 			return -EINVAL;
1841 		}
1842 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1843 		if (r) {
1844 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1845 					"0x%04X\n", reg);
1846 			return -EINVAL;
1847 		}
1848 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1849 		break;
1850 	case SX_MISC:
1851 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1852 		break;
1853 	default:
1854 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1855 		return -EINVAL;
1856 	}
1857 	return 0;
1858 }
1859 
1860 static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1861 {
1862 	u32 last_reg, m, i;
1863 
1864 	if (p->rdev->family >= CHIP_CAYMAN)
1865 		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
1866 	else
1867 		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
1868 
1869 	i = (reg >> 7);
1870 	if (i >= last_reg) {
1871 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1872 		return false;
1873 	}
1874 	m = 1 << ((reg >> 2) & 31);
1875 	if (p->rdev->family >= CHIP_CAYMAN) {
1876 		if (!(cayman_reg_safe_bm[i] & m))
1877 			return true;
1878 	} else {
1879 		if (!(evergreen_reg_safe_bm[i] & m))
1880 			return true;
1881 	}
1882 	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1883 	return false;
1884 }
1885 
1886 static int evergreen_packet3_check(struct radeon_cs_parser *p,
1887 				   struct radeon_cs_packet *pkt)
1888 {
1889 	struct radeon_cs_reloc *reloc;
1890 	struct evergreen_cs_track *track;
1891 	volatile u32 *ib;
1892 	unsigned idx;
1893 	unsigned i;
1894 	unsigned start_reg, end_reg, reg;
1895 	int r;
1896 	u32 idx_value;
1897 
1898 	track = (struct evergreen_cs_track *)p->track;
1899 	ib = p->ib->ptr;
1900 	idx = pkt->idx + 1;
1901 	idx_value = radeon_get_ib_value(p, idx);
1902 
1903 	switch (pkt->opcode) {
1904 	case PACKET3_SET_PREDICATION:
1905 	{
1906 		int pred_op;
1907 		int tmp;
1908 		uint64_t offset;
1909 
1910 		if (pkt->count != 1) {
1911 			DRM_ERROR("bad SET PREDICATION\n");
1912 			return -EINVAL;
1913 		}
1914 
1915 		tmp = radeon_get_ib_value(p, idx + 1);
1916 		pred_op = (tmp >> 16) & 0x7;
1917 
1918 		/* for the clear predicate operation */
1919 		if (pred_op == 0)
1920 			return 0;
1921 
1922 		if (pred_op > 2) {
1923 			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1924 			return -EINVAL;
1925 		}
1926 
1927 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1928 		if (r) {
1929 			DRM_ERROR("bad SET PREDICATION\n");
1930 			return -EINVAL;
1931 		}
1932 
1933 		offset = reloc->lobj.gpu_offset +
1934 		         (idx_value & 0xfffffff0) +
1935 		         ((u64)(tmp & 0xff) << 32);
1936 
1937 		ib[idx + 0] = offset;
1938 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1939 	}
1940 	break;
1941 	case PACKET3_CONTEXT_CONTROL:
1942 		if (pkt->count != 1) {
1943 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1944 			return -EINVAL;
1945 		}
1946 		break;
1947 	case PACKET3_INDEX_TYPE:
1948 	case PACKET3_NUM_INSTANCES:
1949 	case PACKET3_CLEAR_STATE:
1950 		if (pkt->count) {
1951 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1952 			return -EINVAL;
1953 		}
1954 		break;
1955 	case CAYMAN_PACKET3_DEALLOC_STATE:
1956 		if (p->rdev->family < CHIP_CAYMAN) {
1957 			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
1958 			return -EINVAL;
1959 		}
1960 		if (pkt->count) {
1961 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1962 			return -EINVAL;
1963 		}
1964 		break;
1965 	case PACKET3_INDEX_BASE:
1966 	{
1967 		uint64_t offset;
1968 
1969 		if (pkt->count != 1) {
1970 			DRM_ERROR("bad INDEX_BASE\n");
1971 			return -EINVAL;
1972 		}
1973 		r = evergreen_cs_packet_next_reloc(p, &reloc);
1974 		if (r) {
1975 			DRM_ERROR("bad INDEX_BASE\n");
1976 			return -EINVAL;
1977 		}
1978 
1979 		offset = reloc->lobj.gpu_offset +
1980 		         idx_value +
1981 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1982 
1983 		ib[idx+0] = offset;
1984 		ib[idx+1] = upper_32_bits(offset) & 0xff;
1985 
1986 		r = evergreen_cs_track_check(p);
1987 		if (r) {
1988 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1989 			return r;
1990 		}
1991 		break;
1992 	}
1993 	case PACKET3_DRAW_INDEX:
1994 	{
1995 		uint64_t offset;
1996 		if (pkt->count != 3) {
1997 			DRM_ERROR("bad DRAW_INDEX\n");
1998 			return -EINVAL;
1999 		}
2000 		r = evergreen_cs_packet_next_reloc(p, &reloc);
2001 		if (r) {
2002 			DRM_ERROR("bad DRAW_INDEX\n");
2003 			return -EINVAL;
2004 		}
2005 
2006 		offset = reloc->lobj.gpu_offset +
2007 		         idx_value +
2008 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2009 
2010 		ib[idx+0] = offset;
2011 		ib[idx+1] = upper_32_bits(offset) & 0xff;
2012 
2013 		r = evergreen_cs_track_check(p);
2014 		if (r) {
2015 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2016 			return r;
2017 		}
2018 		break;
2019 	}
2020 	case PACKET3_DRAW_INDEX_2:
2021 	{
2022 		uint64_t offset;
2023 
2024 		if (pkt->count != 4) {
2025 			DRM_ERROR("bad DRAW_INDEX_2\n");
2026 			return -EINVAL;
2027 		}
2028 		r = evergreen_cs_packet_next_reloc(p, &reloc);
2029 		if (r) {
2030 			DRM_ERROR("bad DRAW_INDEX_2\n");
2031 			return -EINVAL;
2032 		}
2033 
2034 		offset = reloc->lobj.gpu_offset +
2035 		         radeon_get_ib_value(p, idx+1) +
2036 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2037 
2038 		ib[idx+1] = offset;
2039 		ib[idx+2] = upper_32_bits(offset) & 0xff;
2040 
2041 		r = evergreen_cs_track_check(p);
2042 		if (r) {
2043 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2044 			return r;
2045 		}
2046 		break;
2047 	}
2048 	case PACKET3_DRAW_INDEX_AUTO:
2049 		if (pkt->count != 1) {
2050 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
2051 			return -EINVAL;
2052 		}
2053 		r = evergreen_cs_track_check(p);
2054 		if (r) {
2055 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2056 			return r;
2057 		}
2058 		break;
2059 	case PACKET3_DRAW_INDEX_MULTI_AUTO:
2060 		if (pkt->count != 2) {
2061 			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
2062 			return -EINVAL;
2063 		}
2064 		r = evergreen_cs_track_check(p);
2065 		if (r) {
2066 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2067 			return r;
2068 		}
2069 		break;
2070 	case PACKET3_DRAW_INDEX_IMMD:
2071 		if (pkt->count < 2) {
2072 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
2073 			return -EINVAL;
2074 		}
2075 		r = evergreen_cs_track_check(p);
2076 		if (r) {
2077 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2078 			return r;
2079 		}
2080 		break;
2081 	case PACKET3_DRAW_INDEX_OFFSET:
2082 		if (pkt->count != 2) {
2083 			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
2084 			return -EINVAL;
2085 		}
2086 		r = evergreen_cs_track_check(p);
2087 		if (r) {
2088 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2089 			return r;
2090 		}
2091 		break;
2092 	case PACKET3_DRAW_INDEX_OFFSET_2:
2093 		if (pkt->count != 3) {
2094 			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
2095 			return -EINVAL;
2096 		}
2097 		r = evergreen_cs_track_check(p);
2098 		if (r) {
2099 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2100 			return r;
2101 		}
2102 		break;
2103 	case PACKET3_DISPATCH_DIRECT:
2104 		if (pkt->count != 3) {
2105 			DRM_ERROR("bad DISPATCH_DIRECT\n");
2106 			return -EINVAL;
2107 		}
2108 		r = evergreen_cs_track_check(p);
2109 		if (r) {
2110 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2111 			return r;
2112 		}
2113 		break;
2114 	case PACKET3_DISPATCH_INDIRECT:
2115 		if (pkt->count != 1) {
2116 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
2117 			return -EINVAL;
2118 		}
2119 		r = evergreen_cs_packet_next_reloc(p, &reloc);
2120 		if (r) {
2121 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
2122 			return -EINVAL;
2123 		}
2124 		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
2125 		r = evergreen_cs_track_check(p);
2126 		if (r) {
2127 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2128 			return r;
2129 		}
2130 		break;
2131 	case PACKET3_WAIT_REG_MEM:
2132 		if (pkt->count != 5) {
2133 			DRM_ERROR("bad WAIT_REG_MEM\n");
2134 			return -EINVAL;
2135 		}
2136 		/* bit 4 is reg (0) or mem (1) */
2137 		if (idx_value & 0x10) {
2138 			uint64_t offset;
2139 
2140 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2141 			if (r) {
2142 				DRM_ERROR("bad WAIT_REG_MEM\n");
2143 				return -EINVAL;
2144 			}
2145 
2146 			offset = reloc->lobj.gpu_offset +
2147 			         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2148 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2149 
2150 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
2151 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2152 		}
2153 		break;
2154 	case PACKET3_SURFACE_SYNC:
2155 		if (pkt->count != 3) {
2156 			DRM_ERROR("bad SURFACE_SYNC\n");
2157 			return -EINVAL;
2158 		}
2159 		/* 0xffffffff/0x0 is flush all cache flag */
2160 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
2161 		    radeon_get_ib_value(p, idx + 2) != 0) {
2162 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2163 			if (r) {
2164 				DRM_ERROR("bad SURFACE_SYNC\n");
2165 				return -EINVAL;
2166 			}
2167 			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2168 		}
2169 		break;
2170 	case PACKET3_EVENT_WRITE:
2171 		if (pkt->count != 2 && pkt->count != 0) {
2172 			DRM_ERROR("bad EVENT_WRITE\n");
2173 			return -EINVAL;
2174 		}
2175 		if (pkt->count) {
2176 			uint64_t offset;
2177 
2178 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2179 			if (r) {
2180 				DRM_ERROR("bad EVENT_WRITE\n");
2181 				return -EINVAL;
2182 			}
2183 			offset = reloc->lobj.gpu_offset +
2184 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
2185 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2186 
2187 			ib[idx+1] = offset & 0xfffffff8;
2188 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2189 		}
2190 		break;
2191 	case PACKET3_EVENT_WRITE_EOP:
2192 	{
2193 		uint64_t offset;
2194 
2195 		if (pkt->count != 4) {
2196 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
2197 			return -EINVAL;
2198 		}
2199 		r = evergreen_cs_packet_next_reloc(p, &reloc);
2200 		if (r) {
2201 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
2202 			return -EINVAL;
2203 		}
2204 
2205 		offset = reloc->lobj.gpu_offset +
2206 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2207 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2208 
2209 		ib[idx+1] = offset & 0xfffffffc;
2210 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2211 		break;
2212 	}
2213 	case PACKET3_EVENT_WRITE_EOS:
2214 	{
2215 		uint64_t offset;
2216 
2217 		if (pkt->count != 3) {
2218 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
2219 			return -EINVAL;
2220 		}
2221 		r = evergreen_cs_packet_next_reloc(p, &reloc);
2222 		if (r) {
2223 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
2224 			return -EINVAL;
2225 		}
2226 
2227 		offset = reloc->lobj.gpu_offset +
2228 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2229 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2230 
2231 		ib[idx+1] = offset & 0xfffffffc;
2232 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2233 		break;
2234 	}
2235 	case PACKET3_SET_CONFIG_REG:
2236 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
2237 		end_reg = 4 * pkt->count + start_reg - 4;
2238 		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
2239 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2240 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2241 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2242 			return -EINVAL;
2243 		}
2244 		for (i = 0; i < pkt->count; i++) {
2245 			reg = start_reg + (4 * i);
2246 			r = evergreen_cs_check_reg(p, reg, idx+1+i);
2247 			if (r)
2248 				return r;
2249 		}
2250 		break;
2251 	case PACKET3_SET_CONTEXT_REG:
2252 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
2253 		end_reg = 4 * pkt->count + start_reg - 4;
2254 		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
2255 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
2256 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
2257 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2258 			return -EINVAL;
2259 		}
2260 		for (i = 0; i < pkt->count; i++) {
2261 			reg = start_reg + (4 * i);
2262 			r = evergreen_cs_check_reg(p, reg, idx+1+i);
2263 			if (r)
2264 				return r;
2265 		}
2266 		break;
2267 	case PACKET3_SET_RESOURCE:
2268 		if (pkt->count % 8) {
2269 			DRM_ERROR("bad SET_RESOURCE\n");
2270 			return -EINVAL;
2271 		}
2272 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
2273 		end_reg = 4 * pkt->count + start_reg - 4;
2274 		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
2275 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
2276 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
2277 			DRM_ERROR("bad SET_RESOURCE\n");
2278 			return -EINVAL;
2279 		}
2280 		for (i = 0; i < (pkt->count / 8); i++) {
2281 			struct radeon_bo *texture, *mipmap;
2282 			u32 toffset, moffset;
2283 			u32 size, offset;
2284 
2285 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
2286 			case SQ_TEX_VTX_VALID_TEXTURE:
2287 				/* tex base */
2288 				r = evergreen_cs_packet_next_reloc(p, &reloc);
2289 				if (r) {
2290 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
2291 					return -EINVAL;
2292 				}
2293 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
2294 					ib[idx+1+(i*8)+1] |=
2295 						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
2296 					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
2297 						unsigned bankw, bankh, mtaspect, tile_split;
2298 
2299 						evergreen_tiling_fields(reloc->lobj.tiling_flags,
2300 									&bankw, &bankh, &mtaspect,
2301 									&tile_split);
2302 						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
2303 						ib[idx+1+(i*8)+7] |=
2304 							TEX_BANK_WIDTH(bankw) |
2305 							TEX_BANK_HEIGHT(bankh) |
2306 							MACRO_TILE_ASPECT(mtaspect) |
2307 							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
2308 					}
2309 				}
2310 				texture = reloc->robj;
2311 				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2312 				/* tex mip base */
2313 				r = evergreen_cs_packet_next_reloc(p, &reloc);
2314 				if (r) {
2315 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
2316 					return -EINVAL;
2317 				}
2318 				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2319 				mipmap = reloc->robj;
2320 				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
2321 				if (r)
2322 					return r;
2323 				ib[idx+1+(i*8)+2] += toffset;
2324 				ib[idx+1+(i*8)+3] += moffset;
2325 				break;
2326 			case SQ_TEX_VTX_VALID_BUFFER:
2327 			{
2328 				uint64_t offset64;
2329 				/* vtx base */
2330 				r = evergreen_cs_packet_next_reloc(p, &reloc);
2331 				if (r) {
2332 					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
2333 					return -EINVAL;
2334 				}
2335 				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
2336 				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
2337 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2338 					/* force size to size of the buffer */
2339 					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
2340 					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
2341 				}
2342 
2343 				offset64 = reloc->lobj.gpu_offset + offset;
2344 				ib[idx+1+(i*8)+0] = offset64;
2345 				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2346 						    (upper_32_bits(offset64) & 0xff);
2347 				break;
2348 			}
2349 			case SQ_TEX_VTX_INVALID_TEXTURE:
2350 			case SQ_TEX_VTX_INVALID_BUFFER:
2351 			default:
2352 				DRM_ERROR("bad SET_RESOURCE\n");
2353 				return -EINVAL;
2354 			}
2355 		}
2356 		break;
2357 	case PACKET3_SET_ALU_CONST:
2358 		/* XXX fix me ALU const buffers only */
2359 		break;
2360 	case PACKET3_SET_BOOL_CONST:
2361 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
2362 		end_reg = 4 * pkt->count + start_reg - 4;
2363 		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
2364 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2365 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2366 			DRM_ERROR("bad SET_BOOL_CONST\n");
2367 			return -EINVAL;
2368 		}
2369 		break;
2370 	case PACKET3_SET_LOOP_CONST:
2371 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
2372 		end_reg = 4 * pkt->count + start_reg - 4;
2373 		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
2374 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2375 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2376 			DRM_ERROR("bad SET_LOOP_CONST\n");
2377 			return -EINVAL;
2378 		}
2379 		break;
2380 	case PACKET3_SET_CTL_CONST:
2381 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
2382 		end_reg = 4 * pkt->count + start_reg - 4;
2383 		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
2384 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2385 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2386 			DRM_ERROR("bad SET_CTL_CONST\n");
2387 			return -EINVAL;
2388 		}
2389 		break;
2390 	case PACKET3_SET_SAMPLER:
2391 		if (pkt->count % 3) {
2392 			DRM_ERROR("bad SET_SAMPLER\n");
2393 			return -EINVAL;
2394 		}
2395 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
2396 		end_reg = 4 * pkt->count + start_reg - 4;
2397 		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
2398 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
2399 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
2400 			DRM_ERROR("bad SET_SAMPLER\n");
2401 			return -EINVAL;
2402 		}
2403 		break;
2404 	case PACKET3_STRMOUT_BUFFER_UPDATE:
2405 		if (pkt->count != 4) {
2406 			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2407 			return -EINVAL;
2408 		}
2409 		/* Updating memory at DST_ADDRESS. */
2410 		if (idx_value & 0x1) {
2411 			u64 offset;
2412 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2413 			if (r) {
2414 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2415 				return -EINVAL;
2416 			}
2417 			offset = radeon_get_ib_value(p, idx+1);
2418 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2419 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2420 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2421 					  offset + 4, radeon_bo_size(reloc->robj));
2422 				return -EINVAL;
2423 			}
2424 			offset += reloc->lobj.gpu_offset;
2425 			ib[idx+1] = offset;
2426 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2427 		}
2428 		/* Reading data from SRC_ADDRESS. */
2429 		if (((idx_value >> 1) & 0x3) == 2) {
2430 			u64 offset;
2431 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2432 			if (r) {
2433 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2434 				return -EINVAL;
2435 			}
2436 			offset = radeon_get_ib_value(p, idx+3);
2437 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2438 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2439 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2440 					  offset + 4, radeon_bo_size(reloc->robj));
2441 				return -EINVAL;
2442 			}
2443 			offset += reloc->lobj.gpu_offset;
2444 			ib[idx+3] = offset;
2445 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2446 		}
2447 		break;
2448 	case PACKET3_COPY_DW:
2449 		if (pkt->count != 4) {
2450 			DRM_ERROR("bad COPY_DW (invalid count)\n");
2451 			return -EINVAL;
2452 		}
2453 		if (idx_value & 0x1) {
2454 			u64 offset;
2455 			/* SRC is memory. */
2456 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2457 			if (r) {
2458 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2459 				return -EINVAL;
2460 			}
2461 			offset = radeon_get_ib_value(p, idx+1);
2462 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2463 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2464 				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2465 					  offset + 4, radeon_bo_size(reloc->robj));
2466 				return -EINVAL;
2467 			}
2468 			offset += reloc->lobj.gpu_offset;
2469 			ib[idx+1] = offset;
2470 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2471 		} else {
2472 			/* SRC is a reg. */
2473 			reg = radeon_get_ib_value(p, idx+1) << 2;
2474 			if (!evergreen_is_safe_reg(p, reg, idx+1))
2475 				return -EINVAL;
2476 		}
2477 		if (idx_value & 0x2) {
2478 			u64 offset;
2479 			/* DST is memory. */
2480 			r = evergreen_cs_packet_next_reloc(p, &reloc);
2481 			if (r) {
2482 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2483 				return -EINVAL;
2484 			}
2485 			offset = radeon_get_ib_value(p, idx+3);
2486 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2487 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2488 				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2489 					  offset + 4, radeon_bo_size(reloc->robj));
2490 				return -EINVAL;
2491 			}
2492 			offset += reloc->lobj.gpu_offset;
2493 			ib[idx+3] = offset;
2494 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2495 		} else {
2496 			/* DST is a reg. */
2497 			reg = radeon_get_ib_value(p, idx+3) << 2;
2498 			if (!evergreen_is_safe_reg(p, reg, idx+3))
2499 				return -EINVAL;
2500 		}
2501 		break;
2502 	case PACKET3_NOP:
2503 		break;
2504 	default:
2505 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2506 		return -EINVAL;
2507 	}
2508 	return 0;
2509 }
2510 
2511 int evergreen_cs_parse(struct radeon_cs_parser *p)
2512 {
2513 	struct radeon_cs_packet pkt;
2514 	struct evergreen_cs_track *track;
2515 	u32 tmp;
2516 	int r;
2517 
2518 	if (p->track == NULL) {
2519 		/* initialize tracker, we are in kms */
2520 		track = kzalloc(sizeof(*track), GFP_KERNEL);
2521 		if (track == NULL)
2522 			return -ENOMEM;
2523 		evergreen_cs_track_init(track);
2524 		if (p->rdev->family >= CHIP_CAYMAN)
2525 			tmp = p->rdev->config.cayman.tile_config;
2526 		else
2527 			tmp = p->rdev->config.evergreen.tile_config;
2528 
2529 		switch (tmp & 0xf) {
2530 		case 0:
2531 			track->npipes = 1;
2532 			break;
2533 		case 1:
2534 		default:
2535 			track->npipes = 2;
2536 			break;
2537 		case 2:
2538 			track->npipes = 4;
2539 			break;
2540 		case 3:
2541 			track->npipes = 8;
2542 			break;
2543 		}
2544 
2545 		switch ((tmp & 0xf0) >> 4) {
2546 		case 0:
2547 			track->nbanks = 4;
2548 			break;
2549 		case 1:
2550 		default:
2551 			track->nbanks = 8;
2552 			break;
2553 		case 2:
2554 			track->nbanks = 16;
2555 			break;
2556 		}
2557 
2558 		switch ((tmp & 0xf00) >> 8) {
2559 		case 0:
2560 			track->group_size = 256;
2561 			break;
2562 		case 1:
2563 		default:
2564 			track->group_size = 512;
2565 			break;
2566 		}
2567 
2568 		switch ((tmp & 0xf000) >> 12) {
2569 		case 0:
2570 			track->row_size = 1;
2571 			break;
2572 		case 1:
2573 		default:
2574 			track->row_size = 2;
2575 			break;
2576 		case 2:
2577 			track->row_size = 4;
2578 			break;
2579 		}
2580 
2581 		p->track = track;
2582 	}
2583 	do {
2584 		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
2585 		if (r) {
2586 			kfree(p->track);
2587 			p->track = NULL;
2588 			return r;
2589 		}
2590 		p->idx += pkt.count + 2;
2591 		switch (pkt.type) {
2592 		case PACKET_TYPE0:
2593 			r = evergreen_cs_parse_packet0(p, &pkt);
2594 			break;
2595 		case PACKET_TYPE2:
2596 			break;
2597 		case PACKET_TYPE3:
2598 			r = evergreen_packet3_check(p, &pkt);
2599 			break;
2600 		default:
2601 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2602 			kfree(p->track);
2603 			p->track = NULL;
2604 			return -EINVAL;
2605 		}
2606 		if (r) {
2607 			kfree(p->track);
2608 			p->track = NULL;
2609 			return r;
2610 		}
2611 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2612 #if 0
2613 	for (r = 0; r < p->ib->length_dw; r++) {
2614 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
2615 		mdelay(1);
2616 	}
2617 #endif
2618 	kfree(p->track);
2619 	p->track = NULL;
2620 	return 0;
2621 }
2622 
2623 /* vm parser */
2624 static bool evergreen_vm_reg_valid(u32 reg)
2625 {
2626 	/* context regs are fine */
2627 	if (reg >= 0x28000)
2628 		return true;
2629 
2630 	/* check config regs */
2631 	switch (reg) {
2632 	case GRBM_GFX_INDEX:
2633 	case VGT_VTX_VECT_EJECT_REG:
2634 	case VGT_CACHE_INVALIDATION:
2635 	case VGT_GS_VERTEX_REUSE:
2636 	case VGT_PRIMITIVE_TYPE:
2637 	case VGT_INDEX_TYPE:
2638 	case VGT_NUM_INDICES:
2639 	case VGT_NUM_INSTANCES:
2640 	case VGT_COMPUTE_DIM_X:
2641 	case VGT_COMPUTE_DIM_Y:
2642 	case VGT_COMPUTE_DIM_Z:
2643 	case VGT_COMPUTE_START_X:
2644 	case VGT_COMPUTE_START_Y:
2645 	case VGT_COMPUTE_START_Z:
2646 	case VGT_COMPUTE_INDEX:
2647 	case VGT_COMPUTE_THREAD_GROUP_SIZE:
2648 	case VGT_HS_OFFCHIP_PARAM:
2649 	case PA_CL_ENHANCE:
2650 	case PA_SU_LINE_STIPPLE_VALUE:
2651 	case PA_SC_LINE_STIPPLE_STATE:
2652 	case PA_SC_ENHANCE:
2653 	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
2654 	case SQ_DYN_GPR_SIMD_LOCK_EN:
2655 	case SQ_CONFIG:
2656 	case SQ_GPR_RESOURCE_MGMT_1:
2657 	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
2658 	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
2659 	case SQ_CONST_MEM_BASE:
2660 	case SQ_STATIC_THREAD_MGMT_1:
2661 	case SQ_STATIC_THREAD_MGMT_2:
2662 	case SQ_STATIC_THREAD_MGMT_3:
2663 	case SPI_CONFIG_CNTL:
2664 	case SPI_CONFIG_CNTL_1:
2665 	case TA_CNTL_AUX:
2666 	case DB_DEBUG:
2667 	case DB_DEBUG2:
2668 	case DB_DEBUG3:
2669 	case DB_DEBUG4:
2670 	case DB_WATERMARKS:
2671 	case TD_PS_BORDER_COLOR_INDEX:
2672 	case TD_PS_BORDER_COLOR_RED:
2673 	case TD_PS_BORDER_COLOR_GREEN:
2674 	case TD_PS_BORDER_COLOR_BLUE:
2675 	case TD_PS_BORDER_COLOR_ALPHA:
2676 	case TD_VS_BORDER_COLOR_INDEX:
2677 	case TD_VS_BORDER_COLOR_RED:
2678 	case TD_VS_BORDER_COLOR_GREEN:
2679 	case TD_VS_BORDER_COLOR_BLUE:
2680 	case TD_VS_BORDER_COLOR_ALPHA:
2681 	case TD_GS_BORDER_COLOR_INDEX:
2682 	case TD_GS_BORDER_COLOR_RED:
2683 	case TD_GS_BORDER_COLOR_GREEN:
2684 	case TD_GS_BORDER_COLOR_BLUE:
2685 	case TD_GS_BORDER_COLOR_ALPHA:
2686 	case TD_HS_BORDER_COLOR_INDEX:
2687 	case TD_HS_BORDER_COLOR_RED:
2688 	case TD_HS_BORDER_COLOR_GREEN:
2689 	case TD_HS_BORDER_COLOR_BLUE:
2690 	case TD_HS_BORDER_COLOR_ALPHA:
2691 	case TD_LS_BORDER_COLOR_INDEX:
2692 	case TD_LS_BORDER_COLOR_RED:
2693 	case TD_LS_BORDER_COLOR_GREEN:
2694 	case TD_LS_BORDER_COLOR_BLUE:
2695 	case TD_LS_BORDER_COLOR_ALPHA:
2696 	case TD_CS_BORDER_COLOR_INDEX:
2697 	case TD_CS_BORDER_COLOR_RED:
2698 	case TD_CS_BORDER_COLOR_GREEN:
2699 	case TD_CS_BORDER_COLOR_BLUE:
2700 	case TD_CS_BORDER_COLOR_ALPHA:
2701 	case SQ_ESGS_RING_SIZE:
2702 	case SQ_GSVS_RING_SIZE:
2703 	case SQ_ESTMP_RING_SIZE:
2704 	case SQ_GSTMP_RING_SIZE:
2705 	case SQ_HSTMP_RING_SIZE:
2706 	case SQ_LSTMP_RING_SIZE:
2707 	case SQ_PSTMP_RING_SIZE:
2708 	case SQ_VSTMP_RING_SIZE:
2709 	case SQ_ESGS_RING_ITEMSIZE:
2710 	case SQ_ESTMP_RING_ITEMSIZE:
2711 	case SQ_GSTMP_RING_ITEMSIZE:
2712 	case SQ_GSVS_RING_ITEMSIZE:
2713 	case SQ_GS_VERT_ITEMSIZE:
2714 	case SQ_GS_VERT_ITEMSIZE_1:
2715 	case SQ_GS_VERT_ITEMSIZE_2:
2716 	case SQ_GS_VERT_ITEMSIZE_3:
2717 	case SQ_GSVS_RING_OFFSET_1:
2718 	case SQ_GSVS_RING_OFFSET_2:
2719 	case SQ_GSVS_RING_OFFSET_3:
2720 	case SQ_HSTMP_RING_ITEMSIZE:
2721 	case SQ_LSTMP_RING_ITEMSIZE:
2722 	case SQ_PSTMP_RING_ITEMSIZE:
2723 	case SQ_VSTMP_RING_ITEMSIZE:
2724 	case VGT_TF_RING_SIZE:
2725 	case SQ_ESGS_RING_BASE:
2726 	case SQ_GSVS_RING_BASE:
2727 	case SQ_ESTMP_RING_BASE:
2728 	case SQ_GSTMP_RING_BASE:
2729 	case SQ_HSTMP_RING_BASE:
2730 	case SQ_LSTMP_RING_BASE:
2731 	case SQ_PSTMP_RING_BASE:
2732 	case SQ_VSTMP_RING_BASE:
2733 	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
2734 	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
2735 		return true;
2736 	default:
2737 		return false;
2738 	}
2739 }
2740 
2741 static int evergreen_vm_packet3_check(struct radeon_device *rdev,
2742 				      u32 *ib, struct radeon_cs_packet *pkt)
2743 {
2744 	u32 idx = pkt->idx + 1;
2745 	u32 idx_value = ib[idx];
2746 	u32 start_reg, end_reg, reg, i;
2747 
2748 	switch (pkt->opcode) {
2749 	case PACKET3_NOP:
2750 	case PACKET3_SET_BASE:
2751 	case PACKET3_CLEAR_STATE:
2752 	case PACKET3_INDEX_BUFFER_SIZE:
2753 	case PACKET3_DISPATCH_DIRECT:
2754 	case PACKET3_DISPATCH_INDIRECT:
2755 	case PACKET3_MODE_CONTROL:
2756 	case PACKET3_SET_PREDICATION:
2757 	case PACKET3_COND_EXEC:
2758 	case PACKET3_PRED_EXEC:
2759 	case PACKET3_DRAW_INDIRECT:
2760 	case PACKET3_DRAW_INDEX_INDIRECT:
2761 	case PACKET3_INDEX_BASE:
2762 	case PACKET3_DRAW_INDEX_2:
2763 	case PACKET3_CONTEXT_CONTROL:
2764 	case PACKET3_DRAW_INDEX_OFFSET:
2765 	case PACKET3_INDEX_TYPE:
2766 	case PACKET3_DRAW_INDEX:
2767 	case PACKET3_DRAW_INDEX_AUTO:
2768 	case PACKET3_DRAW_INDEX_IMMD:
2769 	case PACKET3_NUM_INSTANCES:
2770 	case PACKET3_DRAW_INDEX_MULTI_AUTO:
2771 	case PACKET3_STRMOUT_BUFFER_UPDATE:
2772 	case PACKET3_DRAW_INDEX_OFFSET_2:
2773 	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
2774 	case PACKET3_MPEG_INDEX:
2775 	case PACKET3_WAIT_REG_MEM:
2776 	case PACKET3_MEM_WRITE:
2777 	case PACKET3_SURFACE_SYNC:
2778 	case PACKET3_EVENT_WRITE:
2779 	case PACKET3_EVENT_WRITE_EOP:
2780 	case PACKET3_EVENT_WRITE_EOS:
2781 	case PACKET3_SET_CONTEXT_REG:
2782 	case PACKET3_SET_BOOL_CONST:
2783 	case PACKET3_SET_LOOP_CONST:
2784 	case PACKET3_SET_RESOURCE:
2785 	case PACKET3_SET_SAMPLER:
2786 	case PACKET3_SET_CTL_CONST:
2787 	case PACKET3_SET_RESOURCE_OFFSET:
2788 	case PACKET3_SET_CONTEXT_REG_INDIRECT:
2789 	case PACKET3_SET_RESOURCE_INDIRECT:
2790 	case CAYMAN_PACKET3_DEALLOC_STATE:
2791 		break;
2792 	case PACKET3_COND_WRITE:
2793 		if (idx_value & 0x100) {
2794 			reg = ib[idx + 5] * 4;
2795 			if (!evergreen_vm_reg_valid(reg))
2796 				return -EINVAL;
2797 		}
2798 		break;
2799 	case PACKET3_COPY_DW:
2800 		if (idx_value & 0x2) {
2801 			reg = ib[idx + 3] * 4;
2802 			if (!evergreen_vm_reg_valid(reg))
2803 				return -EINVAL;
2804 		}
2805 		break;
2806 	case PACKET3_SET_CONFIG_REG:
2807 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
2808 		end_reg = 4 * pkt->count + start_reg - 4;
2809 		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
2810 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2811 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2812 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2813 			return -EINVAL;
2814 		}
2815 		for (i = 0; i < pkt->count; i++) {
2816 			reg = start_reg + (4 * i);
2817 			if (!evergreen_vm_reg_valid(reg))
2818 				return -EINVAL;
2819 		}
2820 		break;
2821 	default:
2822 		return -EINVAL;
2823 	}
2824 	return 0;
2825 }
2826 
2827 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
2828 {
2829 	int ret = 0;
2830 	u32 idx = 0;
2831 	struct radeon_cs_packet pkt;
2832 
2833 	do {
2834 		pkt.idx = idx;
2835 		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
2836 		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
2837 		pkt.one_reg_wr = 0;
2838 		switch (pkt.type) {
2839 		case PACKET_TYPE0:
2840 			dev_err(rdev->dev, "Packet0 not allowed!\n");
2841 			ret = -EINVAL;
2842 			break;
2843 		case PACKET_TYPE2:
2844 			idx += 1;
2845 			break;
2846 		case PACKET_TYPE3:
2847 			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
2848 			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
2849 			idx += pkt.count + 2;
2850 			break;
2851 		default:
2852 			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
2853 			ret = -EINVAL;
2854 			break;
2855 		}
2856 		if (ret)
2857 			break;
2858 	} while (idx < ib->length_dw);
2859 
2860 	return ret;
2861 }
2862