xref: /openbmc/linux/drivers/gpu/drm/radeon/r600_cs.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/kernel.h>
29 #include "drmP.h"
30 #include "radeon.h"
31 #include "r600d.h"
32 #include "r600_reg_safe.h"
33 
34 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
35 					struct radeon_cs_reloc **cs_reloc);
36 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
37 					struct radeon_cs_reloc **cs_reloc);
38 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
39 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
40 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
41 
42 
43 struct r600_cs_track {
44 	/* configuration we mirror so that we use the same code between kms/ums */
45 	u32			group_size;
46 	u32			nbanks;
47 	u32			npipes;
48 	/* values we track */
49 	u32			sq_config;
50 	u32			nsamples;
51 	u32			cb_color_base_last[8];
52 	struct radeon_bo	*cb_color_bo[8];
53 	u64			cb_color_bo_mc[8];
54 	u32			cb_color_bo_offset[8];
55 	struct radeon_bo	*cb_color_frag_bo[8];
56 	struct radeon_bo	*cb_color_tile_bo[8];
57 	u32			cb_color_info[8];
58 	u32			cb_color_size_idx[8];
59 	u32			cb_target_mask;
60 	u32			cb_shader_mask;
61 	u32			cb_color_size[8];
62 	u32			vgt_strmout_en;
63 	u32			vgt_strmout_buffer_en;
64 	u32			db_depth_control;
65 	u32			db_depth_info;
66 	u32			db_depth_size_idx;
67 	u32			db_depth_view;
68 	u32			db_depth_size;
69 	u32			db_offset;
70 	struct radeon_bo	*db_bo;
71 	u64			db_bo_mc;
72 };
73 
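/* r600_bpe_from_format() maps a CB/texture data format to its size in bytes
 * per element (for instance V_038004_COLOR_8_8_8_8 -> 4 and
 * V_038004_COLOR_16_16_16_16 -> 8, per the switch below). Formats the checker
 * cannot size return -EINVAL with *bpe set to the worst case of 16 bytes.
 */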
74 static inline int r600_bpe_from_format(u32 *bpe, u32 format)
75 {
76 	switch (format) {
77 	case V_038004_COLOR_8:
78 	case V_038004_COLOR_4_4:
79 	case V_038004_COLOR_3_3_2:
80 	case V_038004_FMT_1:
81 		*bpe = 1;
82 		break;
83 	case V_038004_COLOR_16:
84 	case V_038004_COLOR_16_FLOAT:
85 	case V_038004_COLOR_8_8:
86 	case V_038004_COLOR_5_6_5:
87 	case V_038004_COLOR_6_5_5:
88 	case V_038004_COLOR_1_5_5_5:
89 	case V_038004_COLOR_4_4_4_4:
90 	case V_038004_COLOR_5_5_5_1:
91 		*bpe = 2;
92 		break;
93 	case V_038004_FMT_8_8_8:
94 		*bpe = 3;
95 		break;
96 	case V_038004_COLOR_32:
97 	case V_038004_COLOR_32_FLOAT:
98 	case V_038004_COLOR_16_16:
99 	case V_038004_COLOR_16_16_FLOAT:
100 	case V_038004_COLOR_8_24:
101 	case V_038004_COLOR_8_24_FLOAT:
102 	case V_038004_COLOR_24_8:
103 	case V_038004_COLOR_24_8_FLOAT:
104 	case V_038004_COLOR_10_11_11:
105 	case V_038004_COLOR_10_11_11_FLOAT:
106 	case V_038004_COLOR_11_11_10:
107 	case V_038004_COLOR_11_11_10_FLOAT:
108 	case V_038004_COLOR_2_10_10_10:
109 	case V_038004_COLOR_8_8_8_8:
110 	case V_038004_COLOR_10_10_10_2:
111 	case V_038004_FMT_5_9_9_9_SHAREDEXP:
112 	case V_038004_FMT_32_AS_8:
113 	case V_038004_FMT_32_AS_8_8:
114 		*bpe = 4;
115 		break;
116 	case V_038004_COLOR_X24_8_32_FLOAT:
117 	case V_038004_COLOR_32_32:
118 	case V_038004_COLOR_32_32_FLOAT:
119 	case V_038004_COLOR_16_16_16_16:
120 	case V_038004_COLOR_16_16_16_16_FLOAT:
121 		*bpe = 8;
122 		break;
123 	case V_038004_FMT_16_16_16:
124 	case V_038004_FMT_16_16_16_FLOAT:
125 		*bpe = 6;
126 		break;
127 	case V_038004_FMT_32_32_32:
128 	case V_038004_FMT_32_32_32_FLOAT:
129 		*bpe = 12;
130 		break;
131 	case V_038004_COLOR_32_32_32_32:
132 	case V_038004_COLOR_32_32_32_32_FLOAT:
133 		*bpe = 16;
134 		break;
135 	case V_038004_FMT_GB_GR:
136 	case V_038004_FMT_BG_RG:
137 	case V_038004_COLOR_INVALID:
138 	default:
139 		*bpe = 16;
140 		return -EINVAL;
141 	}
142 	return 0;
143 }
144 
145 struct array_mode_checker {
146 	int array_mode;
147 	u32 group_size;
148 	u32 nbanks;
149 	u32 npipes;
150 	u32 nsamples;
151 	u32 bpe;
152 };
153 
154 /* returns alignment in pixels for pitch/height/depth and bytes for base */
155 static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
156 						u32 *pitch_align,
157 						u32 *height_align,
158 						u32 *depth_align,
159 						u64 *base_align)
160 {
161 	u32 tile_width = 8;
162 	u32 tile_height = 8;
163 	u32 macro_tile_width = values->nbanks;
164 	u32 macro_tile_height = values->npipes;
165 	u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
166 	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
167 
168 	switch (values->array_mode) {
169 	case ARRAY_LINEAR_GENERAL:
170 		/* technically tile_width/_height for pitch/height */
171 		*pitch_align = 1; /* tile_width */
172 		*height_align = 1; /* tile_height */
173 		*depth_align = 1;
174 		*base_align = 1;
175 		break;
176 	case ARRAY_LINEAR_ALIGNED:
177 		*pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
178 		*height_align = tile_height;
179 		*depth_align = 1;
180 		*base_align = values->group_size;
181 		break;
182 	case ARRAY_1D_TILED_THIN1:
183 		*pitch_align = max((u32)tile_width,
184 				   (u32)(values->group_size /
185 					 (tile_height * values->bpe * values->nsamples)));
186 		*height_align = tile_height;
187 		*depth_align = 1;
188 		*base_align = values->group_size;
189 		break;
190 	case ARRAY_2D_TILED_THIN1:
191 		*pitch_align = max((u32)macro_tile_width,
192 				  (u32)(((values->group_size / tile_height) /
193 					 (values->bpe * values->nsamples)) *
194 					values->nbanks)) * tile_width;
195 		*height_align = macro_tile_height * tile_height;
196 		*depth_align = 1;
197 		*base_align = max(macro_tile_bytes,
198 				  (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
199 		break;
200 	default:
201 		return -EINVAL;
202 	}
203 
204 	return 0;
205 }
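/* Illustrative example for r600_get_array_mode_alignment() above, with made-up
 * parameters group_size = 256, nbanks = 4, npipes = 2, bpe = 4, nsamples = 1:
 * tile_bytes = 8 * 8 * 4 * 1 = 256 and macro_tile_bytes = 4 * 2 * 256 = 2048.
 * For ARRAY_2D_TILED_THIN1 this gives pitch_align = max(4, ((256 / 8) / 4) * 4) * 8
 * = 256 pixels, height_align = 2 * 8 = 16 rows and base_align =
 * max(2048, 256 * 4 * 16) = 16384 bytes. Real values come from the tiling
 * configuration read at init time, not from this example.
 */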
206 
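/* Reset the tracked state to conservative defaults: offsets and masks are set
 * to all ones and the depth info assumes the largest format with htile enabled,
 * so that state userspace never programs is caught by the checks below rather
 * than slipping through.
 */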
207 static void r600_cs_track_init(struct r600_cs_track *track)
208 {
209 	int i;
210 
211 	/* assume DX9 mode */
212 	track->sq_config = DX9_CONSTS;
213 	for (i = 0; i < 8; i++) {
214 		track->cb_color_base_last[i] = 0;
215 		track->cb_color_size[i] = 0;
216 		track->cb_color_size_idx[i] = 0;
217 		track->cb_color_info[i] = 0;
218 		track->cb_color_bo[i] = NULL;
219 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
220 		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
221 	}
222 	track->cb_target_mask = 0xFFFFFFFF;
223 	track->cb_shader_mask = 0xFFFFFFFF;
224 	track->db_bo = NULL;
225 	track->db_bo_mc = 0xFFFFFFFF;
226 	/* assume the biggest format and that htile is enabled */
227 	track->db_depth_info = 7 | (1 << 25);
228 	track->db_depth_view = 0xFFFFC000;
229 	track->db_depth_size = 0xFFFFFFFF;
230 	track->db_depth_size_idx = 0;
231 	track->db_depth_control = 0xFFFFFFFF;
232 }
233 
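/* Validate color buffer i against the tracked state: reject FMASK/CMASK
 * tiling, derive pitch and height from CB_COLOR[i]_SIZE, check the alignment
 * required by the array mode and make sure the bo is large enough. On success
 * the CB_COLOR[i]_SIZE dword in the IB is rewritten with a slice_tile_max
 * clamped to what the bo can actually hold.
 */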
234 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
235 {
236 	struct r600_cs_track *track = p->track;
237 	u32 bpe = 0, slice_tile_max, size, tmp;
238 	u32 height, height_align, pitch, pitch_align, depth_align;
239 	u64 base_offset, base_align;
240 	struct array_mode_checker array_check;
241 	volatile u32 *ib = p->ib->ptr;
242 	unsigned array_mode;
243 
244 	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
245 		dev_warn(p->dev, "FMASK or CMASK buffers are not supported by this kernel\n");
246 		return -EINVAL;
247 	}
248 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
249 	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
250 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
251 			 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
252 			i, track->cb_color_info[i]);
253 		return -EINVAL;
254 	}
255 	/* pitch in pixels */
256 	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
257 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
258 	slice_tile_max *= 64;
259 	height = slice_tile_max / pitch;
260 	if (height > 8192)
261 		height = 8192;
262 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
263 
264 	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
265 	array_check.array_mode = array_mode;
266 	array_check.group_size = track->group_size;
267 	array_check.nbanks = track->nbanks;
268 	array_check.npipes = track->npipes;
269 	array_check.nsamples = track->nsamples;
270 	array_check.bpe = bpe;
271 	if (r600_get_array_mode_alignment(&array_check,
272 					  &pitch_align, &height_align, &depth_align, &base_align)) {
273 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
274 			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
275 			 track->cb_color_info[i]);
276 		return -EINVAL;
277 	}
278 	switch (array_mode) {
279 	case V_0280A0_ARRAY_LINEAR_GENERAL:
280 		break;
281 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
282 		break;
283 	case V_0280A0_ARRAY_1D_TILED_THIN1:
284 		/* avoid breaking userspace */
285 		if (height > 7)
286 			height &= ~0x7;
287 		break;
288 	case V_0280A0_ARRAY_2D_TILED_THIN1:
289 		break;
290 	default:
291 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
292 			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
293 			track->cb_color_info[i]);
294 		return -EINVAL;
295 	}
296 
297 	if (!IS_ALIGNED(pitch, pitch_align)) {
298 		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
299 			 __func__, __LINE__, pitch);
300 		return -EINVAL;
301 	}
302 	if (!IS_ALIGNED(height, height_align)) {
303 		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
304 			 __func__, __LINE__, height);
305 		return -EINVAL;
306 	}
307 	if (!IS_ALIGNED(base_offset, base_align)) {
308 		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
309 		return -EINVAL;
310 	}
311 
312 	/* check offset */
313 	tmp = height * pitch * bpe;
314 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
315 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
316 			/* the initial DDX does bad things with the CB size occasionally */
317 			/* it rounds up height too far for slice tile max but the BO is smaller */
318 			/* r600c,g also seem to flush at bad times in some apps resulting in
319 			 * bogus values here. So for linear just allow anything to avoid breaking
320 			 * broken userspace.
321 			 */
322 		} else {
323 			dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
324 			return -EINVAL;
325 		}
326 	}
327 	/* limit max tile */
328 	tmp = (height * pitch) >> 6;
329 	if (tmp < slice_tile_max)
330 		slice_tile_max = tmp;
331 	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
332 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
333 	ib[track->cb_color_size_idx[i]] = tmp;
334 	return 0;
335 }
336 
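/* Draw-time check of the tracked state: stream output must be disabled, every
 * render target enabled in CB_TARGET_MASK needs a color buffer (validated and
 * its SIZE rewritten via r600_cs_track_validate_cb()), and if depth or stencil
 * is enabled the depth buffer format, tiling alignment and bo size are checked
 * as well.
 */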
337 static int r600_cs_track_check(struct radeon_cs_parser *p)
338 {
339 	struct r600_cs_track *track = p->track;
340 	u32 tmp;
341 	int r, i;
342 	volatile u32 *ib = p->ib->ptr;
343 
344 	/* on a legacy kernel we don't perform the advanced checks */
345 	if (p->rdev == NULL)
346 		return 0;
347 	/* we don't support the SMX output buffer yet */
348 	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
349 		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
350 		return -EINVAL;
351 	}
352 	/* check that we have a cb for each enabled target, we don't check
353 	 * shader_mask because it seems mesa isn't always setting it :(
354 	 */
355 	tmp = track->cb_target_mask;
356 	for (i = 0; i < 8; i++) {
357 		if ((tmp >> (i * 4)) & 0xF) {
358 			/* at least one component is enabled */
359 			if (track->cb_color_bo[i] == NULL) {
360 				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
361 					__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
362 				return -EINVAL;
363 			}
364 			/* perform rewrite of CB_COLOR[0-7]_SIZE */
365 			r = r600_cs_track_validate_cb(p, i);
366 			if (r)
367 				return r;
368 		}
369 	}
370 	/* Check depth buffer */
371 	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
372 		G_028800_Z_ENABLE(track->db_depth_control)) {
373 		u32 nviews, bpe, ntiles, size, slice_tile_max;
374 		u32 height, height_align, pitch, pitch_align, depth_align;
375 		u64 base_offset, base_align;
376 		struct array_mode_checker array_check;
377 		int array_mode;
378 
379 		if (track->db_bo == NULL) {
380 			dev_warn(p->dev, "z/stencil with no depth buffer\n");
381 			return -EINVAL;
382 		}
383 		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
384 			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
385 			return -EINVAL;
386 		}
387 		switch (G_028010_FORMAT(track->db_depth_info)) {
388 		case V_028010_DEPTH_16:
389 			bpe = 2;
390 			break;
391 		case V_028010_DEPTH_X8_24:
392 		case V_028010_DEPTH_8_24:
393 		case V_028010_DEPTH_X8_24_FLOAT:
394 		case V_028010_DEPTH_8_24_FLOAT:
395 		case V_028010_DEPTH_32_FLOAT:
396 			bpe = 4;
397 			break;
398 		case V_028010_DEPTH_X24_8_32_FLOAT:
399 			bpe = 8;
400 			break;
401 		default:
402 			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
403 			return -EINVAL;
404 		}
405 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
406 			if (!track->db_depth_size_idx) {
407 				dev_warn(p->dev, "z/stencil buffer size not set\n");
408 				return -EINVAL;
409 			}
410 			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
411 			tmp = (tmp / bpe) >> 6;
412 			if (!tmp) {
413 				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
414 						track->db_depth_size, bpe, track->db_offset,
415 						radeon_bo_size(track->db_bo));
416 				return -EINVAL;
417 			}
418 			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
419 		} else {
420 			size = radeon_bo_size(track->db_bo);
421 			/* pitch in pixels */
422 			pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
423 			slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
424 			slice_tile_max *= 64;
425 			height = slice_tile_max / pitch;
426 			if (height > 8192)
427 				height = 8192;
428 			base_offset = track->db_bo_mc + track->db_offset;
429 			array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
430 			array_check.array_mode = array_mode;
431 			array_check.group_size = track->group_size;
432 			array_check.nbanks = track->nbanks;
433 			array_check.npipes = track->npipes;
434 			array_check.nsamples = track->nsamples;
435 			array_check.bpe = bpe;
436 			if (r600_get_array_mode_alignment(&array_check,
437 							  &pitch_align, &height_align, &depth_align, &base_align)) {
438 				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
439 					 G_028010_ARRAY_MODE(track->db_depth_info),
440 					 track->db_depth_info);
441 				return -EINVAL;
442 			}
443 			switch (array_mode) {
444 			case V_028010_ARRAY_1D_TILED_THIN1:
445 				/* don't break userspace */
446 				height &= ~0x7;
447 				break;
448 			case V_028010_ARRAY_2D_TILED_THIN1:
449 				break;
450 			default:
451 				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
452 					 G_028010_ARRAY_MODE(track->db_depth_info),
453 					 track->db_depth_info);
454 				return -EINVAL;
455 			}
456 
457 			if (!IS_ALIGNED(pitch, pitch_align)) {
458 				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
459 					 __func__, __LINE__, pitch);
460 				return -EINVAL;
461 			}
462 			if (!IS_ALIGNED(height, height_align)) {
463 				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
464 					 __func__, __LINE__, height);
465 				return -EINVAL;
466 			}
467 			if (!IS_ALIGNED(base_offset, base_align)) {
468 				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
469 				return -EINVAL;
470 			}
471 
472 			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
473 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
474 			tmp = ntiles * bpe * 64 * nviews;
475 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
476 				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
477 						track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
478 						radeon_bo_size(track->db_bo));
479 				return -EINVAL;
480 			}
481 		}
482 	}
483 	return 0;
484 }
485 
486 /**
487  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
488  * @parser:	parser structure holding parsing context.
489  * @pkt:	where to store packet information
490  *
491  * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the packet
492  * is bigger than the remaining ib size or if the packet type is unknown.
493  **/
494 int r600_cs_packet_parse(struct radeon_cs_parser *p,
495 			struct radeon_cs_packet *pkt,
496 			unsigned idx)
497 {
498 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
499 	uint32_t header;
500 
501 	if (idx >= ib_chunk->length_dw) {
502 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
503 			  idx, ib_chunk->length_dw);
504 		return -EINVAL;
505 	}
506 	header = radeon_get_ib_value(p, idx);
507 	pkt->idx = idx;
508 	pkt->type = CP_PACKET_GET_TYPE(header);
509 	pkt->count = CP_PACKET_GET_COUNT(header);
510 	pkt->one_reg_wr = 0;
511 	switch (pkt->type) {
512 	case PACKET_TYPE0:
513 		pkt->reg = CP_PACKET0_GET_REG(header);
514 		break;
515 	case PACKET_TYPE3:
516 		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
517 		break;
518 	case PACKET_TYPE2:
519 		pkt->count = -1;
520 		break;
521 	default:
522 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
523 		return -EINVAL;
524 	}
525 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
526 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
527 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
528 		return -EINVAL;
529 	}
530 	return 0;
531 }
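/* Illustration for r600_cs_packet_parse() above: with the usual PM4 encoding
 * (type in bits 31:30, count in bits 29:16, type-3 opcode in bits 15:8) a
 * header of 0xC0012800 would decode to a PACKET_TYPE3 with count = 1 and
 * opcode 0x28 (CONTEXT_CONTROL), so the packet occupies pkt->count + 2 = 3
 * dwords in the IB. The concrete bit layout lives in the CP_PACKET_GET_*
 * macros, not here.
 */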
532 
533 /**
534  * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
535  * @parser:		parser structure holding parsing context.
536  * @data:		pointer to relocation data
537  * @offset_start:	starting offset
538  * @offset_mask:	offset mask (to align start offset on)
539  * @reloc:		reloc information
540  *
541  * Check that the next packet is a relocation packet3, do bo validation and
542  * compute the GPU offset using the provided start.
543  **/
544 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
545 					struct radeon_cs_reloc **cs_reloc)
546 {
547 	struct radeon_cs_chunk *relocs_chunk;
548 	struct radeon_cs_packet p3reloc;
549 	unsigned idx;
550 	int r;
551 
552 	if (p->chunk_relocs_idx == -1) {
553 		DRM_ERROR("No relocation chunk !\n");
554 		return -EINVAL;
555 	}
556 	*cs_reloc = NULL;
557 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
558 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
559 	if (r) {
560 		return r;
561 	}
562 	p->idx += p3reloc.count + 2;
563 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
564 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
565 			  p3reloc.idx);
566 		return -EINVAL;
567 	}
568 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
569 	if (idx >= relocs_chunk->length_dw) {
570 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
571 			  idx, relocs_chunk->length_dw);
572 		return -EINVAL;
573 	}
574 	/* FIXME: we assume reloc size is 4 dwords */
575 	*cs_reloc = p->relocs_ptr[(idx / 4)];
576 	return 0;
577 }
578 
579 /**
580  * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
581  * @parser:		parser structure holding parsing context.
582  * @data:		pointer to relocation data
583  * @offset_start:	starting offset
584  * @offset_mask:	offset mask (to align start offset on)
585  * @reloc:		reloc information
586  *
587  * Check that the next packet is a relocation packet3, do bo validation and
588  * compute the GPU offset using the provided start.
589  **/
590 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
591 					struct radeon_cs_reloc **cs_reloc)
592 {
593 	struct radeon_cs_chunk *relocs_chunk;
594 	struct radeon_cs_packet p3reloc;
595 	unsigned idx;
596 	int r;
597 
598 	if (p->chunk_relocs_idx == -1) {
599 		DRM_ERROR("No relocation chunk !\n");
600 		return -EINVAL;
601 	}
602 	*cs_reloc = NULL;
603 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
604 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
605 	if (r) {
606 		return r;
607 	}
608 	p->idx += p3reloc.count + 2;
609 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
610 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
611 			  p3reloc.idx);
612 		return -EINVAL;
613 	}
614 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
615 	if (idx >= relocs_chunk->length_dw) {
616 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
617 			  idx, relocs_chunk->length_dw);
618 		return -EINVAL;
619 	}
620 	*cs_reloc = p->relocs;
621 	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
622 	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
623 	return 0;
624 }
625 
626 /**
627  * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
628  * @parser:		parser structure holding parsing context.
629  *
630  * Check whether the next packet is a packet3 NOP (as used to carry a
631  * relocation). Returns 1 if it is, 0 otherwise; p->idx is not advanced.
632  **/
633 static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
634 {
635 	struct radeon_cs_packet p3reloc;
636 	int r;
637 
638 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
639 	if (r) {
640 		return 0;
641 	}
642 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
643 		return 0;
644 	}
645 	return 1;
646 }
647 
648 /**
649  * r600_cs_packet_parse_vline() - parse userspace VLINE packet
650  * @parser:		parser structure holding parsing context.
651  *
652  * Userspace sends a special sequence for VLINE waits.
653  * PACKET0 - VLINE_START_END + value
654  * PACKET3 - WAIT_REG_MEM poll vline status reg
655  * RELOC (P3) - crtc_id in reloc.
656  *
657  * This function parses this and relocates the VLINE START END
658  * and WAIT_REG_MEM packets to the correct crtc.
659  * It also detects a switched off crtc and nulls out the
660  * wait in that case.
661  */
662 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
663 {
664 	struct drm_mode_object *obj;
665 	struct drm_crtc *crtc;
666 	struct radeon_crtc *radeon_crtc;
667 	struct radeon_cs_packet p3reloc, wait_reg_mem;
668 	int crtc_id;
669 	int r;
670 	uint32_t header, h_idx, reg, wait_reg_mem_info;
671 	volatile uint32_t *ib;
672 
673 	ib = p->ib->ptr;
674 
675 	/* parse the WAIT_REG_MEM */
676 	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
677 	if (r)
678 		return r;
679 
680 	/* check it's a WAIT_REG_MEM */
681 	if (wait_reg_mem.type != PACKET_TYPE3 ||
682 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
683 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
684 		r = -EINVAL;
685 		return r;
686 	}
687 
688 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
689 	/* bit 4 is reg (0) or mem (1) */
690 	if (wait_reg_mem_info & 0x10) {
691 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
692 		r = -EINVAL;
693 		return r;
694 	}
695 	/* waiting for value to be equal */
696 	if ((wait_reg_mem_info & 0x7) != 0x3) {
697 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
698 		r = -EINVAL;
699 		return r;
700 	}
701 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
702 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
703 		r = -EINVAL;
704 		return r;
705 	}
706 
707 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
708 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
709 		r = -EINVAL;
710 		return r;
711 	}
712 
713 	/* jump over the NOP */
714 	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
715 	if (r)
716 		return r;
717 
718 	h_idx = p->idx - 2;
719 	p->idx += wait_reg_mem.count + 2;
720 	p->idx += p3reloc.count + 2;
721 
722 	header = radeon_get_ib_value(p, h_idx);
723 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
724 	reg = CP_PACKET0_GET_REG(header);
725 
726 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
727 	if (!obj) {
728 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
729 		r = -EINVAL;
730 		goto out;
731 	}
732 	crtc = obj_to_crtc(obj);
733 	radeon_crtc = to_radeon_crtc(crtc);
734 	crtc_id = radeon_crtc->crtc_id;
735 
736 	if (!crtc->enabled) {
737 		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
738 		ib[h_idx + 2] = PACKET2(0);
739 		ib[h_idx + 3] = PACKET2(0);
740 		ib[h_idx + 4] = PACKET2(0);
741 		ib[h_idx + 5] = PACKET2(0);
742 		ib[h_idx + 6] = PACKET2(0);
743 		ib[h_idx + 7] = PACKET2(0);
744 		ib[h_idx + 8] = PACKET2(0);
745 	} else if (crtc_id == 1) {
746 		switch (reg) {
747 		case AVIVO_D1MODE_VLINE_START_END:
748 			header &= ~R600_CP_PACKET0_REG_MASK;
749 			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
750 			break;
751 		default:
752 			DRM_ERROR("unknown crtc reloc\n");
753 			r = -EINVAL;
754 			goto out;
755 		}
756 		ib[h_idx] = header;
757 		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
758 	}
759 out:
760 	return r;
761 }
762 
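/* Only one PACKET0 register write is tolerated on r600:
 * AVIVO_D1MODE_VLINE_START_END, which is handled by the vline fixup above.
 * Any other register in a type-0 packet is rejected.
 */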
763 static int r600_packet0_check(struct radeon_cs_parser *p,
764 				struct radeon_cs_packet *pkt,
765 				unsigned idx, unsigned reg)
766 {
767 	int r;
768 
769 	switch (reg) {
770 	case AVIVO_D1MODE_VLINE_START_END:
771 		r = r600_cs_packet_parse_vline(p);
772 		if (r) {
773 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
774 					idx, reg);
775 			return r;
776 		}
777 		break;
778 	default:
779 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
780 		       reg, idx);
781 		return -EINVAL;
782 	}
783 	return 0;
784 }
785 
786 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
787 				struct radeon_cs_packet *pkt)
788 {
789 	unsigned reg, i;
790 	unsigned idx;
791 	int r;
792 
793 	idx = pkt->idx + 1;
794 	reg = pkt->reg;
795 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
796 		r = r600_packet0_check(p, pkt, idx, reg);
797 		if (r) {
798 			return r;
799 		}
800 	}
801 	return 0;
802 }
803 
804 /**
805  * r600_cs_check_reg() - check if register is authorized or not
806  * @parser: parser structure holding parsing context
807  * @reg: register we are testing
808  * @idx: index into the cs buffer
809  *
810  * This function will test against r600_reg_safe_bm and return 0
811  * if the register is safe. If the register is not flagged as safe, this
812  * function will test it against a list of registers needing special handling.
813  */
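/* The bitmap lookup works on 32-register granules: i = reg >> 7 picks the u32
 * word covering a 128-byte (32 register) window and m = 1 << ((reg >> 2) & 31)
 * selects the register's bit within it. For example (illustrative), reg =
 * 0x028238 (CB_TARGET_MASK) gives i = 0x504 and bit 14 of that word.
 */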
814 static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
815 {
816 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
817 	struct radeon_cs_reloc *reloc;
818 	u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
819 	u32 m, i, tmp, *ib;
820 	int r;
821 
822 	i = (reg >> 7);
823 	if (i > last_reg) {
824 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
825 		return -EINVAL;
826 	}
827 	m = 1 << ((reg >> 2) & 31);
828 	if (!(r600_reg_safe_bm[i] & m))
829 		return 0;
830 	ib = p->ib->ptr;
831 	switch (reg) {
832 	/* force the following regs to 0 in an attempt to disable the output buffer,
833 	 * which we would need to understand better before we can perform a proper
834 	 * security check on it (Jerome)
835 	 */
836 	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
837 	case R_008C44_SQ_ESGS_RING_SIZE:
838 	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
839 	case R_008C54_SQ_ESTMP_RING_SIZE:
840 	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
841 	case R_008C74_SQ_FBUF_RING_SIZE:
842 	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
843 	case R_008C5C_SQ_GSTMP_RING_SIZE:
844 	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
845 	case R_008C4C_SQ_GSVS_RING_SIZE:
846 	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
847 	case R_008C6C_SQ_PSTMP_RING_SIZE:
848 	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
849 	case R_008C7C_SQ_REDUC_RING_SIZE:
850 	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
851 	case R_008C64_SQ_VSTMP_RING_SIZE:
852 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
853 		/* get the value to populate the IB, don't remove */
854 		tmp = radeon_get_ib_value(p, idx);
855 		ib[idx] = 0;
856 		break;
857 	case SQ_CONFIG:
858 		track->sq_config = radeon_get_ib_value(p, idx);
859 		break;
860 	case R_028800_DB_DEPTH_CONTROL:
861 		track->db_depth_control = radeon_get_ib_value(p, idx);
862 		break;
863 	case R_028010_DB_DEPTH_INFO:
864 		if (r600_cs_packet_next_is_pkt3_nop(p)) {
865 			r = r600_cs_packet_next_reloc(p, &reloc);
866 			if (r) {
867 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
868 					 "0x%04X\n", reg);
869 				return -EINVAL;
870 			}
871 			track->db_depth_info = radeon_get_ib_value(p, idx);
872 			ib[idx] &= C_028010_ARRAY_MODE;
873 			track->db_depth_info &= C_028010_ARRAY_MODE;
874 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
875 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
876 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
877 			} else {
878 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
879 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
880 			}
881 		} else
882 			track->db_depth_info = radeon_get_ib_value(p, idx);
883 		break;
884 	case R_028004_DB_DEPTH_VIEW:
885 		track->db_depth_view = radeon_get_ib_value(p, idx);
886 		break;
887 	case R_028000_DB_DEPTH_SIZE:
888 		track->db_depth_size = radeon_get_ib_value(p, idx);
889 		track->db_depth_size_idx = idx;
890 		break;
891 	case R_028AB0_VGT_STRMOUT_EN:
892 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
893 		break;
894 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
895 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
896 		break;
897 	case R_028238_CB_TARGET_MASK:
898 		track->cb_target_mask = radeon_get_ib_value(p, idx);
899 		break;
900 	case R_02823C_CB_SHADER_MASK:
901 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
902 		break;
903 	case R_028C04_PA_SC_AA_CONFIG:
904 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
905 		track->nsamples = 1 << tmp;
906 		break;
907 	case R_0280A0_CB_COLOR0_INFO:
908 	case R_0280A4_CB_COLOR1_INFO:
909 	case R_0280A8_CB_COLOR2_INFO:
910 	case R_0280AC_CB_COLOR3_INFO:
911 	case R_0280B0_CB_COLOR4_INFO:
912 	case R_0280B4_CB_COLOR5_INFO:
913 	case R_0280B8_CB_COLOR6_INFO:
914 	case R_0280BC_CB_COLOR7_INFO:
915 		if (r600_cs_packet_next_is_pkt3_nop(p)) {
916 			r = r600_cs_packet_next_reloc(p, &reloc);
917 			if (r) {
918 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
919 				return -EINVAL;
920 			}
921 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
922 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
923 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
924 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
925 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
926 			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
927 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
928 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
929 			}
930 		} else {
931 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
932 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
933 		}
934 		break;
935 	case R_028060_CB_COLOR0_SIZE:
936 	case R_028064_CB_COLOR1_SIZE:
937 	case R_028068_CB_COLOR2_SIZE:
938 	case R_02806C_CB_COLOR3_SIZE:
939 	case R_028070_CB_COLOR4_SIZE:
940 	case R_028074_CB_COLOR5_SIZE:
941 	case R_028078_CB_COLOR6_SIZE:
942 	case R_02807C_CB_COLOR7_SIZE:
943 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
944 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
945 		track->cb_color_size_idx[tmp] = idx;
946 		break;
947 		/* These registers were added late; there is userspace
948 		 * which does provide a relocation for them but with a
949 		 * 0 offset. In order to avoid breaking old userspace
950 		 * we detect this and set the address to point to the last
951 		 * CB_COLOR0_BASE. Note that if userspace doesn't set
952 		 * CB_COLOR0_BASE before this register we will report an
953 		 * error. Old userspace always sets CB_COLOR0_BASE
954 		 * before any of this.
955 		 */
956 	case R_0280E0_CB_COLOR0_FRAG:
957 	case R_0280E4_CB_COLOR1_FRAG:
958 	case R_0280E8_CB_COLOR2_FRAG:
959 	case R_0280EC_CB_COLOR3_FRAG:
960 	case R_0280F0_CB_COLOR4_FRAG:
961 	case R_0280F4_CB_COLOR5_FRAG:
962 	case R_0280F8_CB_COLOR6_FRAG:
963 	case R_0280FC_CB_COLOR7_FRAG:
964 		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
965 		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
966 			if (!track->cb_color_base_last[tmp]) {
967 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
968 				return -EINVAL;
969 			}
970 			ib[idx] = track->cb_color_base_last[tmp];
971 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
972 		} else {
973 			r = r600_cs_packet_next_reloc(p, &reloc);
974 			if (r) {
975 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
976 				return -EINVAL;
977 			}
978 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
979 			track->cb_color_frag_bo[tmp] = reloc->robj;
980 		}
981 		break;
982 	case R_0280C0_CB_COLOR0_TILE:
983 	case R_0280C4_CB_COLOR1_TILE:
984 	case R_0280C8_CB_COLOR2_TILE:
985 	case R_0280CC_CB_COLOR3_TILE:
986 	case R_0280D0_CB_COLOR4_TILE:
987 	case R_0280D4_CB_COLOR5_TILE:
988 	case R_0280D8_CB_COLOR6_TILE:
989 	case R_0280DC_CB_COLOR7_TILE:
990 		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
991 		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
992 			if (!track->cb_color_base_last[tmp]) {
993 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
994 				return -EINVAL;
995 			}
996 			ib[idx] = track->cb_color_base_last[tmp];
997 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
998 		} else {
999 			r = r600_cs_packet_next_reloc(p, &reloc);
1000 			if (r) {
1001 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1002 				return -EINVAL;
1003 			}
1004 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1005 			track->cb_color_tile_bo[tmp] = reloc->robj;
1006 		}
1007 		break;
1008 	case CB_COLOR0_BASE:
1009 	case CB_COLOR1_BASE:
1010 	case CB_COLOR2_BASE:
1011 	case CB_COLOR3_BASE:
1012 	case CB_COLOR4_BASE:
1013 	case CB_COLOR5_BASE:
1014 	case CB_COLOR6_BASE:
1015 	case CB_COLOR7_BASE:
1016 		r = r600_cs_packet_next_reloc(p, &reloc);
1017 		if (r) {
1018 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1019 					"0x%04X\n", reg);
1020 			return -EINVAL;
1021 		}
1022 		tmp = (reg - CB_COLOR0_BASE) / 4;
1023 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1024 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1025 		track->cb_color_base_last[tmp] = ib[idx];
1026 		track->cb_color_bo[tmp] = reloc->robj;
1027 		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
1028 		break;
1029 	case DB_DEPTH_BASE:
1030 		r = r600_cs_packet_next_reloc(p, &reloc);
1031 		if (r) {
1032 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1033 					"0x%04X\n", reg);
1034 			return -EINVAL;
1035 		}
1036 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
1037 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1038 		track->db_bo = reloc->robj;
1039 		track->db_bo_mc = reloc->lobj.gpu_offset;
1040 		break;
1041 	case DB_HTILE_DATA_BASE:
1042 	case SQ_PGM_START_FS:
1043 	case SQ_PGM_START_ES:
1044 	case SQ_PGM_START_VS:
1045 	case SQ_PGM_START_GS:
1046 	case SQ_PGM_START_PS:
1047 	case SQ_ALU_CONST_CACHE_GS_0:
1048 	case SQ_ALU_CONST_CACHE_GS_1:
1049 	case SQ_ALU_CONST_CACHE_GS_2:
1050 	case SQ_ALU_CONST_CACHE_GS_3:
1051 	case SQ_ALU_CONST_CACHE_GS_4:
1052 	case SQ_ALU_CONST_CACHE_GS_5:
1053 	case SQ_ALU_CONST_CACHE_GS_6:
1054 	case SQ_ALU_CONST_CACHE_GS_7:
1055 	case SQ_ALU_CONST_CACHE_GS_8:
1056 	case SQ_ALU_CONST_CACHE_GS_9:
1057 	case SQ_ALU_CONST_CACHE_GS_10:
1058 	case SQ_ALU_CONST_CACHE_GS_11:
1059 	case SQ_ALU_CONST_CACHE_GS_12:
1060 	case SQ_ALU_CONST_CACHE_GS_13:
1061 	case SQ_ALU_CONST_CACHE_GS_14:
1062 	case SQ_ALU_CONST_CACHE_GS_15:
1063 	case SQ_ALU_CONST_CACHE_PS_0:
1064 	case SQ_ALU_CONST_CACHE_PS_1:
1065 	case SQ_ALU_CONST_CACHE_PS_2:
1066 	case SQ_ALU_CONST_CACHE_PS_3:
1067 	case SQ_ALU_CONST_CACHE_PS_4:
1068 	case SQ_ALU_CONST_CACHE_PS_5:
1069 	case SQ_ALU_CONST_CACHE_PS_6:
1070 	case SQ_ALU_CONST_CACHE_PS_7:
1071 	case SQ_ALU_CONST_CACHE_PS_8:
1072 	case SQ_ALU_CONST_CACHE_PS_9:
1073 	case SQ_ALU_CONST_CACHE_PS_10:
1074 	case SQ_ALU_CONST_CACHE_PS_11:
1075 	case SQ_ALU_CONST_CACHE_PS_12:
1076 	case SQ_ALU_CONST_CACHE_PS_13:
1077 	case SQ_ALU_CONST_CACHE_PS_14:
1078 	case SQ_ALU_CONST_CACHE_PS_15:
1079 	case SQ_ALU_CONST_CACHE_VS_0:
1080 	case SQ_ALU_CONST_CACHE_VS_1:
1081 	case SQ_ALU_CONST_CACHE_VS_2:
1082 	case SQ_ALU_CONST_CACHE_VS_3:
1083 	case SQ_ALU_CONST_CACHE_VS_4:
1084 	case SQ_ALU_CONST_CACHE_VS_5:
1085 	case SQ_ALU_CONST_CACHE_VS_6:
1086 	case SQ_ALU_CONST_CACHE_VS_7:
1087 	case SQ_ALU_CONST_CACHE_VS_8:
1088 	case SQ_ALU_CONST_CACHE_VS_9:
1089 	case SQ_ALU_CONST_CACHE_VS_10:
1090 	case SQ_ALU_CONST_CACHE_VS_11:
1091 	case SQ_ALU_CONST_CACHE_VS_12:
1092 	case SQ_ALU_CONST_CACHE_VS_13:
1093 	case SQ_ALU_CONST_CACHE_VS_14:
1094 	case SQ_ALU_CONST_CACHE_VS_15:
1095 		r = r600_cs_packet_next_reloc(p, &reloc);
1096 		if (r) {
1097 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1098 					"0x%04X\n", reg);
1099 			return -EINVAL;
1100 		}
1101 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1102 		break;
1103 	default:
1104 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1105 		return -EINVAL;
1106 	}
1107 	return 0;
1108 }
1109 
1110 static inline unsigned minify(unsigned size, unsigned levels)
1111 {
1112 	size = size >> levels;
1113 	if (size < 1)
1114 		size = 1;
1115 	return size;
1116 }
1117 
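/* Compute the footprint of a texture: *l0_size is the base level (width * bpe
 * rounded up to pitch_align, times height and depth) and *mipmap_size is the
 * sum of all requested levels across all faces, with each face rounded up to a
 * 32-byte boundary. When blevel is 0 the base level is subtracted again,
 * presumably because it lives in the texture bo rather than the mipmap bo.
 */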
1118 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
1119 			      unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
1120 			      unsigned pitch_align,
1121 			      unsigned *l0_size, unsigned *mipmap_size)
1122 {
1123 	unsigned offset, i, level, face;
1124 	unsigned width, height, depth, rowstride, size;
1125 
1126 	w0 = minify(w0, 0);
1127 	h0 = minify(h0, 0);
1128 	d0 = minify(d0, 0);
1129 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1130 		width = minify(w0, i);
1131 		height = minify(h0, i);
1132 		depth = minify(d0, i);
1133 		for(face = 0; face < nfaces; face++) {
1134 			rowstride = ALIGN((width * bpe), pitch_align);
1135 			size = height * rowstride * depth;
1136 			offset += size;
1137 			offset = (offset + 0x1f) & ~0x1f;
1138 		}
1139 	}
1140 	*l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
1141 	*mipmap_size = offset;
1142 	if (!nlevels)
1143 		*mipmap_size = *l0_size;
1144 	if (!blevel)
1145 		*mipmap_size -= *l0_size;
1146 }
1147 
1148 /**
1149  * r600_check_texture_resource() - check that a texture resource is valid
1150  * @p: parser structure holding parsing context
1151  * @idx: index into the cs buffer
1152  * @texture: texture's bo structure
1153  * @mipmap: mipmap's bo structure
1154  *
1155  * This function will check that the resource has valid fields and that
1156  * the texture and mipmap bo objects are big enough to cover this resource.
1157  */
1158 static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
1159 					      struct radeon_bo *texture,
1160 					      struct radeon_bo *mipmap,
1161 					      u64 base_offset,
1162 					      u64 mip_offset,
1163 					      u32 tiling_flags)
1164 {
1165 	struct r600_cs_track *track = p->track;
1166 	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
1167 	u32 word0, word1, l0_size, mipmap_size;
1168 	u32 height_align, pitch, pitch_align, depth_align;
1169 	u64 base_align;
1170 	struct array_mode_checker array_check;
1171 
1172 	/* on a legacy kernel we don't perform the advanced checks */
1173 	if (p->rdev == NULL)
1174 		return 0;
1175 
1176 	/* convert to bytes */
1177 	base_offset <<= 8;
1178 	mip_offset <<= 8;
1179 
1180 	word0 = radeon_get_ib_value(p, idx + 0);
1181 	if (tiling_flags & RADEON_TILING_MACRO)
1182 		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1183 	else if (tiling_flags & RADEON_TILING_MICRO)
1184 		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1185 	word1 = radeon_get_ib_value(p, idx + 1);
1186 	w0 = G_038000_TEX_WIDTH(word0) + 1;
1187 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
1188 	d0 = G_038004_TEX_DEPTH(word1);
1189 	nfaces = 1;
1190 	switch (G_038000_DIM(word0)) {
1191 	case V_038000_SQ_TEX_DIM_1D:
1192 	case V_038000_SQ_TEX_DIM_2D:
1193 	case V_038000_SQ_TEX_DIM_3D:
1194 		break;
1195 	case V_038000_SQ_TEX_DIM_CUBEMAP:
1196 		nfaces = 6;
1197 		break;
1198 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
1199 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
1200 	case V_038000_SQ_TEX_DIM_2D_MSAA:
1201 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1202 	default:
1203 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1204 		return -EINVAL;
1205 	}
1206 	if (r600_bpe_from_format(&bpe,  G_038004_DATA_FORMAT(word1))) {
1207 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1208 			 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
1209 		return -EINVAL;
1210 	}
1211 
1212 	/* pitch in texels */
1213 	pitch = (G_038000_PITCH(word0) + 1) * 8;
1214 	array_check.array_mode = G_038000_TILE_MODE(word0);
1215 	array_check.group_size = track->group_size;
1216 	array_check.nbanks = track->nbanks;
1217 	array_check.npipes = track->npipes;
1218 	array_check.nsamples = 1;
1219 	array_check.bpe = bpe;
1220 	if (r600_get_array_mode_alignment(&array_check,
1221 					  &pitch_align, &height_align, &depth_align, &base_align)) {
1222 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1223 			 __func__, __LINE__, G_038000_TILE_MODE(word0));
1224 		return -EINVAL;
1225 	}
1226 
1227 	/* XXX check height as well... */
1228 
1229 	if (!IS_ALIGNED(pitch, pitch_align)) {
1230 		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
1231 			 __func__, __LINE__, pitch);
1232 		return -EINVAL;
1233 	}
1234 	if (!IS_ALIGNED(base_offset, base_align)) {
1235 		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
1236 			 __func__, __LINE__, base_offset);
1237 		return -EINVAL;
1238 	}
1239 	if (!IS_ALIGNED(mip_offset, base_align)) {
1240 		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
1241 			 __func__, __LINE__, mip_offset);
1242 		return -EINVAL;
1243 	}
1244 
1245 	word0 = radeon_get_ib_value(p, idx + 4);
1246 	word1 = radeon_get_ib_value(p, idx + 5);
1247 	blevel = G_038010_BASE_LEVEL(word0);
1248 	nlevels = G_038014_LAST_LEVEL(word1);
1249 	r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
1250 			  (pitch_align * bpe),
1251 			  &l0_size, &mipmap_size);
1252 	/* using get ib will give us the offset into the texture bo */
1253 	word0 = radeon_get_ib_value(p, idx + 2) << 8;
1254 	if ((l0_size + word0) > radeon_bo_size(texture)) {
1255 		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
1256 			w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
1257 		return -EINVAL;
1258 	}
1259 	/* using get ib will give us the offset into the mipmap bo */
1260 	word0 = radeon_get_ib_value(p, idx + 3) << 8;
1261 	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
1262 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1263 		  w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
1264 	}
1265 	return 0;
1266 }
1267 
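/* Validate a single type-3 packet: check the dword count expected for the
 * opcode, patch relocations into the IB where the packet carries GPU
 * addresses, range-check the SET_*_REG/CONST windows against their packet3
 * offsets, and run the full track check before any draw is allowed.
 */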
1268 static int r600_packet3_check(struct radeon_cs_parser *p,
1269 				struct radeon_cs_packet *pkt)
1270 {
1271 	struct radeon_cs_reloc *reloc;
1272 	struct r600_cs_track *track;
1273 	volatile u32 *ib;
1274 	unsigned idx;
1275 	unsigned i;
1276 	unsigned start_reg, end_reg, reg;
1277 	int r;
1278 	u32 idx_value;
1279 
1280 	track = (struct r600_cs_track *)p->track;
1281 	ib = p->ib->ptr;
1282 	idx = pkt->idx + 1;
1283 	idx_value = radeon_get_ib_value(p, idx);
1284 
1285 	switch (pkt->opcode) {
1286 	case PACKET3_START_3D_CMDBUF:
1287 		if (p->family >= CHIP_RV770 || pkt->count) {
1288 			DRM_ERROR("bad START_3D\n");
1289 			return -EINVAL;
1290 		}
1291 		break;
1292 	case PACKET3_CONTEXT_CONTROL:
1293 		if (pkt->count != 1) {
1294 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1295 			return -EINVAL;
1296 		}
1297 		break;
1298 	case PACKET3_INDEX_TYPE:
1299 	case PACKET3_NUM_INSTANCES:
1300 		if (pkt->count) {
1301 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1302 			return -EINVAL;
1303 		}
1304 		break;
1305 	case PACKET3_DRAW_INDEX:
1306 		if (pkt->count != 3) {
1307 			DRM_ERROR("bad DRAW_INDEX\n");
1308 			return -EINVAL;
1309 		}
1310 		r = r600_cs_packet_next_reloc(p, &reloc);
1311 		if (r) {
1312 			DRM_ERROR("bad DRAW_INDEX\n");
1313 			return -EINVAL;
1314 		}
1315 		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1316 		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1317 		r = r600_cs_track_check(p);
1318 		if (r) {
1319 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1320 			return r;
1321 		}
1322 		break;
1323 	case PACKET3_DRAW_INDEX_AUTO:
1324 		if (pkt->count != 1) {
1325 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1326 			return -EINVAL;
1327 		}
1328 		r = r600_cs_track_check(p);
1329 		if (r) {
1330 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1331 			return r;
1332 		}
1333 		break;
1334 	case PACKET3_DRAW_INDEX_IMMD_BE:
1335 	case PACKET3_DRAW_INDEX_IMMD:
1336 		if (pkt->count < 2) {
1337 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1338 			return -EINVAL;
1339 		}
1340 		r = r600_cs_track_check(p);
1341 		if (r) {
1342 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1343 			return r;
1344 		}
1345 		break;
1346 	case PACKET3_WAIT_REG_MEM:
1347 		if (pkt->count != 5) {
1348 			DRM_ERROR("bad WAIT_REG_MEM\n");
1349 			return -EINVAL;
1350 		}
1351 		/* bit 4 is reg (0) or mem (1) */
1352 		if (idx_value & 0x10) {
1353 			r = r600_cs_packet_next_reloc(p, &reloc);
1354 			if (r) {
1355 				DRM_ERROR("bad WAIT_REG_MEM\n");
1356 				return -EINVAL;
1357 			}
1358 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1359 			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1360 		}
1361 		break;
1362 	case PACKET3_SURFACE_SYNC:
1363 		if (pkt->count != 3) {
1364 			DRM_ERROR("bad SURFACE_SYNC\n");
1365 			return -EINVAL;
1366 		}
1367 		/* 0xffffffff/0x0 is flush all cache flag */
1368 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1369 		    radeon_get_ib_value(p, idx + 2) != 0) {
1370 			r = r600_cs_packet_next_reloc(p, &reloc);
1371 			if (r) {
1372 				DRM_ERROR("bad SURFACE_SYNC\n");
1373 				return -EINVAL;
1374 			}
1375 			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1376 		}
1377 		break;
1378 	case PACKET3_EVENT_WRITE:
1379 		if (pkt->count != 2 && pkt->count != 0) {
1380 			DRM_ERROR("bad EVENT_WRITE\n");
1381 			return -EINVAL;
1382 		}
1383 		if (pkt->count) {
1384 			r = r600_cs_packet_next_reloc(p, &reloc);
1385 			if (r) {
1386 				DRM_ERROR("bad EVENT_WRITE\n");
1387 				return -EINVAL;
1388 			}
1389 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1390 			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1391 		}
1392 		break;
1393 	case PACKET3_EVENT_WRITE_EOP:
1394 		if (pkt->count != 4) {
1395 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
1396 			return -EINVAL;
1397 		}
1398 		r = r600_cs_packet_next_reloc(p, &reloc);
1399 		if (r) {
1400 			DRM_ERROR("bad EVENT_WRITE\n");
1401 			return -EINVAL;
1402 		}
1403 		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1404 		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1405 		break;
1406 	case PACKET3_SET_CONFIG_REG:
1407 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1408 		end_reg = 4 * pkt->count + start_reg - 4;
1409 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1410 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1411 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1412 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1413 			return -EINVAL;
1414 		}
1415 		for (i = 0; i < pkt->count; i++) {
1416 			reg = start_reg + (4 * i);
1417 			r = r600_cs_check_reg(p, reg, idx+1+i);
1418 			if (r)
1419 				return r;
1420 		}
1421 		break;
1422 	case PACKET3_SET_CONTEXT_REG:
1423 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1424 		end_reg = 4 * pkt->count + start_reg - 4;
1425 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1426 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1427 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1428 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1429 			return -EINVAL;
1430 		}
1431 		for (i = 0; i < pkt->count; i++) {
1432 			reg = start_reg + (4 * i);
1433 			r = r600_cs_check_reg(p, reg, idx+1+i);
1434 			if (r)
1435 				return r;
1436 		}
1437 		break;
1438 	case PACKET3_SET_RESOURCE:
1439 		if (pkt->count % 7) {
1440 			DRM_ERROR("bad SET_RESOURCE\n");
1441 			return -EINVAL;
1442 		}
1443 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1444 		end_reg = 4 * pkt->count + start_reg - 4;
1445 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1446 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
1447 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
1448 			DRM_ERROR("bad SET_RESOURCE\n");
1449 			return -EINVAL;
1450 		}
1451 		for (i = 0; i < (pkt->count / 7); i++) {
1452 			struct radeon_bo *texture, *mipmap;
1453 			u32 size, offset, base_offset, mip_offset;
1454 
1455 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1456 			case SQ_TEX_VTX_VALID_TEXTURE:
1457 				/* tex base */
1458 				r = r600_cs_packet_next_reloc(p, &reloc);
1459 				if (r) {
1460 					DRM_ERROR("bad SET_RESOURCE\n");
1461 					return -EINVAL;
1462 				}
1463 				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1464 				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1465 					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1466 				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1467 					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1468 				texture = reloc->robj;
1469 				/* tex mip base */
1470 				r = r600_cs_packet_next_reloc(p, &reloc);
1471 				if (r) {
1472 					DRM_ERROR("bad SET_RESOURCE\n");
1473 					return -EINVAL;
1474 				}
1475 				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1476 				mipmap = reloc->robj;
1477 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
1478 								texture, mipmap,
1479 								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1480 								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1481 								reloc->lobj.tiling_flags);
1482 				if (r)
1483 					return r;
1484 				ib[idx+1+(i*7)+2] += base_offset;
1485 				ib[idx+1+(i*7)+3] += mip_offset;
1486 				break;
1487 			case SQ_TEX_VTX_VALID_BUFFER:
1488 				/* vtx base */
1489 				r = r600_cs_packet_next_reloc(p, &reloc);
1490 				if (r) {
1491 					DRM_ERROR("bad SET_RESOURCE\n");
1492 					return -EINVAL;
1493 				}
1494 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1495 				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
1496 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1497 					/* force size to size of the buffer */
1498 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
1499 						 size + offset, radeon_bo_size(reloc->robj));
1500 					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
1501 				}
1502 				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
1503 				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1504 				break;
1505 			case SQ_TEX_VTX_INVALID_TEXTURE:
1506 			case SQ_TEX_VTX_INVALID_BUFFER:
1507 			default:
1508 				DRM_ERROR("bad SET_RESOURCE\n");
1509 				return -EINVAL;
1510 			}
1511 		}
1512 		break;
1513 	case PACKET3_SET_ALU_CONST:
1514 		if (track->sq_config & DX9_CONSTS) {
1515 			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
1516 			end_reg = 4 * pkt->count + start_reg - 4;
1517 			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
1518 			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
1519 			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
1520 				DRM_ERROR("bad SET_ALU_CONST\n");
1521 				return -EINVAL;
1522 			}
1523 		}
1524 		break;
1525 	case PACKET3_SET_BOOL_CONST:
1526 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
1527 		end_reg = 4 * pkt->count + start_reg - 4;
1528 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
1529 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
1530 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
1531 			DRM_ERROR("bad SET_BOOL_CONST\n");
1532 			return -EINVAL;
1533 		}
1534 		break;
1535 	case PACKET3_SET_LOOP_CONST:
1536 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
1537 		end_reg = 4 * pkt->count + start_reg - 4;
1538 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
1539 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
1540 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
1541 			DRM_ERROR("bad SET_LOOP_CONST\n");
1542 			return -EINVAL;
1543 		}
1544 		break;
1545 	case PACKET3_SET_CTL_CONST:
1546 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
1547 		end_reg = 4 * pkt->count + start_reg - 4;
1548 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
1549 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
1550 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
1551 			DRM_ERROR("bad SET_CTL_CONST\n");
1552 			return -EINVAL;
1553 		}
1554 		break;
1555 	case PACKET3_SET_SAMPLER:
1556 		if (pkt->count % 3) {
1557 			DRM_ERROR("bad SET_SAMPLER\n");
1558 			return -EINVAL;
1559 		}
1560 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
1561 		end_reg = 4 * pkt->count + start_reg - 4;
1562 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
1563 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
1564 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
1565 			DRM_ERROR("bad SET_SAMPLER\n");
1566 			return -EINVAL;
1567 		}
1568 		break;
1569 	case PACKET3_SURFACE_BASE_UPDATE:
1570 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
1571 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
1572 			return -EINVAL;
1573 		}
1574 		if (pkt->count) {
1575 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
1576 			return -EINVAL;
1577 		}
1578 		break;
1579 	case PACKET3_NOP:
1580 		break;
1581 	default:
1582 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1583 		return -EINVAL;
1584 	}
1585 	return 0;
1586 }
1587 
1588 int r600_cs_parse(struct radeon_cs_parser *p)
1589 {
1590 	struct radeon_cs_packet pkt;
1591 	struct r600_cs_track *track;
1592 	int r;
1593 
1594 	if (p->track == NULL) {
1595 		/* initialize tracker, we are in kms */
1596 		track = kzalloc(sizeof(*track), GFP_KERNEL);
1597 		if (track == NULL)
1598 			return -ENOMEM;
1599 		r600_cs_track_init(track);
1600 		if (p->rdev->family < CHIP_RV770) {
1601 			track->npipes = p->rdev->config.r600.tiling_npipes;
1602 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
1603 			track->group_size = p->rdev->config.r600.tiling_group_size;
1604 		} else if (p->rdev->family <= CHIP_RV740) {
1605 			track->npipes = p->rdev->config.rv770.tiling_npipes;
1606 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
1607 			track->group_size = p->rdev->config.rv770.tiling_group_size;
1608 		}
1609 		p->track = track;
1610 	}
1611 	do {
1612 		r = r600_cs_packet_parse(p, &pkt, p->idx);
1613 		if (r) {
1614 			kfree(p->track);
1615 			p->track = NULL;
1616 			return r;
1617 		}
1618 		p->idx += pkt.count + 2;
1619 		switch (pkt.type) {
1620 		case PACKET_TYPE0:
1621 			r = r600_cs_parse_packet0(p, &pkt);
1622 			break;
1623 		case PACKET_TYPE2:
1624 			break;
1625 		case PACKET_TYPE3:
1626 			r = r600_packet3_check(p, &pkt);
1627 			break;
1628 		default:
1629 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1630 			kfree(p->track);
1631 			p->track = NULL;
1632 			return -EINVAL;
1633 		}
1634 		if (r) {
1635 			kfree(p->track);
1636 			p->track = NULL;
1637 			return r;
1638 		}
1639 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1640 #if 0
1641 	for (r = 0; r < p->ib->length_dw; r++) {
1642 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
1643 		mdelay(1);
1644 	}
1645 #endif
1646 	kfree(p->track);
1647 	p->track = NULL;
1648 	return 0;
1649 }
1650 
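/* Legacy (non-KMS) path: there is no memory manager, so a single dummy reloc
 * is allocated here and r600_cs_packet_next_reloc_nomm() later fills its
 * gpu_offset straight from the relocation chunk supplied by userspace.
 */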
1651 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
1652 {
1653 	if (p->chunk_relocs_idx == -1) {
1654 		return 0;
1655 	}
1656 	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
1657 	if (p->relocs == NULL) {
1658 		return -ENOMEM;
1659 	}
1660 	return 0;
1661 }
1662 
1663 /**
1664  * r600_cs_parser_fini() - clean parser states
1665  * @parser:	parser structure holding parsing context.
1666  * @error:	error number
1667  *
1668  * If error is set then unvalidate the buffer, otherwise just free the memory
1669  * used by the parsing context.
1670  **/
1671 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
1672 {
1673 	unsigned i;
1674 
1675 	kfree(parser->relocs);
1676 	for (i = 0; i < parser->nchunks; i++) {
1677 		kfree(parser->chunks[i].kdata);
1678 		kfree(parser->chunks[i].kpage[0]);
1679 		kfree(parser->chunks[i].kpage[1]);
1680 	}
1681 	kfree(parser->chunks);
1682 	kfree(parser->chunks_array);
1683 }
1684 
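/* Entry point for the legacy UMS command submission path. A parser is built
 * around a fake IB with no rdev (so the advanced track checks are skipped),
 * the stream is checked, the patched commands end up in the caller-supplied
 * ib buffer, and the dword count is returned through *l.
 */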
1685 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
1686 			unsigned family, u32 *ib, int *l)
1687 {
1688 	struct radeon_cs_parser parser;
1689 	struct radeon_cs_chunk *ib_chunk;
1690 	struct radeon_ib fake_ib;
1691 	struct r600_cs_track *track;
1692 	int r;
1693 
1694 	/* initialize tracker */
1695 	track = kzalloc(sizeof(*track), GFP_KERNEL);
1696 	if (track == NULL)
1697 		return -ENOMEM;
1698 	r600_cs_track_init(track);
1699 	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
1700 	/* initialize parser */
1701 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
1702 	parser.filp = filp;
1703 	parser.dev = &dev->pdev->dev;
1704 	parser.rdev = NULL;
1705 	parser.family = family;
1706 	parser.ib = &fake_ib;
1707 	parser.track = track;
1708 	fake_ib.ptr = ib;
1709 	r = radeon_cs_parser_init(&parser, data);
1710 	if (r) {
1711 		DRM_ERROR("Failed to initialize parser !\n");
1712 		r600_cs_parser_fini(&parser, r);
1713 		return r;
1714 	}
1715 	r = r600_cs_parser_relocs_legacy(&parser);
1716 	if (r) {
1717 		DRM_ERROR("Failed to parse relocation !\n");
1718 		r600_cs_parser_fini(&parser, r);
1719 		return r;
1720 	}
1721 	/* Copy the packet into the IB; the parser will read from the
1722 	 * input memory (cached) and write to the IB (which can be
1723 	 * uncached). */
1724 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
1725 	parser.ib->length_dw = ib_chunk->length_dw;
1726 	*l = parser.ib->length_dw;
1727 	r = r600_cs_parse(&parser);
1728 	if (r) {
1729 		DRM_ERROR("Invalid command stream !\n");
1730 		r600_cs_parser_fini(&parser, r);
1731 		return r;
1732 	}
1733 	r = radeon_cs_finish_pages(&parser);
1734 	if (r) {
1735 		DRM_ERROR("Invalid command stream !\n");
1736 		r600_cs_parser_fini(&parser, r);
1737 		return r;
1738 	}
1739 	r600_cs_parser_fini(&parser, r);
1740 	return r;
1741 }
1742 
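/* Intended to be called by the legacy (UMS) driver so that the reloc helper
 * used above points at the no-memory-manager variant instead of the KMS one.
 */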
1743 void r600_cs_legacy_init(void)
1744 {
1745 	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
1746 }
1747