xref: /openbmc/linux/drivers/gpu/drm/radeon/r600_cs.c (revision c4ee0af3)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/kernel.h>
29 #include <drm/drmP.h>
30 #include "radeon.h"
31 #include "r600d.h"
32 #include "r600_reg_safe.h"
33 
34 static int r600_nomm;
35 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
36 
37 
38 struct r600_cs_track {
39 	/* configuration we miror so that we use same code btw kms/ums */
40 	u32			group_size;
41 	u32			nbanks;
42 	u32			npipes;
43 	/* value we track */
44 	u32			sq_config;
45 	u32			log_nsamples;
46 	u32			nsamples;
47 	u32			cb_color_base_last[8];
48 	struct radeon_bo	*cb_color_bo[8];
49 	u64			cb_color_bo_mc[8];
50 	u64			cb_color_bo_offset[8];
51 	struct radeon_bo	*cb_color_frag_bo[8];
52 	u64			cb_color_frag_offset[8];
53 	struct radeon_bo	*cb_color_tile_bo[8];
54 	u64			cb_color_tile_offset[8];
55 	u32			cb_color_mask[8];
56 	u32			cb_color_info[8];
57 	u32			cb_color_view[8];
58 	u32			cb_color_size_idx[8]; /* unused */
59 	u32			cb_target_mask;
60 	u32			cb_shader_mask;  /* unused */
61 	bool			is_resolve;
62 	u32			cb_color_size[8];
63 	u32			vgt_strmout_en;
64 	u32			vgt_strmout_buffer_en;
65 	struct radeon_bo	*vgt_strmout_bo[4];
66 	u64			vgt_strmout_bo_mc[4]; /* unused */
67 	u32			vgt_strmout_bo_offset[4];
68 	u32			vgt_strmout_size[4];
69 	u32			db_depth_control;
70 	u32			db_depth_info;
71 	u32			db_depth_size_idx;
72 	u32			db_depth_view;
73 	u32			db_depth_size;
74 	u32			db_offset;
75 	struct radeon_bo	*db_bo;
76 	u64			db_bo_mc;
77 	bool			sx_misc_kill_all_prims;
78 	bool			cb_dirty;
79 	bool			db_dirty;
80 	bool			streamout_dirty;
81 	struct radeon_bo	*htile_bo;
82 	u64			htile_offset;
83 	u32			htile_surface;
84 };
85 
86 #define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
87 #define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
88 #define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
89 #define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
90 #define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
91 #define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
92 #define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
93 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
94 
95 struct gpu_formats {
96 	unsigned blockwidth;
97 	unsigned blockheight;
98 	unsigned blocksize;
99 	unsigned valid_color;
100 	enum radeon_family min_family;
101 };
102 
103 static const struct gpu_formats color_formats_table[] = {
104 	/* 8 bit */
105 	FMT_8_BIT(V_038004_COLOR_8, 1),
106 	FMT_8_BIT(V_038004_COLOR_4_4, 1),
107 	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
108 	FMT_8_BIT(V_038004_FMT_1, 0),
109 
110 	/* 16-bit */
111 	FMT_16_BIT(V_038004_COLOR_16, 1),
112 	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
113 	FMT_16_BIT(V_038004_COLOR_8_8, 1),
114 	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
115 	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
116 	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
117 	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
118 	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
119 
120 	/* 24-bit */
121 	FMT_24_BIT(V_038004_FMT_8_8_8),
122 
123 	/* 32-bit */
124 	FMT_32_BIT(V_038004_COLOR_32, 1),
125 	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
126 	FMT_32_BIT(V_038004_COLOR_16_16, 1),
127 	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
128 	FMT_32_BIT(V_038004_COLOR_8_24, 1),
129 	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
130 	FMT_32_BIT(V_038004_COLOR_24_8, 1),
131 	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
132 	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
133 	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
134 	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
135 	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
136 	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
137 	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
138 	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
139 	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
140 	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
141 	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
142 
143 	/* 48-bit */
144 	FMT_48_BIT(V_038004_FMT_16_16_16),
145 	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
146 
147 	/* 64-bit */
148 	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
149 	FMT_64_BIT(V_038004_COLOR_32_32, 1),
150 	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
151 	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
152 	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
153 
154 	FMT_96_BIT(V_038004_FMT_32_32_32),
155 	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
156 
157 	/* 128-bit */
158 	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
159 	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
160 
161 	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
162 	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
163 
164 	/* block compressed formats */
165 	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
166 	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
167 	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
168 	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
169 	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
170 	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
171 	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
172 
173 	/* The other Evergreen formats */
174 	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
175 };
176 
177 bool r600_fmt_is_valid_color(u32 format)
178 {
179 	if (format >= ARRAY_SIZE(color_formats_table))
180 		return false;
181 
182 	if (color_formats_table[format].valid_color)
183 		return true;
184 
185 	return false;
186 }
187 
188 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
189 {
190 	if (format >= ARRAY_SIZE(color_formats_table))
191 		return false;
192 
193 	if (family < color_formats_table[format].min_family)
194 		return false;
195 
196 	if (color_formats_table[format].blockwidth > 0)
197 		return true;
198 
199 	return false;
200 }
201 
202 int r600_fmt_get_blocksize(u32 format)
203 {
204 	if (format >= ARRAY_SIZE(color_formats_table))
205 		return 0;
206 
207 	return color_formats_table[format].blocksize;
208 }
209 
210 int r600_fmt_get_nblocksx(u32 format, u32 w)
211 {
212 	unsigned bw;
213 
214 	if (format >= ARRAY_SIZE(color_formats_table))
215 		return 0;
216 
217 	bw = color_formats_table[format].blockwidth;
218 	if (bw == 0)
219 		return 0;
220 
221 	return (w + bw - 1) / bw;
222 }
223 
224 int r600_fmt_get_nblocksy(u32 format, u32 h)
225 {
226 	unsigned bh;
227 
228 	if (format >= ARRAY_SIZE(color_formats_table))
229 		return 0;
230 
231 	bh = color_formats_table[format].blockheight;
232 	if (bh == 0)
233 		return 0;
234 
235 	return (h + bh - 1) / bh;
236 }
237 
238 struct array_mode_checker {
239 	int array_mode;
240 	u32 group_size;
241 	u32 nbanks;
242 	u32 npipes;
243 	u32 nsamples;
244 	u32 blocksize;
245 };
246 
247 /* returns alignment in pixels for pitch/height/depth and bytes for base */
248 static int r600_get_array_mode_alignment(struct array_mode_checker *values,
249 						u32 *pitch_align,
250 						u32 *height_align,
251 						u32 *depth_align,
252 						u64 *base_align)
253 {
254 	u32 tile_width = 8;
255 	u32 tile_height = 8;
256 	u32 macro_tile_width = values->nbanks;
257 	u32 macro_tile_height = values->npipes;
258 	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
259 	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
260 
261 	switch (values->array_mode) {
262 	case ARRAY_LINEAR_GENERAL:
263 		/* technically tile_width/_height for pitch/height */
264 		*pitch_align = 1; /* tile_width */
265 		*height_align = 1; /* tile_height */
266 		*depth_align = 1;
267 		*base_align = 1;
268 		break;
269 	case ARRAY_LINEAR_ALIGNED:
270 		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
271 		*height_align = 1;
272 		*depth_align = 1;
273 		*base_align = values->group_size;
274 		break;
275 	case ARRAY_1D_TILED_THIN1:
276 		*pitch_align = max((u32)tile_width,
277 				   (u32)(values->group_size /
278 					 (tile_height * values->blocksize * values->nsamples)));
279 		*height_align = tile_height;
280 		*depth_align = 1;
281 		*base_align = values->group_size;
282 		break;
283 	case ARRAY_2D_TILED_THIN1:
284 		*pitch_align = max((u32)macro_tile_width * tile_width,
285 				(u32)((values->group_size * values->nbanks) /
286 				(values->blocksize * values->nsamples * tile_width)));
287 		*height_align = macro_tile_height * tile_height;
288 		*depth_align = 1;
289 		*base_align = max(macro_tile_bytes,
290 				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
291 		break;
292 	default:
293 		return -EINVAL;
294 	}
295 
296 	return 0;
297 }
298 
299 static void r600_cs_track_init(struct r600_cs_track *track)
300 {
301 	int i;
302 
303 	/* assume DX9 mode */
304 	track->sq_config = DX9_CONSTS;
305 	for (i = 0; i < 8; i++) {
306 		track->cb_color_base_last[i] = 0;
307 		track->cb_color_size[i] = 0;
308 		track->cb_color_size_idx[i] = 0;
309 		track->cb_color_info[i] = 0;
310 		track->cb_color_view[i] = 0xFFFFFFFF;
311 		track->cb_color_bo[i] = NULL;
312 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
313 		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
314 		track->cb_color_frag_bo[i] = NULL;
315 		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
316 		track->cb_color_tile_bo[i] = NULL;
317 		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
318 		track->cb_color_mask[i] = 0xFFFFFFFF;
319 	}
320 	track->is_resolve = false;
321 	track->nsamples = 16;
322 	track->log_nsamples = 4;
323 	track->cb_target_mask = 0xFFFFFFFF;
324 	track->cb_shader_mask = 0xFFFFFFFF;
325 	track->cb_dirty = true;
326 	track->db_bo = NULL;
327 	track->db_bo_mc = 0xFFFFFFFF;
328 	/* assume the biggest format and that htile is enabled */
329 	track->db_depth_info = 7 | (1 << 25);
330 	track->db_depth_view = 0xFFFFC000;
331 	track->db_depth_size = 0xFFFFFFFF;
332 	track->db_depth_size_idx = 0;
333 	track->db_depth_control = 0xFFFFFFFF;
334 	track->db_dirty = true;
335 	track->htile_bo = NULL;
336 	track->htile_offset = 0xFFFFFFFF;
337 	track->htile_surface = 0;
338 
339 	for (i = 0; i < 4; i++) {
340 		track->vgt_strmout_size[i] = 0;
341 		track->vgt_strmout_bo[i] = NULL;
342 		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
343 		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
344 	}
345 	track->streamout_dirty = true;
346 	track->sx_misc_kill_all_prims = false;
347 }
348 
349 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
350 {
351 	struct r600_cs_track *track = p->track;
352 	u32 slice_tile_max, size, tmp;
353 	u32 height, height_align, pitch, pitch_align, depth_align;
354 	u64 base_offset, base_align;
355 	struct array_mode_checker array_check;
356 	volatile u32 *ib = p->ib.ptr;
357 	unsigned array_mode;
358 	u32 format;
359 	/* When resolve is used, the second colorbuffer has always 1 sample. */
360 	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
361 
362 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
363 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
364 	if (!r600_fmt_is_valid_color(format)) {
365 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
366 			 __func__, __LINE__, format,
367 			i, track->cb_color_info[i]);
368 		return -EINVAL;
369 	}
370 	/* pitch in pixels */
371 	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
372 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
373 	slice_tile_max *= 64;
374 	height = slice_tile_max / pitch;
375 	if (height > 8192)
376 		height = 8192;
377 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
378 
379 	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
380 	array_check.array_mode = array_mode;
381 	array_check.group_size = track->group_size;
382 	array_check.nbanks = track->nbanks;
383 	array_check.npipes = track->npipes;
384 	array_check.nsamples = nsamples;
385 	array_check.blocksize = r600_fmt_get_blocksize(format);
386 	if (r600_get_array_mode_alignment(&array_check,
387 					  &pitch_align, &height_align, &depth_align, &base_align)) {
388 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
389 			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
390 			 track->cb_color_info[i]);
391 		return -EINVAL;
392 	}
393 	switch (array_mode) {
394 	case V_0280A0_ARRAY_LINEAR_GENERAL:
395 		break;
396 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
397 		break;
398 	case V_0280A0_ARRAY_1D_TILED_THIN1:
399 		/* avoid breaking userspace */
400 		if (height > 7)
401 			height &= ~0x7;
402 		break;
403 	case V_0280A0_ARRAY_2D_TILED_THIN1:
404 		break;
405 	default:
406 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
407 			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
408 			track->cb_color_info[i]);
409 		return -EINVAL;
410 	}
411 
412 	if (!IS_ALIGNED(pitch, pitch_align)) {
413 		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
414 			 __func__, __LINE__, pitch, pitch_align, array_mode);
415 		return -EINVAL;
416 	}
417 	if (!IS_ALIGNED(height, height_align)) {
418 		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
419 			 __func__, __LINE__, height, height_align, array_mode);
420 		return -EINVAL;
421 	}
422 	if (!IS_ALIGNED(base_offset, base_align)) {
423 		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
424 			 base_offset, base_align, array_mode);
425 		return -EINVAL;
426 	}
427 
428 	/* check offset */
429 	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
430 	      r600_fmt_get_blocksize(format) * nsamples;
431 	switch (array_mode) {
432 	default:
433 	case V_0280A0_ARRAY_LINEAR_GENERAL:
434 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
435 		tmp += track->cb_color_view[i] & 0xFF;
436 		break;
437 	case V_0280A0_ARRAY_1D_TILED_THIN1:
438 	case V_0280A0_ARRAY_2D_TILED_THIN1:
439 		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
440 		break;
441 	}
442 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
443 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
444 			/* the initial DDX does bad things with the CB size occasionally */
445 			/* it rounds up height too far for slice tile max but the BO is smaller */
446 			/* r600c,g also seem to flush at bad times in some apps resulting in
447 			 * bogus values here. So for linear just allow anything to avoid breaking
448 			 * broken userspace.
449 			 */
450 		} else {
451 			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
452 				 __func__, i, array_mode,
453 				 track->cb_color_bo_offset[i], tmp,
454 				 radeon_bo_size(track->cb_color_bo[i]),
455 				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
456 				 r600_fmt_get_nblocksy(format, height),
457 				 r600_fmt_get_blocksize(format));
458 			return -EINVAL;
459 		}
460 	}
461 	/* limit max tile */
462 	tmp = (height * pitch) >> 6;
463 	if (tmp < slice_tile_max)
464 		slice_tile_max = tmp;
465 	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
466 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
467 	ib[track->cb_color_size_idx[i]] = tmp;
468 
469 	/* FMASK/CMASK */
470 	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
471 	case V_0280A0_TILE_DISABLE:
472 		break;
473 	case V_0280A0_FRAG_ENABLE:
474 		if (track->nsamples > 1) {
475 			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
476 			/* the tile size is 8x8, but the size is in units of bits.
477 			 * for bytes, do just * 8. */
478 			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
479 
480 			if (bytes + track->cb_color_frag_offset[i] >
481 			    radeon_bo_size(track->cb_color_frag_bo[i])) {
482 				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
483 					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
484 					 __func__, tile_max, bytes,
485 					 track->cb_color_frag_offset[i],
486 					 radeon_bo_size(track->cb_color_frag_bo[i]));
487 				return -EINVAL;
488 			}
489 		}
490 		/* fall through */
491 	case V_0280A0_CLEAR_ENABLE:
492 	{
493 		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
494 		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
495 		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
496 		uint32_t bytes = (block_max + 1) * 128;
497 
498 		if (bytes + track->cb_color_tile_offset[i] >
499 		    radeon_bo_size(track->cb_color_tile_bo[i])) {
500 			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
501 				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
502 				 __func__, block_max, bytes,
503 				 track->cb_color_tile_offset[i],
504 				 radeon_bo_size(track->cb_color_tile_bo[i]));
505 			return -EINVAL;
506 		}
507 		break;
508 	}
509 	default:
510 		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
511 		return -EINVAL;
512 	}
513 	return 0;
514 }
515 
516 static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
517 {
518 	struct r600_cs_track *track = p->track;
519 	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
520 	u32 height_align, pitch_align, depth_align;
521 	u32 pitch = 8192;
522 	u32 height = 8192;
523 	u64 base_offset, base_align;
524 	struct array_mode_checker array_check;
525 	int array_mode;
526 	volatile u32 *ib = p->ib.ptr;
527 
528 
529 	if (track->db_bo == NULL) {
530 		dev_warn(p->dev, "z/stencil with no depth buffer\n");
531 		return -EINVAL;
532 	}
533 	switch (G_028010_FORMAT(track->db_depth_info)) {
534 	case V_028010_DEPTH_16:
535 		bpe = 2;
536 		break;
537 	case V_028010_DEPTH_X8_24:
538 	case V_028010_DEPTH_8_24:
539 	case V_028010_DEPTH_X8_24_FLOAT:
540 	case V_028010_DEPTH_8_24_FLOAT:
541 	case V_028010_DEPTH_32_FLOAT:
542 		bpe = 4;
543 		break;
544 	case V_028010_DEPTH_X24_8_32_FLOAT:
545 		bpe = 8;
546 		break;
547 	default:
548 		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
549 		return -EINVAL;
550 	}
551 	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
552 		if (!track->db_depth_size_idx) {
553 			dev_warn(p->dev, "z/stencil buffer size not set\n");
554 			return -EINVAL;
555 		}
556 		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
557 		tmp = (tmp / bpe) >> 6;
558 		if (!tmp) {
559 			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
560 					track->db_depth_size, bpe, track->db_offset,
561 					radeon_bo_size(track->db_bo));
562 			return -EINVAL;
563 		}
564 		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
565 	} else {
566 		size = radeon_bo_size(track->db_bo);
567 		/* pitch in pixels */
568 		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
569 		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
570 		slice_tile_max *= 64;
571 		height = slice_tile_max / pitch;
572 		if (height > 8192)
573 			height = 8192;
574 		base_offset = track->db_bo_mc + track->db_offset;
575 		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
576 		array_check.array_mode = array_mode;
577 		array_check.group_size = track->group_size;
578 		array_check.nbanks = track->nbanks;
579 		array_check.npipes = track->npipes;
580 		array_check.nsamples = track->nsamples;
581 		array_check.blocksize = bpe;
582 		if (r600_get_array_mode_alignment(&array_check,
583 					&pitch_align, &height_align, &depth_align, &base_align)) {
584 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
585 					G_028010_ARRAY_MODE(track->db_depth_info),
586 					track->db_depth_info);
587 			return -EINVAL;
588 		}
589 		switch (array_mode) {
590 		case V_028010_ARRAY_1D_TILED_THIN1:
591 			/* don't break userspace */
592 			height &= ~0x7;
593 			break;
594 		case V_028010_ARRAY_2D_TILED_THIN1:
595 			break;
596 		default:
597 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
598 					G_028010_ARRAY_MODE(track->db_depth_info),
599 					track->db_depth_info);
600 			return -EINVAL;
601 		}
602 
603 		if (!IS_ALIGNED(pitch, pitch_align)) {
604 			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
605 					__func__, __LINE__, pitch, pitch_align, array_mode);
606 			return -EINVAL;
607 		}
608 		if (!IS_ALIGNED(height, height_align)) {
609 			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
610 					__func__, __LINE__, height, height_align, array_mode);
611 			return -EINVAL;
612 		}
613 		if (!IS_ALIGNED(base_offset, base_align)) {
614 			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
615 					base_offset, base_align, array_mode);
616 			return -EINVAL;
617 		}
618 
619 		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
620 		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
621 		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
622 		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
623 			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
624 					array_mode,
625 					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
626 					radeon_bo_size(track->db_bo));
627 			return -EINVAL;
628 		}
629 	}
630 
631 	/* hyperz */
632 	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
633 		unsigned long size;
634 		unsigned nbx, nby;
635 
636 		if (track->htile_bo == NULL) {
637 			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
638 				 __func__, __LINE__, track->db_depth_info);
639 			return -EINVAL;
640 		}
641 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
642 			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
643 				 __func__, __LINE__, track->db_depth_size);
644 			return -EINVAL;
645 		}
646 
647 		nbx = pitch;
648 		nby = height;
649 		if (G_028D24_LINEAR(track->htile_surface)) {
650 			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
651 			nbx = round_up(nbx, 16 * 8);
652 			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
653 			nby = round_up(nby, track->npipes * 8);
654 		} else {
655 			/* always assume 8x8 htile */
656 			/* align is htile align * 8, htile align vary according to
657 			 * number of pipe and tile width and nby
658 			 */
659 			switch (track->npipes) {
660 			case 8:
661 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
662 				nbx = round_up(nbx, 64 * 8);
663 				nby = round_up(nby, 64 * 8);
664 				break;
665 			case 4:
666 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
667 				nbx = round_up(nbx, 64 * 8);
668 				nby = round_up(nby, 32 * 8);
669 				break;
670 			case 2:
671 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
672 				nbx = round_up(nbx, 32 * 8);
673 				nby = round_up(nby, 32 * 8);
674 				break;
675 			case 1:
676 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
677 				nbx = round_up(nbx, 32 * 8);
678 				nby = round_up(nby, 16 * 8);
679 				break;
680 			default:
681 				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
682 					 __func__, __LINE__, track->npipes);
683 				return -EINVAL;
684 			}
685 		}
686 		/* compute number of htile */
687 		nbx = nbx >> 3;
688 		nby = nby >> 3;
689 		/* size must be aligned on npipes * 2K boundary */
690 		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
691 		size += track->htile_offset;
692 
693 		if (size > radeon_bo_size(track->htile_bo)) {
694 			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
695 				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
696 				 size, nbx, nby);
697 			return -EINVAL;
698 		}
699 	}
700 
701 	track->db_dirty = false;
702 	return 0;
703 }
704 
705 static int r600_cs_track_check(struct radeon_cs_parser *p)
706 {
707 	struct r600_cs_track *track = p->track;
708 	u32 tmp;
709 	int r, i;
710 
711 	/* on legacy kernel we don't perform advanced check */
712 	if (p->rdev == NULL)
713 		return 0;
714 
715 	/* check streamout */
716 	if (track->streamout_dirty && track->vgt_strmout_en) {
717 		for (i = 0; i < 4; i++) {
718 			if (track->vgt_strmout_buffer_en & (1 << i)) {
719 				if (track->vgt_strmout_bo[i]) {
720 					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
721 						(u64)track->vgt_strmout_size[i];
722 					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
723 						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
724 							  i, offset,
725 							  radeon_bo_size(track->vgt_strmout_bo[i]));
726 						return -EINVAL;
727 					}
728 				} else {
729 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
730 					return -EINVAL;
731 				}
732 			}
733 		}
734 		track->streamout_dirty = false;
735 	}
736 
737 	if (track->sx_misc_kill_all_prims)
738 		return 0;
739 
740 	/* check that we have a cb for each enabled target, we don't check
741 	 * shader_mask because it seems mesa isn't always setting it :(
742 	 */
743 	if (track->cb_dirty) {
744 		tmp = track->cb_target_mask;
745 
746 		/* We must check both colorbuffers for RESOLVE. */
747 		if (track->is_resolve) {
748 			tmp |= 0xff;
749 		}
750 
751 		for (i = 0; i < 8; i++) {
752 			if ((tmp >> (i * 4)) & 0xF) {
753 				/* at least one component is enabled */
754 				if (track->cb_color_bo[i] == NULL) {
755 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
756 						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
757 					return -EINVAL;
758 				}
759 				/* perform rewrite of CB_COLOR[0-7]_SIZE */
760 				r = r600_cs_track_validate_cb(p, i);
761 				if (r)
762 					return r;
763 			}
764 		}
765 		track->cb_dirty = false;
766 	}
767 
768 	/* Check depth buffer */
769 	if (track->db_dirty &&
770 	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
771 	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
772 	     G_028800_Z_ENABLE(track->db_depth_control))) {
773 		r = r600_cs_track_validate_db(p);
774 		if (r)
775 			return r;
776 	}
777 
778 	return 0;
779 }
780 
781 /**
782  * r600_cs_packet_parse_vline() - parse userspace VLINE packet
783  * @parser:		parser structure holding parsing context.
784  *
785  * This is an R600-specific function for parsing VLINE packets.
786  * Real work is done by r600_cs_common_vline_parse function.
787  * Here we just set up ASIC-specific register table and call
788  * the common implementation function.
789  */
790 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
791 {
792 	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
793 					      AVIVO_D2MODE_VLINE_START_END};
794 	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
795 					   AVIVO_D2MODE_VLINE_STATUS};
796 
797 	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
798 }
799 
800 /**
801  * r600_cs_common_vline_parse() - common vline parser
802  * @parser:		parser structure holding parsing context.
803  * @vline_start_end:    table of vline_start_end registers
804  * @vline_status:       table of vline_status registers
805  *
806  * Userspace sends a special sequence for VLINE waits.
807  * PACKET0 - VLINE_START_END + value
808  * PACKET3 - WAIT_REG_MEM poll vline status reg
809  * RELOC (P3) - crtc_id in reloc.
810  *
811  * This function parses this and relocates the VLINE START END
812  * and WAIT_REG_MEM packets to the correct crtc.
813  * It also detects a switched off crtc and nulls out the
814  * wait in that case. This function is common for all ASICs that
815  * are R600 and newer. The parsing algorithm is the same, and only
816  * differs in which registers are used.
817  *
818  * Caller is the ASIC-specific function which passes the parser
819  * context and ASIC-specific register table
820  */
821 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
822 			       uint32_t *vline_start_end,
823 			       uint32_t *vline_status)
824 {
825 	struct drm_mode_object *obj;
826 	struct drm_crtc *crtc;
827 	struct radeon_crtc *radeon_crtc;
828 	struct radeon_cs_packet p3reloc, wait_reg_mem;
829 	int crtc_id;
830 	int r;
831 	uint32_t header, h_idx, reg, wait_reg_mem_info;
832 	volatile uint32_t *ib;
833 
834 	ib = p->ib.ptr;
835 
836 	/* parse the WAIT_REG_MEM */
837 	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
838 	if (r)
839 		return r;
840 
841 	/* check its a WAIT_REG_MEM */
842 	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
843 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
844 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
845 		return -EINVAL;
846 	}
847 
848 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
849 	/* bit 4 is reg (0) or mem (1) */
850 	if (wait_reg_mem_info & 0x10) {
851 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
852 		return -EINVAL;
853 	}
854 	/* bit 8 is me (0) or pfp (1) */
855 	if (wait_reg_mem_info & 0x100) {
856 		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
857 		return -EINVAL;
858 	}
859 	/* waiting for value to be equal */
860 	if ((wait_reg_mem_info & 0x7) != 0x3) {
861 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
862 		return -EINVAL;
863 	}
864 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
865 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
866 		return -EINVAL;
867 	}
868 
869 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
870 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
871 		return -EINVAL;
872 	}
873 
874 	/* jump over the NOP */
875 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
876 	if (r)
877 		return r;
878 
879 	h_idx = p->idx - 2;
880 	p->idx += wait_reg_mem.count + 2;
881 	p->idx += p3reloc.count + 2;
882 
883 	header = radeon_get_ib_value(p, h_idx);
884 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
885 	reg = R600_CP_PACKET0_GET_REG(header);
886 
887 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
888 	if (!obj) {
889 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
890 		return -ENOENT;
891 	}
892 	crtc = obj_to_crtc(obj);
893 	radeon_crtc = to_radeon_crtc(crtc);
894 	crtc_id = radeon_crtc->crtc_id;
895 
896 	if (!crtc->enabled) {
897 		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
898 		ib[h_idx + 2] = PACKET2(0);
899 		ib[h_idx + 3] = PACKET2(0);
900 		ib[h_idx + 4] = PACKET2(0);
901 		ib[h_idx + 5] = PACKET2(0);
902 		ib[h_idx + 6] = PACKET2(0);
903 		ib[h_idx + 7] = PACKET2(0);
904 		ib[h_idx + 8] = PACKET2(0);
905 	} else if (reg == vline_start_end[0]) {
906 		header &= ~R600_CP_PACKET0_REG_MASK;
907 		header |= vline_start_end[crtc_id] >> 2;
908 		ib[h_idx] = header;
909 		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
910 	} else {
911 		DRM_ERROR("unknown crtc reloc\n");
912 		return -EINVAL;
913 	}
914 	return 0;
915 }
916 
917 static int r600_packet0_check(struct radeon_cs_parser *p,
918 				struct radeon_cs_packet *pkt,
919 				unsigned idx, unsigned reg)
920 {
921 	int r;
922 
923 	switch (reg) {
924 	case AVIVO_D1MODE_VLINE_START_END:
925 		r = r600_cs_packet_parse_vline(p);
926 		if (r) {
927 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
928 					idx, reg);
929 			return r;
930 		}
931 		break;
932 	default:
933 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
934 		       reg, idx);
935 		return -EINVAL;
936 	}
937 	return 0;
938 }
939 
940 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
941 				struct radeon_cs_packet *pkt)
942 {
943 	unsigned reg, i;
944 	unsigned idx;
945 	int r;
946 
947 	idx = pkt->idx + 1;
948 	reg = pkt->reg;
949 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
950 		r = r600_packet0_check(p, pkt, idx, reg);
951 		if (r) {
952 			return r;
953 		}
954 	}
955 	return 0;
956 }
957 
958 /**
959  * r600_cs_check_reg() - check if register is authorized or not
960  * @parser: parser structure holding parsing context
961  * @reg: register we are testing
962  * @idx: index into the cs buffer
963  *
964  * This function will test against r600_reg_safe_bm and return 0
965  * if register is safe. If register is not flag as safe this function
966  * will test it against a list of register needind special handling.
967  */
968 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
969 {
970 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
971 	struct radeon_cs_reloc *reloc;
972 	u32 m, i, tmp, *ib;
973 	int r;
974 
975 	i = (reg >> 7);
976 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
977 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
978 		return -EINVAL;
979 	}
980 	m = 1 << ((reg >> 2) & 31);
981 	if (!(r600_reg_safe_bm[i] & m))
982 		return 0;
983 	ib = p->ib.ptr;
984 	switch (reg) {
985 	/* force following reg to 0 in an attempt to disable out buffer
986 	 * which will need us to better understand how it works to perform
987 	 * security check on it (Jerome)
988 	 */
989 	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
990 	case R_008C44_SQ_ESGS_RING_SIZE:
991 	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
992 	case R_008C54_SQ_ESTMP_RING_SIZE:
993 	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
994 	case R_008C74_SQ_FBUF_RING_SIZE:
995 	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
996 	case R_008C5C_SQ_GSTMP_RING_SIZE:
997 	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
998 	case R_008C4C_SQ_GSVS_RING_SIZE:
999 	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
1000 	case R_008C6C_SQ_PSTMP_RING_SIZE:
1001 	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
1002 	case R_008C7C_SQ_REDUC_RING_SIZE:
1003 	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
1004 	case R_008C64_SQ_VSTMP_RING_SIZE:
1005 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
1006 		/* get value to populate the IB don't remove */
1007 		tmp =radeon_get_ib_value(p, idx);
1008 		ib[idx] = 0;
1009 		break;
1010 	case SQ_CONFIG:
1011 		track->sq_config = radeon_get_ib_value(p, idx);
1012 		break;
1013 	case R_028800_DB_DEPTH_CONTROL:
1014 		track->db_depth_control = radeon_get_ib_value(p, idx);
1015 		track->db_dirty = true;
1016 		break;
1017 	case R_028010_DB_DEPTH_INFO:
1018 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1019 		    radeon_cs_packet_next_is_pkt3_nop(p)) {
1020 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1021 			if (r) {
1022 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1023 					 "0x%04X\n", reg);
1024 				return -EINVAL;
1025 			}
1026 			track->db_depth_info = radeon_get_ib_value(p, idx);
1027 			ib[idx] &= C_028010_ARRAY_MODE;
1028 			track->db_depth_info &= C_028010_ARRAY_MODE;
1029 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1030 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1031 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1032 			} else {
1033 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1034 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1035 			}
1036 		} else {
1037 			track->db_depth_info = radeon_get_ib_value(p, idx);
1038 		}
1039 		track->db_dirty = true;
1040 		break;
1041 	case R_028004_DB_DEPTH_VIEW:
1042 		track->db_depth_view = radeon_get_ib_value(p, idx);
1043 		track->db_dirty = true;
1044 		break;
1045 	case R_028000_DB_DEPTH_SIZE:
1046 		track->db_depth_size = radeon_get_ib_value(p, idx);
1047 		track->db_depth_size_idx = idx;
1048 		track->db_dirty = true;
1049 		break;
1050 	case R_028AB0_VGT_STRMOUT_EN:
1051 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
1052 		track->streamout_dirty = true;
1053 		break;
1054 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
1055 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
1056 		track->streamout_dirty = true;
1057 		break;
1058 	case VGT_STRMOUT_BUFFER_BASE_0:
1059 	case VGT_STRMOUT_BUFFER_BASE_1:
1060 	case VGT_STRMOUT_BUFFER_BASE_2:
1061 	case VGT_STRMOUT_BUFFER_BASE_3:
1062 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1063 		if (r) {
1064 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1065 					"0x%04X\n", reg);
1066 			return -EINVAL;
1067 		}
1068 		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1069 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1070 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1071 		track->vgt_strmout_bo[tmp] = reloc->robj;
1072 		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
1073 		track->streamout_dirty = true;
1074 		break;
1075 	case VGT_STRMOUT_BUFFER_SIZE_0:
1076 	case VGT_STRMOUT_BUFFER_SIZE_1:
1077 	case VGT_STRMOUT_BUFFER_SIZE_2:
1078 	case VGT_STRMOUT_BUFFER_SIZE_3:
1079 		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1080 		/* size in register is DWs, convert to bytes */
1081 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1082 		track->streamout_dirty = true;
1083 		break;
1084 	case CP_COHER_BASE:
1085 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1086 		if (r) {
1087 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1088 					"0x%04X\n", reg);
1089 			return -EINVAL;
1090 		}
1091 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1092 		break;
1093 	case R_028238_CB_TARGET_MASK:
1094 		track->cb_target_mask = radeon_get_ib_value(p, idx);
1095 		track->cb_dirty = true;
1096 		break;
1097 	case R_02823C_CB_SHADER_MASK:
1098 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
1099 		break;
1100 	case R_028C04_PA_SC_AA_CONFIG:
1101 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1102 		track->log_nsamples = tmp;
1103 		track->nsamples = 1 << tmp;
1104 		track->cb_dirty = true;
1105 		break;
1106 	case R_028808_CB_COLOR_CONTROL:
1107 		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1108 		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1109 		track->cb_dirty = true;
1110 		break;
1111 	case R_0280A0_CB_COLOR0_INFO:
1112 	case R_0280A4_CB_COLOR1_INFO:
1113 	case R_0280A8_CB_COLOR2_INFO:
1114 	case R_0280AC_CB_COLOR3_INFO:
1115 	case R_0280B0_CB_COLOR4_INFO:
1116 	case R_0280B4_CB_COLOR5_INFO:
1117 	case R_0280B8_CB_COLOR6_INFO:
1118 	case R_0280BC_CB_COLOR7_INFO:
1119 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1120 		     radeon_cs_packet_next_is_pkt3_nop(p)) {
1121 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1122 			if (r) {
1123 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1124 				return -EINVAL;
1125 			}
1126 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1127 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1128 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1129 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1130 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1131 			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1132 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1133 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1134 			}
1135 		} else {
1136 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1137 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1138 		}
1139 		track->cb_dirty = true;
1140 		break;
1141 	case R_028080_CB_COLOR0_VIEW:
1142 	case R_028084_CB_COLOR1_VIEW:
1143 	case R_028088_CB_COLOR2_VIEW:
1144 	case R_02808C_CB_COLOR3_VIEW:
1145 	case R_028090_CB_COLOR4_VIEW:
1146 	case R_028094_CB_COLOR5_VIEW:
1147 	case R_028098_CB_COLOR6_VIEW:
1148 	case R_02809C_CB_COLOR7_VIEW:
1149 		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
1150 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1151 		track->cb_dirty = true;
1152 		break;
1153 	case R_028060_CB_COLOR0_SIZE:
1154 	case R_028064_CB_COLOR1_SIZE:
1155 	case R_028068_CB_COLOR2_SIZE:
1156 	case R_02806C_CB_COLOR3_SIZE:
1157 	case R_028070_CB_COLOR4_SIZE:
1158 	case R_028074_CB_COLOR5_SIZE:
1159 	case R_028078_CB_COLOR6_SIZE:
1160 	case R_02807C_CB_COLOR7_SIZE:
1161 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
1162 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
1163 		track->cb_color_size_idx[tmp] = idx;
1164 		track->cb_dirty = true;
1165 		break;
1166 		/* This register were added late, there is userspace
1167 		 * which does provide relocation for those but set
1168 		 * 0 offset. In order to avoid breaking old userspace
1169 		 * we detect this and set address to point to last
1170 		 * CB_COLOR0_BASE, note that if userspace doesn't set
1171 		 * CB_COLOR0_BASE before this register we will report
1172 		 * error. Old userspace always set CB_COLOR0_BASE
1173 		 * before any of this.
1174 		 */
1175 	case R_0280E0_CB_COLOR0_FRAG:
1176 	case R_0280E4_CB_COLOR1_FRAG:
1177 	case R_0280E8_CB_COLOR2_FRAG:
1178 	case R_0280EC_CB_COLOR3_FRAG:
1179 	case R_0280F0_CB_COLOR4_FRAG:
1180 	case R_0280F4_CB_COLOR5_FRAG:
1181 	case R_0280F8_CB_COLOR6_FRAG:
1182 	case R_0280FC_CB_COLOR7_FRAG:
1183 		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1184 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1185 			if (!track->cb_color_base_last[tmp]) {
1186 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1187 				return -EINVAL;
1188 			}
1189 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1190 			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1191 			ib[idx] = track->cb_color_base_last[tmp];
1192 		} else {
1193 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1194 			if (r) {
1195 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1196 				return -EINVAL;
1197 			}
1198 			track->cb_color_frag_bo[tmp] = reloc->robj;
1199 			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1200 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1201 		}
1202 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1203 			track->cb_dirty = true;
1204 		}
1205 		break;
1206 	case R_0280C0_CB_COLOR0_TILE:
1207 	case R_0280C4_CB_COLOR1_TILE:
1208 	case R_0280C8_CB_COLOR2_TILE:
1209 	case R_0280CC_CB_COLOR3_TILE:
1210 	case R_0280D0_CB_COLOR4_TILE:
1211 	case R_0280D4_CB_COLOR5_TILE:
1212 	case R_0280D8_CB_COLOR6_TILE:
1213 	case R_0280DC_CB_COLOR7_TILE:
1214 		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1215 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1216 			if (!track->cb_color_base_last[tmp]) {
1217 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1218 				return -EINVAL;
1219 			}
1220 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1221 			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1222 			ib[idx] = track->cb_color_base_last[tmp];
1223 		} else {
1224 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1225 			if (r) {
1226 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1227 				return -EINVAL;
1228 			}
1229 			track->cb_color_tile_bo[tmp] = reloc->robj;
1230 			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1231 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1232 		}
1233 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1234 			track->cb_dirty = true;
1235 		}
1236 		break;
1237 	case R_028100_CB_COLOR0_MASK:
1238 	case R_028104_CB_COLOR1_MASK:
1239 	case R_028108_CB_COLOR2_MASK:
1240 	case R_02810C_CB_COLOR3_MASK:
1241 	case R_028110_CB_COLOR4_MASK:
1242 	case R_028114_CB_COLOR5_MASK:
1243 	case R_028118_CB_COLOR6_MASK:
1244 	case R_02811C_CB_COLOR7_MASK:
1245 		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1246 		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1247 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1248 			track->cb_dirty = true;
1249 		}
1250 		break;
1251 	case CB_COLOR0_BASE:
1252 	case CB_COLOR1_BASE:
1253 	case CB_COLOR2_BASE:
1254 	case CB_COLOR3_BASE:
1255 	case CB_COLOR4_BASE:
1256 	case CB_COLOR5_BASE:
1257 	case CB_COLOR6_BASE:
1258 	case CB_COLOR7_BASE:
1259 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1260 		if (r) {
1261 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1262 					"0x%04X\n", reg);
1263 			return -EINVAL;
1264 		}
1265 		tmp = (reg - CB_COLOR0_BASE) / 4;
1266 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1267 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1268 		track->cb_color_base_last[tmp] = ib[idx];
1269 		track->cb_color_bo[tmp] = reloc->robj;
1270 		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
1271 		track->cb_dirty = true;
1272 		break;
1273 	case DB_DEPTH_BASE:
1274 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1275 		if (r) {
1276 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1277 					"0x%04X\n", reg);
1278 			return -EINVAL;
1279 		}
1280 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
1281 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1282 		track->db_bo = reloc->robj;
1283 		track->db_bo_mc = reloc->lobj.gpu_offset;
1284 		track->db_dirty = true;
1285 		break;
1286 	case DB_HTILE_DATA_BASE:
1287 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1288 		if (r) {
1289 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1290 					"0x%04X\n", reg);
1291 			return -EINVAL;
1292 		}
1293 		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
1294 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1295 		track->htile_bo = reloc->robj;
1296 		track->db_dirty = true;
1297 		break;
1298 	case DB_HTILE_SURFACE:
1299 		track->htile_surface = radeon_get_ib_value(p, idx);
1300 		/* force 8x8 htile width and height */
1301 		ib[idx] |= 3;
1302 		track->db_dirty = true;
1303 		break;
1304 	case SQ_PGM_START_FS:
1305 	case SQ_PGM_START_ES:
1306 	case SQ_PGM_START_VS:
1307 	case SQ_PGM_START_GS:
1308 	case SQ_PGM_START_PS:
1309 	case SQ_ALU_CONST_CACHE_GS_0:
1310 	case SQ_ALU_CONST_CACHE_GS_1:
1311 	case SQ_ALU_CONST_CACHE_GS_2:
1312 	case SQ_ALU_CONST_CACHE_GS_3:
1313 	case SQ_ALU_CONST_CACHE_GS_4:
1314 	case SQ_ALU_CONST_CACHE_GS_5:
1315 	case SQ_ALU_CONST_CACHE_GS_6:
1316 	case SQ_ALU_CONST_CACHE_GS_7:
1317 	case SQ_ALU_CONST_CACHE_GS_8:
1318 	case SQ_ALU_CONST_CACHE_GS_9:
1319 	case SQ_ALU_CONST_CACHE_GS_10:
1320 	case SQ_ALU_CONST_CACHE_GS_11:
1321 	case SQ_ALU_CONST_CACHE_GS_12:
1322 	case SQ_ALU_CONST_CACHE_GS_13:
1323 	case SQ_ALU_CONST_CACHE_GS_14:
1324 	case SQ_ALU_CONST_CACHE_GS_15:
1325 	case SQ_ALU_CONST_CACHE_PS_0:
1326 	case SQ_ALU_CONST_CACHE_PS_1:
1327 	case SQ_ALU_CONST_CACHE_PS_2:
1328 	case SQ_ALU_CONST_CACHE_PS_3:
1329 	case SQ_ALU_CONST_CACHE_PS_4:
1330 	case SQ_ALU_CONST_CACHE_PS_5:
1331 	case SQ_ALU_CONST_CACHE_PS_6:
1332 	case SQ_ALU_CONST_CACHE_PS_7:
1333 	case SQ_ALU_CONST_CACHE_PS_8:
1334 	case SQ_ALU_CONST_CACHE_PS_9:
1335 	case SQ_ALU_CONST_CACHE_PS_10:
1336 	case SQ_ALU_CONST_CACHE_PS_11:
1337 	case SQ_ALU_CONST_CACHE_PS_12:
1338 	case SQ_ALU_CONST_CACHE_PS_13:
1339 	case SQ_ALU_CONST_CACHE_PS_14:
1340 	case SQ_ALU_CONST_CACHE_PS_15:
1341 	case SQ_ALU_CONST_CACHE_VS_0:
1342 	case SQ_ALU_CONST_CACHE_VS_1:
1343 	case SQ_ALU_CONST_CACHE_VS_2:
1344 	case SQ_ALU_CONST_CACHE_VS_3:
1345 	case SQ_ALU_CONST_CACHE_VS_4:
1346 	case SQ_ALU_CONST_CACHE_VS_5:
1347 	case SQ_ALU_CONST_CACHE_VS_6:
1348 	case SQ_ALU_CONST_CACHE_VS_7:
1349 	case SQ_ALU_CONST_CACHE_VS_8:
1350 	case SQ_ALU_CONST_CACHE_VS_9:
1351 	case SQ_ALU_CONST_CACHE_VS_10:
1352 	case SQ_ALU_CONST_CACHE_VS_11:
1353 	case SQ_ALU_CONST_CACHE_VS_12:
1354 	case SQ_ALU_CONST_CACHE_VS_13:
1355 	case SQ_ALU_CONST_CACHE_VS_14:
1356 	case SQ_ALU_CONST_CACHE_VS_15:
1357 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1358 		if (r) {
1359 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1360 					"0x%04X\n", reg);
1361 			return -EINVAL;
1362 		}
1363 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1364 		break;
1365 	case SX_MEMORY_EXPORT_BASE:
1366 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1367 		if (r) {
1368 			dev_warn(p->dev, "bad SET_CONFIG_REG "
1369 					"0x%04X\n", reg);
1370 			return -EINVAL;
1371 		}
1372 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1373 		break;
1374 	case SX_MISC:
1375 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1376 		break;
1377 	default:
1378 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1379 		return -EINVAL;
1380 	}
1381 	return 0;
1382 }
1383 
1384 unsigned r600_mip_minify(unsigned size, unsigned level)
1385 {
1386 	unsigned val;
1387 
1388 	val = max(1U, size >> level);
1389 	if (level > 0)
1390 		val = roundup_pow_of_two(val);
1391 	return val;
1392 }
1393 
1394 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1395 			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1396 			      unsigned block_align, unsigned height_align, unsigned base_align,
1397 			      unsigned *l0_size, unsigned *mipmap_size)
1398 {
1399 	unsigned offset, i, level;
1400 	unsigned width, height, depth, size;
1401 	unsigned blocksize;
1402 	unsigned nbx, nby;
1403 	unsigned nlevels = llevel - blevel + 1;
1404 
1405 	*l0_size = -1;
1406 	blocksize = r600_fmt_get_blocksize(format);
1407 
1408 	w0 = r600_mip_minify(w0, 0);
1409 	h0 = r600_mip_minify(h0, 0);
1410 	d0 = r600_mip_minify(d0, 0);
1411 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1412 		width = r600_mip_minify(w0, i);
1413 		nbx = r600_fmt_get_nblocksx(format, width);
1414 
1415 		nbx = round_up(nbx, block_align);
1416 
1417 		height = r600_mip_minify(h0, i);
1418 		nby = r600_fmt_get_nblocksy(format, height);
1419 		nby = round_up(nby, height_align);
1420 
1421 		depth = r600_mip_minify(d0, i);
1422 
1423 		size = nbx * nby * blocksize * nsamples;
1424 		if (nfaces)
1425 			size *= nfaces;
1426 		else
1427 			size *= depth;
1428 
1429 		if (i == 0)
1430 			*l0_size = size;
1431 
1432 		if (i == 0 || i == 1)
1433 			offset = round_up(offset, base_align);
1434 
1435 		offset += size;
1436 	}
1437 	*mipmap_size = offset;
1438 	if (llevel == 0)
1439 		*mipmap_size = *l0_size;
1440 	if (!blevel)
1441 		*mipmap_size -= *l0_size;
1442 }
1443 
1444 /**
1445  * r600_check_texture_resource() - check if register is authorized or not
1446  * @p: parser structure holding parsing context
1447  * @idx: index into the cs buffer
1448  * @texture: texture's bo structure
1449  * @mipmap: mipmap's bo structure
1450  *
1451  * This function will check that the resource has valid field and that
1452  * the texture and mipmap bo object are big enough to cover this resource.
1453  */
1454 static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
1455 					      struct radeon_bo *texture,
1456 					      struct radeon_bo *mipmap,
1457 					      u64 base_offset,
1458 					      u64 mip_offset,
1459 					      u32 tiling_flags)
1460 {
1461 	struct r600_cs_track *track = p->track;
1462 	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
1463 	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
1464 	u32 height_align, pitch, pitch_align, depth_align;
1465 	u32 barray, larray;
1466 	u64 base_align;
1467 	struct array_mode_checker array_check;
1468 	u32 format;
1469 	bool is_array;
1470 
1471 	/* on legacy kernel we don't perform advanced check */
1472 	if (p->rdev == NULL)
1473 		return 0;
1474 
1475 	/* convert to bytes */
1476 	base_offset <<= 8;
1477 	mip_offset <<= 8;
1478 
1479 	word0 = radeon_get_ib_value(p, idx + 0);
1480 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1481 		if (tiling_flags & RADEON_TILING_MACRO)
1482 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1483 		else if (tiling_flags & RADEON_TILING_MICRO)
1484 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1485 	}
1486 	word1 = radeon_get_ib_value(p, idx + 1);
1487 	word2 = radeon_get_ib_value(p, idx + 2) << 8;
1488 	word3 = radeon_get_ib_value(p, idx + 3) << 8;
1489 	word4 = radeon_get_ib_value(p, idx + 4);
1490 	word5 = radeon_get_ib_value(p, idx + 5);
1491 	dim = G_038000_DIM(word0);
1492 	w0 = G_038000_TEX_WIDTH(word0) + 1;
1493 	pitch = (G_038000_PITCH(word0) + 1) * 8;
1494 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
1495 	d0 = G_038004_TEX_DEPTH(word1);
1496 	format = G_038004_DATA_FORMAT(word1);
1497 	blevel = G_038010_BASE_LEVEL(word4);
1498 	llevel = G_038014_LAST_LEVEL(word5);
1499 	/* pitch in texels */
1500 	array_check.array_mode = G_038000_TILE_MODE(word0);
1501 	array_check.group_size = track->group_size;
1502 	array_check.nbanks = track->nbanks;
1503 	array_check.npipes = track->npipes;
1504 	array_check.nsamples = 1;
1505 	array_check.blocksize = r600_fmt_get_blocksize(format);
1506 	nfaces = 1;
1507 	is_array = false;
1508 	switch (dim) {
1509 	case V_038000_SQ_TEX_DIM_1D:
1510 	case V_038000_SQ_TEX_DIM_2D:
1511 	case V_038000_SQ_TEX_DIM_3D:
1512 		break;
1513 	case V_038000_SQ_TEX_DIM_CUBEMAP:
1514 		if (p->family >= CHIP_RV770)
1515 			nfaces = 8;
1516 		else
1517 			nfaces = 6;
1518 		break;
1519 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
1520 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
1521 		is_array = true;
1522 		break;
1523 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1524 		is_array = true;
1525 		/* fall through */
1526 	case V_038000_SQ_TEX_DIM_2D_MSAA:
1527 		array_check.nsamples = 1 << llevel;
1528 		llevel = 0;
1529 		break;
1530 	default:
1531 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1532 		return -EINVAL;
1533 	}
1534 	if (!r600_fmt_is_valid_texture(format, p->family)) {
1535 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1536 			 __func__, __LINE__, format);
1537 		return -EINVAL;
1538 	}
1539 
1540 	if (r600_get_array_mode_alignment(&array_check,
1541 					  &pitch_align, &height_align, &depth_align, &base_align)) {
1542 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1543 			 __func__, __LINE__, G_038000_TILE_MODE(word0));
1544 		return -EINVAL;
1545 	}
1546 
1547 	/* XXX check height as well... */
1548 
1549 	if (!IS_ALIGNED(pitch, pitch_align)) {
1550 		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1551 			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1552 		return -EINVAL;
1553 	}
1554 	if (!IS_ALIGNED(base_offset, base_align)) {
1555 		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1556 			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1557 		return -EINVAL;
1558 	}
1559 	if (!IS_ALIGNED(mip_offset, base_align)) {
1560 		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1561 			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1562 		return -EINVAL;
1563 	}
1564 
1565 	if (blevel > llevel) {
1566 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1567 			 blevel, llevel);
1568 	}
1569 	if (is_array) {
1570 		barray = G_038014_BASE_ARRAY(word5);
1571 		larray = G_038014_LAST_ARRAY(word5);
1572 
1573 		nfaces = larray - barray + 1;
1574 	}
1575 	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1576 			  pitch_align, height_align, base_align,
1577 			  &l0_size, &mipmap_size);
1578 	/* using get ib will give us the offset into the texture bo */
1579 	if ((l0_size + word2) > radeon_bo_size(texture)) {
1580 		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
1581 			 w0, h0, pitch_align, height_align,
1582 			 array_check.array_mode, format, word2,
1583 			 l0_size, radeon_bo_size(texture));
1584 		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
1585 		return -EINVAL;
1586 	}
1587 	/* using get ib will give us the offset into the mipmap bo */
1588 	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1589 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1590 		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1591 	}
1592 	return 0;
1593 }
1594 
1595 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1596 {
1597 	u32 m, i;
1598 
1599 	i = (reg >> 7);
1600 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1601 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1602 		return false;
1603 	}
1604 	m = 1 << ((reg >> 2) & 31);
1605 	if (!(r600_reg_safe_bm[i] & m))
1606 		return true;
1607 	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1608 	return false;
1609 }
1610 
1611 static int r600_packet3_check(struct radeon_cs_parser *p,
1612 				struct radeon_cs_packet *pkt)
1613 {
1614 	struct radeon_cs_reloc *reloc;
1615 	struct r600_cs_track *track;
1616 	volatile u32 *ib;
1617 	unsigned idx;
1618 	unsigned i;
1619 	unsigned start_reg, end_reg, reg;
1620 	int r;
1621 	u32 idx_value;
1622 
1623 	track = (struct r600_cs_track *)p->track;
1624 	ib = p->ib.ptr;
1625 	idx = pkt->idx + 1;
1626 	idx_value = radeon_get_ib_value(p, idx);
1627 
1628 	switch (pkt->opcode) {
1629 	case PACKET3_SET_PREDICATION:
1630 	{
1631 		int pred_op;
1632 		int tmp;
1633 		uint64_t offset;
1634 
1635 		if (pkt->count != 1) {
1636 			DRM_ERROR("bad SET PREDICATION\n");
1637 			return -EINVAL;
1638 		}
1639 
1640 		tmp = radeon_get_ib_value(p, idx + 1);
1641 		pred_op = (tmp >> 16) & 0x7;
1642 
1643 		/* for the clear predicate operation */
1644 		if (pred_op == 0)
1645 			return 0;
1646 
1647 		if (pred_op > 2) {
1648 			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1649 			return -EINVAL;
1650 		}
1651 
1652 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1653 		if (r) {
1654 			DRM_ERROR("bad SET PREDICATION\n");
1655 			return -EINVAL;
1656 		}
1657 
1658 		offset = reloc->lobj.gpu_offset +
1659 		         (idx_value & 0xfffffff0) +
1660 		         ((u64)(tmp & 0xff) << 32);
1661 
1662 		ib[idx + 0] = offset;
1663 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1664 	}
1665 	break;
1666 
1667 	case PACKET3_START_3D_CMDBUF:
1668 		if (p->family >= CHIP_RV770 || pkt->count) {
1669 			DRM_ERROR("bad START_3D\n");
1670 			return -EINVAL;
1671 		}
1672 		break;
1673 	case PACKET3_CONTEXT_CONTROL:
1674 		if (pkt->count != 1) {
1675 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1676 			return -EINVAL;
1677 		}
1678 		break;
1679 	case PACKET3_INDEX_TYPE:
1680 	case PACKET3_NUM_INSTANCES:
1681 		if (pkt->count) {
1682 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1683 			return -EINVAL;
1684 		}
1685 		break;
1686 	case PACKET3_DRAW_INDEX:
1687 	{
1688 		uint64_t offset;
1689 		if (pkt->count != 3) {
1690 			DRM_ERROR("bad DRAW_INDEX\n");
1691 			return -EINVAL;
1692 		}
1693 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1694 		if (r) {
1695 			DRM_ERROR("bad DRAW_INDEX\n");
1696 			return -EINVAL;
1697 		}
1698 
1699 		offset = reloc->lobj.gpu_offset +
1700 		         idx_value +
1701 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1702 
1703 		ib[idx+0] = offset;
1704 		ib[idx+1] = upper_32_bits(offset) & 0xff;
1705 
1706 		r = r600_cs_track_check(p);
1707 		if (r) {
1708 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1709 			return r;
1710 		}
1711 		break;
1712 	}
1713 	case PACKET3_DRAW_INDEX_AUTO:
1714 		if (pkt->count != 1) {
1715 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1716 			return -EINVAL;
1717 		}
1718 		r = r600_cs_track_check(p);
1719 		if (r) {
1720 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1721 			return r;
1722 		}
1723 		break;
1724 	case PACKET3_DRAW_INDEX_IMMD_BE:
1725 	case PACKET3_DRAW_INDEX_IMMD:
1726 		if (pkt->count < 2) {
1727 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1728 			return -EINVAL;
1729 		}
1730 		r = r600_cs_track_check(p);
1731 		if (r) {
1732 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1733 			return r;
1734 		}
1735 		break;
1736 	case PACKET3_WAIT_REG_MEM:
1737 		if (pkt->count != 5) {
1738 			DRM_ERROR("bad WAIT_REG_MEM\n");
1739 			return -EINVAL;
1740 		}
1741 		/* bit 4 is reg (0) or mem (1) */
1742 		if (idx_value & 0x10) {
1743 			uint64_t offset;
1744 
1745 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1746 			if (r) {
1747 				DRM_ERROR("bad WAIT_REG_MEM\n");
1748 				return -EINVAL;
1749 			}
1750 
1751 			offset = reloc->lobj.gpu_offset +
1752 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
1753 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1754 
1755 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1756 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1757 		} else if (idx_value & 0x100) {
1758 			DRM_ERROR("cannot use PFP on REG wait\n");
1759 			return -EINVAL;
1760 		}
1761 		break;
1762 	case PACKET3_CP_DMA:
1763 	{
1764 		u32 command, size;
1765 		u64 offset, tmp;
1766 		if (pkt->count != 4) {
1767 			DRM_ERROR("bad CP DMA\n");
1768 			return -EINVAL;
1769 		}
1770 		command = radeon_get_ib_value(p, idx+4);
1771 		size = command & 0x1fffff;
1772 		if (command & PACKET3_CP_DMA_CMD_SAS) {
1773 			/* src address space is register */
1774 			DRM_ERROR("CP DMA SAS not supported\n");
1775 			return -EINVAL;
1776 		} else {
1777 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
1778 				DRM_ERROR("CP DMA SAIC only supported for registers\n");
1779 				return -EINVAL;
1780 			}
1781 			/* src address space is memory */
1782 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1783 			if (r) {
1784 				DRM_ERROR("bad CP DMA SRC\n");
1785 				return -EINVAL;
1786 			}
1787 
1788 			tmp = radeon_get_ib_value(p, idx) +
1789 				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1790 
1791 			offset = reloc->lobj.gpu_offset + tmp;
1792 
1793 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1794 				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
1795 					 tmp + size, radeon_bo_size(reloc->robj));
1796 				return -EINVAL;
1797 			}
1798 
1799 			ib[idx] = offset;
1800 			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1801 		}
1802 		if (command & PACKET3_CP_DMA_CMD_DAS) {
1803 			/* dst address space is register */
1804 			DRM_ERROR("CP DMA DAS not supported\n");
1805 			return -EINVAL;
1806 		} else {
1807 			/* dst address space is memory */
1808 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
1809 				DRM_ERROR("CP DMA DAIC only supported for registers\n");
1810 				return -EINVAL;
1811 			}
1812 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1813 			if (r) {
1814 				DRM_ERROR("bad CP DMA DST\n");
1815 				return -EINVAL;
1816 			}
1817 
1818 			tmp = radeon_get_ib_value(p, idx+2) +
1819 				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1820 
1821 			offset = reloc->lobj.gpu_offset + tmp;
1822 
1823 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1824 				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
1825 					 tmp + size, radeon_bo_size(reloc->robj));
1826 				return -EINVAL;
1827 			}
1828 
1829 			ib[idx+2] = offset;
1830 			ib[idx+3] = upper_32_bits(offset) & 0xff;
1831 		}
1832 		break;
1833 	}
1834 	case PACKET3_SURFACE_SYNC:
1835 		if (pkt->count != 3) {
1836 			DRM_ERROR("bad SURFACE_SYNC\n");
1837 			return -EINVAL;
1838 		}
1839 		/* 0xffffffff/0x0 is flush all cache flag */
1840 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1841 		    radeon_get_ib_value(p, idx + 2) != 0) {
1842 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1843 			if (r) {
1844 				DRM_ERROR("bad SURFACE_SYNC\n");
1845 				return -EINVAL;
1846 			}
1847 			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1848 		}
1849 		break;
1850 	case PACKET3_EVENT_WRITE:
1851 		if (pkt->count != 2 && pkt->count != 0) {
1852 			DRM_ERROR("bad EVENT_WRITE\n");
1853 			return -EINVAL;
1854 		}
1855 		if (pkt->count) {
1856 			uint64_t offset;
1857 
1858 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1859 			if (r) {
1860 				DRM_ERROR("bad EVENT_WRITE\n");
1861 				return -EINVAL;
1862 			}
1863 			offset = reloc->lobj.gpu_offset +
1864 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
1865 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1866 
1867 			ib[idx+1] = offset & 0xfffffff8;
1868 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1869 		}
1870 		break;
1871 	case PACKET3_EVENT_WRITE_EOP:
1872 	{
1873 		uint64_t offset;
1874 
1875 		if (pkt->count != 4) {
1876 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
1877 			return -EINVAL;
1878 		}
1879 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1880 		if (r) {
1881 			DRM_ERROR("bad EVENT_WRITE\n");
1882 			return -EINVAL;
1883 		}
1884 
1885 		offset = reloc->lobj.gpu_offset +
1886 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
1887 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1888 
1889 		ib[idx+1] = offset & 0xfffffffc;
1890 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1891 		break;
1892 	}
1893 	case PACKET3_SET_CONFIG_REG:
1894 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1895 		end_reg = 4 * pkt->count + start_reg - 4;
1896 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1897 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1898 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1899 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1900 			return -EINVAL;
1901 		}
1902 		for (i = 0; i < pkt->count; i++) {
1903 			reg = start_reg + (4 * i);
1904 			r = r600_cs_check_reg(p, reg, idx+1+i);
1905 			if (r)
1906 				return r;
1907 		}
1908 		break;
1909 	case PACKET3_SET_CONTEXT_REG:
1910 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1911 		end_reg = 4 * pkt->count + start_reg - 4;
1912 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1913 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1914 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1915 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1916 			return -EINVAL;
1917 		}
1918 		for (i = 0; i < pkt->count; i++) {
1919 			reg = start_reg + (4 * i);
1920 			r = r600_cs_check_reg(p, reg, idx+1+i);
1921 			if (r)
1922 				return r;
1923 		}
1924 		break;
1925 	case PACKET3_SET_RESOURCE:
1926 		if (pkt->count % 7) {
1927 			DRM_ERROR("bad SET_RESOURCE\n");
1928 			return -EINVAL;
1929 		}
1930 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1931 		end_reg = 4 * pkt->count + start_reg - 4;
1932 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1933 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
1934 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
1935 			DRM_ERROR("bad SET_RESOURCE\n");
1936 			return -EINVAL;
1937 		}
1938 		for (i = 0; i < (pkt->count / 7); i++) {
1939 			struct radeon_bo *texture, *mipmap;
1940 			u32 size, offset, base_offset, mip_offset;
1941 
1942 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1943 			case SQ_TEX_VTX_VALID_TEXTURE:
1944 				/* tex base */
1945 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1946 				if (r) {
1947 					DRM_ERROR("bad SET_RESOURCE\n");
1948 					return -EINVAL;
1949 				}
1950 				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1951 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1952 					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1953 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1954 					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1955 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1956 				}
1957 				texture = reloc->robj;
1958 				/* tex mip base */
1959 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1960 				if (r) {
1961 					DRM_ERROR("bad SET_RESOURCE\n");
1962 					return -EINVAL;
1963 				}
1964 				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1965 				mipmap = reloc->robj;
1966 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
1967 								texture, mipmap,
1968 								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1969 								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1970 								reloc->lobj.tiling_flags);
1971 				if (r)
1972 					return r;
1973 				ib[idx+1+(i*7)+2] += base_offset;
1974 				ib[idx+1+(i*7)+3] += mip_offset;
1975 				break;
1976 			case SQ_TEX_VTX_VALID_BUFFER:
1977 			{
1978 				uint64_t offset64;
1979 				/* vtx base */
1980 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1981 				if (r) {
1982 					DRM_ERROR("bad SET_RESOURCE\n");
1983 					return -EINVAL;
1984 				}
1985 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1986 				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
1987 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1988 					/* force size to size of the buffer */
1989 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
1990 						 size + offset, radeon_bo_size(reloc->robj));
1991 					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
1992 				}
1993 
1994 				offset64 = reloc->lobj.gpu_offset + offset;
1995 				ib[idx+1+(i*8)+0] = offset64;
1996 				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
1997 						    (upper_32_bits(offset64) & 0xff);
1998 				break;
1999 			}
2000 			case SQ_TEX_VTX_INVALID_TEXTURE:
2001 			case SQ_TEX_VTX_INVALID_BUFFER:
2002 			default:
2003 				DRM_ERROR("bad SET_RESOURCE\n");
2004 				return -EINVAL;
2005 			}
2006 		}
2007 		break;
2008 	case PACKET3_SET_ALU_CONST:
2009 		if (track->sq_config & DX9_CONSTS) {
2010 			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
2011 			end_reg = 4 * pkt->count + start_reg - 4;
2012 			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
2013 			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
2014 			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
2015 				DRM_ERROR("bad SET_ALU_CONST\n");
2016 				return -EINVAL;
2017 			}
2018 		}
2019 		break;
2020 	case PACKET3_SET_BOOL_CONST:
2021 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
2022 		end_reg = 4 * pkt->count + start_reg - 4;
2023 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
2024 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2025 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2026 			DRM_ERROR("bad SET_BOOL_CONST\n");
2027 			return -EINVAL;
2028 		}
2029 		break;
2030 	case PACKET3_SET_LOOP_CONST:
2031 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
2032 		end_reg = 4 * pkt->count + start_reg - 4;
2033 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
2034 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2035 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2036 			DRM_ERROR("bad SET_LOOP_CONST\n");
2037 			return -EINVAL;
2038 		}
2039 		break;
2040 	case PACKET3_SET_CTL_CONST:
2041 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
2042 		end_reg = 4 * pkt->count + start_reg - 4;
2043 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
2044 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2045 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2046 			DRM_ERROR("bad SET_CTL_CONST\n");
2047 			return -EINVAL;
2048 		}
2049 		break;
2050 	case PACKET3_SET_SAMPLER:
2051 		if (pkt->count % 3) {
2052 			DRM_ERROR("bad SET_SAMPLER\n");
2053 			return -EINVAL;
2054 		}
2055 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
2056 		end_reg = 4 * pkt->count + start_reg - 4;
2057 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
2058 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
2059 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
2060 			DRM_ERROR("bad SET_SAMPLER\n");
2061 			return -EINVAL;
2062 		}
2063 		break;
2064 	case PACKET3_STRMOUT_BASE_UPDATE:
2065 		/* RS780 and RS880 also need this */
2066 		if (p->family < CHIP_RS780) {
2067 			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2068 			return -EINVAL;
2069 		}
2070 		if (pkt->count != 1) {
2071 			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2072 			return -EINVAL;
2073 		}
2074 		if (idx_value > 3) {
2075 			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2076 			return -EINVAL;
2077 		}
2078 		{
2079 			u64 offset;
2080 
2081 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2082 			if (r) {
2083 				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2084 				return -EINVAL;
2085 			}
2086 
2087 			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2088 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2089 				return -EINVAL;
2090 			}
2091 
2092 			offset = radeon_get_ib_value(p, idx+1) << 8;
2093 			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2094 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
2095 					  offset, track->vgt_strmout_bo_offset[idx_value]);
2096 				return -EINVAL;
2097 			}
2098 
2099 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2100 				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
2101 					  offset + 4, radeon_bo_size(reloc->robj));
2102 				return -EINVAL;
2103 			}
2104 			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2105 		}
2106 		break;
2107 	case PACKET3_SURFACE_BASE_UPDATE:
2108 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2109 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2110 			return -EINVAL;
2111 		}
2112 		if (pkt->count) {
2113 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2114 			return -EINVAL;
2115 		}
2116 		break;
2117 	case PACKET3_STRMOUT_BUFFER_UPDATE:
2118 		if (pkt->count != 4) {
2119 			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2120 			return -EINVAL;
2121 		}
2122 		/* Updating memory at DST_ADDRESS. */
2123 		if (idx_value & 0x1) {
2124 			u64 offset;
2125 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2126 			if (r) {
2127 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2128 				return -EINVAL;
2129 			}
2130 			offset = radeon_get_ib_value(p, idx+1);
2131 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2132 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2133 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2134 					  offset + 4, radeon_bo_size(reloc->robj));
2135 				return -EINVAL;
2136 			}
2137 			offset += reloc->lobj.gpu_offset;
2138 			ib[idx+1] = offset;
2139 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2140 		}
2141 		/* Reading data from SRC_ADDRESS. */
2142 		if (((idx_value >> 1) & 0x3) == 2) {
2143 			u64 offset;
2144 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2145 			if (r) {
2146 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2147 				return -EINVAL;
2148 			}
2149 			offset = radeon_get_ib_value(p, idx+3);
2150 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2151 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2152 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2153 					  offset + 4, radeon_bo_size(reloc->robj));
2154 				return -EINVAL;
2155 			}
2156 			offset += reloc->lobj.gpu_offset;
2157 			ib[idx+3] = offset;
2158 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2159 		}
2160 		break;
2161 	case PACKET3_MEM_WRITE:
2162 	{
2163 		u64 offset;
2164 
2165 		if (pkt->count != 3) {
2166 			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2167 			return -EINVAL;
2168 		}
2169 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2170 		if (r) {
2171 			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2172 			return -EINVAL;
2173 		}
2174 		offset = radeon_get_ib_value(p, idx+0);
2175 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2176 		if (offset & 0x7) {
2177 			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2178 			return -EINVAL;
2179 		}
2180 		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2181 			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2182 				  offset + 8, radeon_bo_size(reloc->robj));
2183 			return -EINVAL;
2184 		}
2185 		offset += reloc->lobj.gpu_offset;
2186 		ib[idx+0] = offset;
2187 		ib[idx+1] = upper_32_bits(offset) & 0xff;
2188 		break;
2189 	}
2190 	case PACKET3_COPY_DW:
2191 		if (pkt->count != 4) {
2192 			DRM_ERROR("bad COPY_DW (invalid count)\n");
2193 			return -EINVAL;
2194 		}
2195 		if (idx_value & 0x1) {
2196 			u64 offset;
2197 			/* SRC is memory. */
2198 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2199 			if (r) {
2200 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2201 				return -EINVAL;
2202 			}
2203 			offset = radeon_get_ib_value(p, idx+1);
2204 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2205 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2206 				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2207 					  offset + 4, radeon_bo_size(reloc->robj));
2208 				return -EINVAL;
2209 			}
2210 			offset += reloc->lobj.gpu_offset;
2211 			ib[idx+1] = offset;
2212 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2213 		} else {
2214 			/* SRC is a reg. */
2215 			reg = radeon_get_ib_value(p, idx+1) << 2;
2216 			if (!r600_is_safe_reg(p, reg, idx+1))
2217 				return -EINVAL;
2218 		}
2219 		if (idx_value & 0x2) {
2220 			u64 offset;
2221 			/* DST is memory. */
2222 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2223 			if (r) {
2224 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2225 				return -EINVAL;
2226 			}
2227 			offset = radeon_get_ib_value(p, idx+3);
2228 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2229 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2230 				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2231 					  offset + 4, radeon_bo_size(reloc->robj));
2232 				return -EINVAL;
2233 			}
2234 			offset += reloc->lobj.gpu_offset;
2235 			ib[idx+3] = offset;
2236 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2237 		} else {
2238 			/* DST is a reg. */
2239 			reg = radeon_get_ib_value(p, idx+3) << 2;
2240 			if (!r600_is_safe_reg(p, reg, idx+3))
2241 				return -EINVAL;
2242 		}
2243 		break;
2244 	case PACKET3_NOP:
2245 		break;
2246 	default:
2247 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2248 		return -EINVAL;
2249 	}
2250 	return 0;
2251 }
2252 
2253 int r600_cs_parse(struct radeon_cs_parser *p)
2254 {
2255 	struct radeon_cs_packet pkt;
2256 	struct r600_cs_track *track;
2257 	int r;
2258 
2259 	if (p->track == NULL) {
2260 		/* initialize tracker, we are in kms */
2261 		track = kzalloc(sizeof(*track), GFP_KERNEL);
2262 		if (track == NULL)
2263 			return -ENOMEM;
2264 		r600_cs_track_init(track);
2265 		if (p->rdev->family < CHIP_RV770) {
2266 			track->npipes = p->rdev->config.r600.tiling_npipes;
2267 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
2268 			track->group_size = p->rdev->config.r600.tiling_group_size;
2269 		} else if (p->rdev->family <= CHIP_RV740) {
2270 			track->npipes = p->rdev->config.rv770.tiling_npipes;
2271 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
2272 			track->group_size = p->rdev->config.rv770.tiling_group_size;
2273 		}
2274 		p->track = track;
2275 	}
2276 	do {
2277 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
2278 		if (r) {
2279 			kfree(p->track);
2280 			p->track = NULL;
2281 			return r;
2282 		}
2283 		p->idx += pkt.count + 2;
2284 		switch (pkt.type) {
2285 		case RADEON_PACKET_TYPE0:
2286 			r = r600_cs_parse_packet0(p, &pkt);
2287 			break;
2288 		case RADEON_PACKET_TYPE2:
2289 			break;
2290 		case RADEON_PACKET_TYPE3:
2291 			r = r600_packet3_check(p, &pkt);
2292 			break;
2293 		default:
2294 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2295 			kfree(p->track);
2296 			p->track = NULL;
2297 			return -EINVAL;
2298 		}
2299 		if (r) {
2300 			kfree(p->track);
2301 			p->track = NULL;
2302 			return r;
2303 		}
2304 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2305 #if 0
2306 	for (r = 0; r < p->ib.length_dw; r++) {
2307 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
2308 		mdelay(1);
2309 	}
2310 #endif
2311 	kfree(p->track);
2312 	p->track = NULL;
2313 	return 0;
2314 }
2315 
2316 #ifdef CONFIG_DRM_RADEON_UMS
2317 
2318 /**
2319  * cs_parser_fini() - clean parser states
2320  * @parser:	parser structure holding parsing context.
2321  * @error:	error number
2322  *
2323  * If error is set than unvalidate buffer, otherwise just free memory
2324  * used by parsing context.
2325  **/
2326 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2327 {
2328 	unsigned i;
2329 
2330 	kfree(parser->relocs);
2331 	for (i = 0; i < parser->nchunks; i++)
2332 		drm_free_large(parser->chunks[i].kdata);
2333 	kfree(parser->chunks);
2334 	kfree(parser->chunks_array);
2335 }
2336 
2337 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2338 {
2339 	if (p->chunk_relocs_idx == -1) {
2340 		return 0;
2341 	}
2342 	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2343 	if (p->relocs == NULL) {
2344 		return -ENOMEM;
2345 	}
2346 	return 0;
2347 }
2348 
2349 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2350 			unsigned family, u32 *ib, int *l)
2351 {
2352 	struct radeon_cs_parser parser;
2353 	struct radeon_cs_chunk *ib_chunk;
2354 	struct r600_cs_track *track;
2355 	int r;
2356 
2357 	/* initialize tracker */
2358 	track = kzalloc(sizeof(*track), GFP_KERNEL);
2359 	if (track == NULL)
2360 		return -ENOMEM;
2361 	r600_cs_track_init(track);
2362 	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
2363 	/* initialize parser */
2364 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
2365 	parser.filp = filp;
2366 	parser.dev = &dev->pdev->dev;
2367 	parser.rdev = NULL;
2368 	parser.family = family;
2369 	parser.track = track;
2370 	parser.ib.ptr = ib;
2371 	r = radeon_cs_parser_init(&parser, data);
2372 	if (r) {
2373 		DRM_ERROR("Failed to initialize parser !\n");
2374 		r600_cs_parser_fini(&parser, r);
2375 		return r;
2376 	}
2377 	r = r600_cs_parser_relocs_legacy(&parser);
2378 	if (r) {
2379 		DRM_ERROR("Failed to parse relocation !\n");
2380 		r600_cs_parser_fini(&parser, r);
2381 		return r;
2382 	}
2383 	/* Copy the packet into the IB, the parser will read from the
2384 	 * input memory (cached) and write to the IB (which can be
2385 	 * uncached). */
2386 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2387 	parser.ib.length_dw = ib_chunk->length_dw;
2388 	*l = parser.ib.length_dw;
2389 	if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
2390 		r = -EFAULT;
2391 		r600_cs_parser_fini(&parser, r);
2392 		return r;
2393 	}
2394 	r = r600_cs_parse(&parser);
2395 	if (r) {
2396 		DRM_ERROR("Invalid command stream !\n");
2397 		r600_cs_parser_fini(&parser, r);
2398 		return r;
2399 	}
2400 	r600_cs_parser_fini(&parser, r);
2401 	return r;
2402 }
2403 
2404 void r600_cs_legacy_init(void)
2405 {
2406 	r600_nomm = 1;
2407 }
2408 
2409 #endif
2410 
2411 /*
2412  *  DMA
2413  */
2414 /**
2415  * r600_dma_cs_next_reloc() - parse next reloc
2416  * @p:		parser structure holding parsing context.
2417  * @cs_reloc:		reloc informations
2418  *
2419  * Return the next reloc, do bo validation and compute
2420  * GPU offset using the provided start.
2421  **/
2422 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2423 			   struct radeon_cs_reloc **cs_reloc)
2424 {
2425 	struct radeon_cs_chunk *relocs_chunk;
2426 	unsigned idx;
2427 
2428 	*cs_reloc = NULL;
2429 	if (p->chunk_relocs_idx == -1) {
2430 		DRM_ERROR("No relocation chunk !\n");
2431 		return -EINVAL;
2432 	}
2433 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2434 	idx = p->dma_reloc_idx;
2435 	if (idx >= p->nrelocs) {
2436 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2437 			  idx, p->nrelocs);
2438 		return -EINVAL;
2439 	}
2440 	*cs_reloc = p->relocs_ptr[idx];
2441 	p->dma_reloc_idx++;
2442 	return 0;
2443 }
2444 
2445 #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2446 #define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2447 #define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2448 
2449 /**
2450  * r600_dma_cs_parse() - parse the DMA IB
2451  * @p:		parser structure holding parsing context.
2452  *
2453  * Parses the DMA IB from the CS ioctl and updates
2454  * the GPU addresses based on the reloc information and
2455  * checks for errors. (R6xx-R7xx)
2456  * Returns 0 for success and an error on failure.
2457  **/
2458 int r600_dma_cs_parse(struct radeon_cs_parser *p)
2459 {
2460 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2461 	struct radeon_cs_reloc *src_reloc, *dst_reloc;
2462 	u32 header, cmd, count, tiled;
2463 	volatile u32 *ib = p->ib.ptr;
2464 	u32 idx, idx_value;
2465 	u64 src_offset, dst_offset;
2466 	int r;
2467 
2468 	do {
2469 		if (p->idx >= ib_chunk->length_dw) {
2470 			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2471 				  p->idx, ib_chunk->length_dw);
2472 			return -EINVAL;
2473 		}
2474 		idx = p->idx;
2475 		header = radeon_get_ib_value(p, idx);
2476 		cmd = GET_DMA_CMD(header);
2477 		count = GET_DMA_COUNT(header);
2478 		tiled = GET_DMA_T(header);
2479 
2480 		switch (cmd) {
2481 		case DMA_PACKET_WRITE:
2482 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2483 			if (r) {
2484 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2485 				return -EINVAL;
2486 			}
2487 			if (tiled) {
2488 				dst_offset = radeon_get_ib_value(p, idx+1);
2489 				dst_offset <<= 8;
2490 
2491 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2492 				p->idx += count + 5;
2493 			} else {
2494 				dst_offset = radeon_get_ib_value(p, idx+1);
2495 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2496 
2497 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2498 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2499 				p->idx += count + 3;
2500 			}
2501 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2502 				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2503 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2504 				return -EINVAL;
2505 			}
2506 			break;
2507 		case DMA_PACKET_COPY:
2508 			r = r600_dma_cs_next_reloc(p, &src_reloc);
2509 			if (r) {
2510 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2511 				return -EINVAL;
2512 			}
2513 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2514 			if (r) {
2515 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2516 				return -EINVAL;
2517 			}
2518 			if (tiled) {
2519 				idx_value = radeon_get_ib_value(p, idx + 2);
2520 				/* detile bit */
2521 				if (idx_value & (1 << 31)) {
2522 					/* tiled src, linear dst */
2523 					src_offset = radeon_get_ib_value(p, idx+1);
2524 					src_offset <<= 8;
2525 					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2526 
2527 					dst_offset = radeon_get_ib_value(p, idx+5);
2528 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2529 					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2530 					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2531 				} else {
2532 					/* linear src, tiled dst */
2533 					src_offset = radeon_get_ib_value(p, idx+5);
2534 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2535 					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2536 					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2537 
2538 					dst_offset = radeon_get_ib_value(p, idx+1);
2539 					dst_offset <<= 8;
2540 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2541 				}
2542 				p->idx += 7;
2543 			} else {
2544 				if (p->family >= CHIP_RV770) {
2545 					src_offset = radeon_get_ib_value(p, idx+2);
2546 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2547 					dst_offset = radeon_get_ib_value(p, idx+1);
2548 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2549 
2550 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2551 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2552 					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2553 					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2554 					p->idx += 5;
2555 				} else {
2556 					src_offset = radeon_get_ib_value(p, idx+2);
2557 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2558 					dst_offset = radeon_get_ib_value(p, idx+1);
2559 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
2560 
2561 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2562 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2563 					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2564 					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
2565 					p->idx += 4;
2566 				}
2567 			}
2568 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2569 				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
2570 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2571 				return -EINVAL;
2572 			}
2573 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2574 				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
2575 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2576 				return -EINVAL;
2577 			}
2578 			break;
2579 		case DMA_PACKET_CONSTANT_FILL:
2580 			if (p->family < CHIP_RV770) {
2581 				DRM_ERROR("Constant Fill is 7xx only !\n");
2582 				return -EINVAL;
2583 			}
2584 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2585 			if (r) {
2586 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2587 				return -EINVAL;
2588 			}
2589 			dst_offset = radeon_get_ib_value(p, idx+1);
2590 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
2591 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2592 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2593 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2594 				return -EINVAL;
2595 			}
2596 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2597 			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
2598 			p->idx += 4;
2599 			break;
2600 		case DMA_PACKET_NOP:
2601 			p->idx += 1;
2602 			break;
2603 		default:
2604 			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2605 			return -EINVAL;
2606 		}
2607 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2608 #if 0
2609 	for (r = 0; r < p->ib->length_dw; r++) {
2610 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
2611 		mdelay(1);
2612 	}
2613 #endif
2614 	return 0;
2615 }
2616