/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
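/*
 * Relocation lookups are dispatched through a function pointer: the _mm
 * variant below is the default, used when a memory manager resolves
 * relocations to buffer objects, while r600_cs_legacy_init() switches the
 * parser to the _nomm variant for the legacy path, where the relocation
 * chunk's kdata carries the GPU offsets directly.
 */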
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;

/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib chunk
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			struct radeon_cs_packet *pkt,
			unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
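
/*
 * For reference, the header dword decoded above is laid out as follows
 * (per the CP_PACKET* macros in r600d.h):
 *
 *   bits [31:30]  packet type (0, 2 or 3)
 *   bits [29:16]  count (payload dwords minus one)
 *   bits [15:0]   PACKET0: starting register dword offset
 *   bits [15:8]   PACKET3: opcode
 *
 * Example: header 0xC0012D00 is a PACKET3 with count = 1 and
 * opcode 0x2D (PACKET3_DRAW_INDEX_AUTO).
 */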

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Checks that the next packet is a relocation packet3, does bo validation
 * and computes the GPU offset.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords (the size of
	 * struct drm_radeon_cs_reloc) */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
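
/*
 * On the wire a relocation is a PACKET3_NOP whose one payload dword
 * indexes into the relocation chunk. Each entry in that chunk is assumed
 * to be 4 dwords, matching struct drm_radeon_cs_reloc (handle,
 * read_domains, write_domain, flags), hence the idx / 4 above.
 */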

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Checks that the next packet is a relocation packet3 and computes the
 * GPU offset from the relocation data in the chunk's kdata (legacy path,
 * no memory manager involved).
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
	/* the legacy reloc entry carries the 64-bit GPU offset directly:
	 * low dword in slot 0, high dword in slot 3 */
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nops out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		r = -EINVAL;
		return r;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		r = -EINVAL;
		return r;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		r = -EINVAL;
		return r;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

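	/*
	 * At this point the sequence in the IB looks like this, with all
	 * offsets relative to h_idx:
	 *   + 0: PACKET0 header for AVIVO_D1MODE_VLINE_START_END
	 *   + 1: VLINE_START_END value
	 *   + 2: WAIT_REG_MEM header (count = 5, 7 dwords in total)
	 *   + 3..8: WAIT_REG_MEM payload
	 *   + 9: PACKET3_NOP relocation header
	 *   + 10: relocation handle, carrying the crtc_id
	 */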
	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
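	/* A PACKET0 writes count + 1 consecutive registers starting at
	 * pkt->reg, so each register in the range is checked in turn.
	 * Illustrative example: reg = AVIVO_D1MODE_VLINE_START_END with
	 * count = 0 checks exactly that one register. */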
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
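		/* Patch in the index buffer's GPU address: the low 32 bits
		 * go into the first payload dword and only the upper 8 bits
		 * into the second (a 40-bit GPU address). The same low/high
		 * split recurs in the packets below. */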
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
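	/* The SET_*_REG packets encode their start register as a dword
	 * offset from a per-type base, so start_reg = (idx_value << 2) +
	 * base and end_reg = start_reg + 4 * count - 4. Illustrative
	 * example with a base of 0x8000: idx_value = 4 and count = 2 give
	 * start_reg = 0x8010 and end_reg = 0x8014, and both must fall
	 * inside the allowed range for the packet to be accepted. */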
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case SQ_ESGS_RING_BASE:
			case SQ_GSVS_RING_BASE:
			case SQ_ESTMP_RING_BASE:
			case SQ_GSTMP_RING_BASE:
			case SQ_VSTMP_RING_BASE:
			case SQ_PSTMP_RING_BASE:
			case SQ_FBUF_RING_BASE:
			case SQ_REDUC_RING_BASE:
			case SX_MEMORY_EXPORT_BASE:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_CONFIG_REG "
							"0x%04X\n", reg);
					return -EINVAL;
				}
				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case CP_COHER_BASE:
				/* use PACKET3_SURFACE_SYNC */
				return -EINVAL;
			default:
				break;
			}
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case DB_DEPTH_BASE:
			case DB_HTILE_DATA_BASE:
			case CB_COLOR0_BASE:
			case CB_COLOR1_BASE:
			case CB_COLOR2_BASE:
			case CB_COLOR3_BASE:
			case CB_COLOR4_BASE:
			case CB_COLOR5_BASE:
			case CB_COLOR6_BASE:
			case CB_COLOR7_BASE:
			case SQ_PGM_START_FS:
			case SQ_PGM_START_ES:
			case SQ_PGM_START_VS:
			case SQ_PGM_START_GS:
			case SQ_PGM_START_PS:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_CONTEXT_REG "
							"0x%04X\n", reg);
					return -EINVAL;
				}
				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case VGT_DMA_BASE:
			case VGT_DMA_BASE_HI:
				/* These should be handled by DRAW_INDEX packet 3 */
			case VGT_STRMOUT_BASE_OFFSET_0:
			case VGT_STRMOUT_BASE_OFFSET_1:
			case VGT_STRMOUT_BASE_OFFSET_2:
			case VGT_STRMOUT_BASE_OFFSET_3:
			case VGT_STRMOUT_BASE_OFFSET_HI_0:
			case VGT_STRMOUT_BASE_OFFSET_HI_1:
			case VGT_STRMOUT_BASE_OFFSET_HI_2:
			case VGT_STRMOUT_BASE_OFFSET_HI_3:
			case VGT_STRMOUT_BUFFER_BASE_0:
			case VGT_STRMOUT_BUFFER_BASE_1:
			case VGT_STRMOUT_BUFFER_BASE_2:
			case VGT_STRMOUT_BUFFER_BASE_3:
			case VGT_STRMOUT_BUFFER_OFFSET_0:
			case VGT_STRMOUT_BUFFER_OFFSET_1:
			case VGT_STRMOUT_BUFFER_OFFSET_2:
			case VGT_STRMOUT_BUFFER_OFFSET_3:
				/* These should be handled by STRMOUT_BUFFER packet 3 */
				DRM_ERROR("bad context reg: 0x%08x\n", reg);
				return -EINVAL;
			default:
				break;
			}
		}
		break;
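	/* Each SET_RESOURCE entry is 7 dwords; the type field in an
	 * entry's seventh dword tells a texture resource (two bases to
	 * relocate: texture and mip) from a vertex buffer (one 64-bit
	 * base split across the entry's first and third dwords). */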
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
		    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
			DRM_ERROR("bad SET_ALU_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}

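/*
 * The legacy path allocates a single struct radeon_cs_reloc;
 * r600_cs_packet_next_reloc_nomm() reuses it for every relocation,
 * rewriting its gpu_offset from the relocation chunk's kdata each time.
 */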
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib	fake_ib;
	int r;

	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}