1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/kernel.h> 29 #include "drmP.h" 30 #include "radeon.h" 31 #include "r600d.h" 32 #include "r600_reg_safe.h" 33 34 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 35 struct radeon_cs_reloc **cs_reloc); 36 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 37 struct radeon_cs_reloc **cs_reloc); 38 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 39 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 40 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size); 41 42 43 struct r600_cs_track { 44 /* configuration we miror so that we use same code btw kms/ums */ 45 u32 group_size; 46 u32 nbanks; 47 u32 npipes; 48 /* value we track */ 49 u32 sq_config; 50 u32 nsamples; 51 u32 cb_color_base_last[8]; 52 struct radeon_bo *cb_color_bo[8]; 53 u64 cb_color_bo_mc[8]; 54 u32 cb_color_bo_offset[8]; 55 struct radeon_bo *cb_color_frag_bo[8]; 56 struct radeon_bo *cb_color_tile_bo[8]; 57 u32 cb_color_info[8]; 58 u32 cb_color_size_idx[8]; 59 u32 cb_target_mask; 60 u32 cb_shader_mask; 61 u32 cb_color_size[8]; 62 u32 vgt_strmout_en; 63 u32 vgt_strmout_buffer_en; 64 u32 db_depth_control; 65 u32 db_depth_info; 66 u32 db_depth_size_idx; 67 u32 db_depth_view; 68 u32 db_depth_size; 69 u32 db_offset; 70 struct radeon_bo *db_bo; 71 u64 db_bo_mc; 72 }; 73 74 #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } 75 #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } 76 #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 } 77 #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } 78 #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 } 79 #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } 80 #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } 81 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } 82 83 struct gpu_formats { 84 unsigned blockwidth; 85 unsigned blockheight; 86 unsigned blocksize; 87 unsigned valid_color; 88 enum radeon_family min_family; 89 }; 90 91 static const struct gpu_formats color_formats_table[] = { 92 /* 8 bit */ 93 FMT_8_BIT(V_038004_COLOR_8, 1), 94 FMT_8_BIT(V_038004_COLOR_4_4, 1), 95 FMT_8_BIT(V_038004_COLOR_3_3_2, 1), 96 FMT_8_BIT(V_038004_FMT_1, 0), 97 98 /* 16-bit */ 99 FMT_16_BIT(V_038004_COLOR_16, 1), 100 FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), 101 FMT_16_BIT(V_038004_COLOR_8_8, 1), 102 FMT_16_BIT(V_038004_COLOR_5_6_5, 1), 103 FMT_16_BIT(V_038004_COLOR_6_5_5, 1), 104 FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), 105 FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), 106 FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), 107 108 /* 24-bit */ 109 FMT_24_BIT(V_038004_FMT_8_8_8), 110 111 /* 32-bit */ 112 FMT_32_BIT(V_038004_COLOR_32, 1), 113 FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), 114 FMT_32_BIT(V_038004_COLOR_16_16, 1), 115 FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), 116 FMT_32_BIT(V_038004_COLOR_8_24, 1), 117 FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), 118 FMT_32_BIT(V_038004_COLOR_24_8, 1), 119 FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), 120 FMT_32_BIT(V_038004_COLOR_10_11_11, 1), 121 FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), 122 FMT_32_BIT(V_038004_COLOR_11_11_10, 1), 123 FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), 124 FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), 125 FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), 126 FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1), 127 FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), 128 FMT_32_BIT(V_038004_FMT_32_AS_8, 0), 129 FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), 130 131 /* 48-bit */ 132 FMT_48_BIT(V_038004_FMT_16_16_16), 133 FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), 134 135 /* 64-bit */ 136 FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), 137 FMT_64_BIT(V_038004_COLOR_32_32, 1), 138 FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), 139 FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), 140 FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), 141 142 FMT_96_BIT(V_038004_FMT_32_32_32), 143 FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), 144 145 /* 128-bit */ 146 FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), 147 FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), 148 149 [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, 150 [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, 151 152 /* block compressed formats */ 153 [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, 154 [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, 155 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 156 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 157 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 158 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 159 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 160 161 /* The other Evergreen formats */ 162 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, 163 }; 164 165 static inline bool fmt_is_valid_color(u32 format) 166 { 167 if (format >= ARRAY_SIZE(color_formats_table)) 168 return false; 169 170 if (color_formats_table[format].valid_color) 171 return true; 172 173 return false; 174 } 175 176 static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family) 177 { 178 if (format >= ARRAY_SIZE(color_formats_table)) 179 return false; 180 181 if (family < color_formats_table[format].min_family) 182 return false; 183 184 if (color_formats_table[format].blockwidth > 0) 185 return true; 186 187 return false; 188 } 189 190 static inline int fmt_get_blocksize(u32 format) 191 { 192 if (format >= ARRAY_SIZE(color_formats_table)) 193 return 0; 194 195 return color_formats_table[format].blocksize; 196 } 197 198 static inline int fmt_get_nblocksx(u32 format, u32 w) 199 { 200 unsigned bw; 201 202 if (format >= ARRAY_SIZE(color_formats_table)) 203 return 0; 204 205 bw = color_formats_table[format].blockwidth; 206 if (bw == 0) 207 return 0; 208 209 return (w + bw - 1) / bw; 210 } 211 212 static inline int fmt_get_nblocksy(u32 format, u32 h) 213 { 214 unsigned bh; 215 216 if (format >= ARRAY_SIZE(color_formats_table)) 217 return 0; 218 219 bh = color_formats_table[format].blockheight; 220 if (bh == 0) 221 return 0; 222 223 return (h + bh - 1) / bh; 224 } 225 226 static inline int r600_bpe_from_format(u32 *bpe, u32 format) 227 { 228 unsigned res; 229 230 if (format >= ARRAY_SIZE(color_formats_table)) 231 goto fail; 232 233 res = color_formats_table[format].blocksize; 234 if (res == 0) 235 goto fail; 236 237 *bpe = res; 238 return 0; 239 240 fail: 241 *bpe = 16; 242 return -EINVAL; 243 } 244 245 struct array_mode_checker { 246 int array_mode; 247 u32 group_size; 248 u32 nbanks; 249 u32 npipes; 250 u32 nsamples; 251 u32 blocksize; 252 }; 253 254 /* returns alignment in pixels for pitch/height/depth and bytes for base */ 255 static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, 256 u32 *pitch_align, 257 u32 *height_align, 258 u32 *depth_align, 259 u64 *base_align) 260 { 261 u32 tile_width = 8; 262 u32 tile_height = 8; 263 u32 macro_tile_width = values->nbanks; 264 u32 macro_tile_height = values->npipes; 265 u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; 266 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; 267 268 switch (values->array_mode) { 269 case ARRAY_LINEAR_GENERAL: 270 /* technically tile_width/_height for pitch/height */ 271 *pitch_align = 1; /* tile_width */ 272 *height_align = 1; /* tile_height */ 273 *depth_align = 1; 274 *base_align = 1; 275 break; 276 case ARRAY_LINEAR_ALIGNED: 277 *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); 278 *height_align = tile_height; 279 *depth_align = 1; 280 *base_align = values->group_size; 281 break; 282 case ARRAY_1D_TILED_THIN1: 283 *pitch_align = max((u32)tile_width, 284 (u32)(values->group_size / 285 (tile_height * values->blocksize * values->nsamples))); 286 *height_align = tile_height; 287 *depth_align = 1; 288 *base_align = values->group_size; 289 break; 290 case ARRAY_2D_TILED_THIN1: 291 *pitch_align = max((u32)macro_tile_width, 292 (u32)(((values->group_size / tile_height) / 293 (values->blocksize * values->nsamples)) * 294 values->nbanks)) * tile_width; 295 *height_align = macro_tile_height * tile_height; 296 *depth_align = 1; 297 *base_align = max(macro_tile_bytes, 298 (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); 299 break; 300 default: 301 return -EINVAL; 302 } 303 304 return 0; 305 } 306 307 static void r600_cs_track_init(struct r600_cs_track *track) 308 { 309 int i; 310 311 /* assume DX9 mode */ 312 track->sq_config = DX9_CONSTS; 313 for (i = 0; i < 8; i++) { 314 track->cb_color_base_last[i] = 0; 315 track->cb_color_size[i] = 0; 316 track->cb_color_size_idx[i] = 0; 317 track->cb_color_info[i] = 0; 318 track->cb_color_bo[i] = NULL; 319 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 320 track->cb_color_bo_mc[i] = 0xFFFFFFFF; 321 } 322 track->cb_target_mask = 0xFFFFFFFF; 323 track->cb_shader_mask = 0xFFFFFFFF; 324 track->db_bo = NULL; 325 track->db_bo_mc = 0xFFFFFFFF; 326 /* assume the biggest format and that htile is enabled */ 327 track->db_depth_info = 7 | (1 << 25); 328 track->db_depth_view = 0xFFFFC000; 329 track->db_depth_size = 0xFFFFFFFF; 330 track->db_depth_size_idx = 0; 331 track->db_depth_control = 0xFFFFFFFF; 332 } 333 334 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 335 { 336 struct r600_cs_track *track = p->track; 337 u32 slice_tile_max, size, tmp; 338 u32 height, height_align, pitch, pitch_align, depth_align; 339 u64 base_offset, base_align; 340 struct array_mode_checker array_check; 341 volatile u32 *ib = p->ib->ptr; 342 unsigned array_mode; 343 u32 format; 344 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { 345 dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); 346 return -EINVAL; 347 } 348 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 349 format = G_0280A0_FORMAT(track->cb_color_info[i]); 350 if (!fmt_is_valid_color(format)) { 351 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 352 __func__, __LINE__, format, 353 i, track->cb_color_info[i]); 354 return -EINVAL; 355 } 356 /* pitch in pixels */ 357 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; 358 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 359 slice_tile_max *= 64; 360 height = slice_tile_max / pitch; 361 if (height > 8192) 362 height = 8192; 363 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 364 365 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; 366 array_check.array_mode = array_mode; 367 array_check.group_size = track->group_size; 368 array_check.nbanks = track->nbanks; 369 array_check.npipes = track->npipes; 370 array_check.nsamples = track->nsamples; 371 array_check.blocksize = fmt_get_blocksize(format); 372 if (r600_get_array_mode_alignment(&array_check, 373 &pitch_align, &height_align, &depth_align, &base_align)) { 374 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 375 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 376 track->cb_color_info[i]); 377 return -EINVAL; 378 } 379 switch (array_mode) { 380 case V_0280A0_ARRAY_LINEAR_GENERAL: 381 break; 382 case V_0280A0_ARRAY_LINEAR_ALIGNED: 383 break; 384 case V_0280A0_ARRAY_1D_TILED_THIN1: 385 /* avoid breaking userspace */ 386 if (height > 7) 387 height &= ~0x7; 388 break; 389 case V_0280A0_ARRAY_2D_TILED_THIN1: 390 break; 391 default: 392 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 393 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 394 track->cb_color_info[i]); 395 return -EINVAL; 396 } 397 398 if (!IS_ALIGNED(pitch, pitch_align)) { 399 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 400 __func__, __LINE__, pitch, pitch_align, array_mode); 401 return -EINVAL; 402 } 403 if (!IS_ALIGNED(height, height_align)) { 404 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 405 __func__, __LINE__, height, height_align, array_mode); 406 return -EINVAL; 407 } 408 if (!IS_ALIGNED(base_offset, base_align)) { 409 dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, 410 base_offset, base_align, array_mode); 411 return -EINVAL; 412 } 413 414 /* check offset */ 415 tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format); 416 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 417 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 418 /* the initial DDX does bad things with the CB size occasionally */ 419 /* it rounds up height too far for slice tile max but the BO is smaller */ 420 /* r600c,g also seem to flush at bad times in some apps resulting in 421 * bogus values here. So for linear just allow anything to avoid breaking 422 * broken userspace. 423 */ 424 } else { 425 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i, 426 array_mode, 427 track->cb_color_bo_offset[i], tmp, 428 radeon_bo_size(track->cb_color_bo[i])); 429 return -EINVAL; 430 } 431 } 432 /* limit max tile */ 433 tmp = (height * pitch) >> 6; 434 if (tmp < slice_tile_max) 435 slice_tile_max = tmp; 436 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 437 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 438 ib[track->cb_color_size_idx[i]] = tmp; 439 return 0; 440 } 441 442 static int r600_cs_track_check(struct radeon_cs_parser *p) 443 { 444 struct r600_cs_track *track = p->track; 445 u32 tmp; 446 int r, i; 447 volatile u32 *ib = p->ib->ptr; 448 449 /* on legacy kernel we don't perform advanced check */ 450 if (p->rdev == NULL) 451 return 0; 452 /* we don't support out buffer yet */ 453 if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) { 454 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); 455 return -EINVAL; 456 } 457 /* check that we have a cb for each enabled target, we don't check 458 * shader_mask because it seems mesa isn't always setting it :( 459 */ 460 tmp = track->cb_target_mask; 461 for (i = 0; i < 8; i++) { 462 if ((tmp >> (i * 4)) & 0xF) { 463 /* at least one component is enabled */ 464 if (track->cb_color_bo[i] == NULL) { 465 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 466 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 467 return -EINVAL; 468 } 469 /* perform rewrite of CB_COLOR[0-7]_SIZE */ 470 r = r600_cs_track_validate_cb(p, i); 471 if (r) 472 return r; 473 } 474 } 475 /* Check depth buffer */ 476 if (G_028800_STENCIL_ENABLE(track->db_depth_control) || 477 G_028800_Z_ENABLE(track->db_depth_control)) { 478 u32 nviews, bpe, ntiles, size, slice_tile_max; 479 u32 height, height_align, pitch, pitch_align, depth_align; 480 u64 base_offset, base_align; 481 struct array_mode_checker array_check; 482 int array_mode; 483 484 if (track->db_bo == NULL) { 485 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 486 return -EINVAL; 487 } 488 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { 489 dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n"); 490 return -EINVAL; 491 } 492 switch (G_028010_FORMAT(track->db_depth_info)) { 493 case V_028010_DEPTH_16: 494 bpe = 2; 495 break; 496 case V_028010_DEPTH_X8_24: 497 case V_028010_DEPTH_8_24: 498 case V_028010_DEPTH_X8_24_FLOAT: 499 case V_028010_DEPTH_8_24_FLOAT: 500 case V_028010_DEPTH_32_FLOAT: 501 bpe = 4; 502 break; 503 case V_028010_DEPTH_X24_8_32_FLOAT: 504 bpe = 8; 505 break; 506 default: 507 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); 508 return -EINVAL; 509 } 510 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 511 if (!track->db_depth_size_idx) { 512 dev_warn(p->dev, "z/stencil buffer size not set\n"); 513 return -EINVAL; 514 } 515 tmp = radeon_bo_size(track->db_bo) - track->db_offset; 516 tmp = (tmp / bpe) >> 6; 517 if (!tmp) { 518 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 519 track->db_depth_size, bpe, track->db_offset, 520 radeon_bo_size(track->db_bo)); 521 return -EINVAL; 522 } 523 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 524 } else { 525 size = radeon_bo_size(track->db_bo); 526 /* pitch in pixels */ 527 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; 528 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 529 slice_tile_max *= 64; 530 height = slice_tile_max / pitch; 531 if (height > 8192) 532 height = 8192; 533 base_offset = track->db_bo_mc + track->db_offset; 534 array_mode = G_028010_ARRAY_MODE(track->db_depth_info); 535 array_check.array_mode = array_mode; 536 array_check.group_size = track->group_size; 537 array_check.nbanks = track->nbanks; 538 array_check.npipes = track->npipes; 539 array_check.nsamples = track->nsamples; 540 array_check.blocksize = bpe; 541 if (r600_get_array_mode_alignment(&array_check, 542 &pitch_align, &height_align, &depth_align, &base_align)) { 543 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 544 G_028010_ARRAY_MODE(track->db_depth_info), 545 track->db_depth_info); 546 return -EINVAL; 547 } 548 switch (array_mode) { 549 case V_028010_ARRAY_1D_TILED_THIN1: 550 /* don't break userspace */ 551 height &= ~0x7; 552 break; 553 case V_028010_ARRAY_2D_TILED_THIN1: 554 break; 555 default: 556 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 557 G_028010_ARRAY_MODE(track->db_depth_info), 558 track->db_depth_info); 559 return -EINVAL; 560 } 561 562 if (!IS_ALIGNED(pitch, pitch_align)) { 563 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", 564 __func__, __LINE__, pitch, pitch_align, array_mode); 565 return -EINVAL; 566 } 567 if (!IS_ALIGNED(height, height_align)) { 568 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 569 __func__, __LINE__, height, height_align, array_mode); 570 return -EINVAL; 571 } 572 if (!IS_ALIGNED(base_offset, base_align)) { 573 dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, 574 base_offset, base_align, array_mode); 575 return -EINVAL; 576 } 577 578 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 579 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 580 tmp = ntiles * bpe * 64 * nviews; 581 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 582 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 583 array_mode, 584 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 585 radeon_bo_size(track->db_bo)); 586 return -EINVAL; 587 } 588 } 589 } 590 return 0; 591 } 592 593 /** 594 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 595 * @parser: parser structure holding parsing context. 596 * @pkt: where to store packet informations 597 * 598 * Assume that chunk_ib_index is properly set. Will return -EINVAL 599 * if packet is bigger than remaining ib size. or if packets is unknown. 600 **/ 601 int r600_cs_packet_parse(struct radeon_cs_parser *p, 602 struct radeon_cs_packet *pkt, 603 unsigned idx) 604 { 605 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 606 uint32_t header; 607 608 if (idx >= ib_chunk->length_dw) { 609 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 610 idx, ib_chunk->length_dw); 611 return -EINVAL; 612 } 613 header = radeon_get_ib_value(p, idx); 614 pkt->idx = idx; 615 pkt->type = CP_PACKET_GET_TYPE(header); 616 pkt->count = CP_PACKET_GET_COUNT(header); 617 pkt->one_reg_wr = 0; 618 switch (pkt->type) { 619 case PACKET_TYPE0: 620 pkt->reg = CP_PACKET0_GET_REG(header); 621 break; 622 case PACKET_TYPE3: 623 pkt->opcode = CP_PACKET3_GET_OPCODE(header); 624 break; 625 case PACKET_TYPE2: 626 pkt->count = -1; 627 break; 628 default: 629 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); 630 return -EINVAL; 631 } 632 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { 633 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", 634 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); 635 return -EINVAL; 636 } 637 return 0; 638 } 639 640 /** 641 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3 642 * @parser: parser structure holding parsing context. 643 * @data: pointer to relocation data 644 * @offset_start: starting offset 645 * @offset_mask: offset mask (to align start offset on) 646 * @reloc: reloc informations 647 * 648 * Check next packet is relocation packet3, do bo validation and compute 649 * GPU offset using the provided start. 650 **/ 651 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 652 struct radeon_cs_reloc **cs_reloc) 653 { 654 struct radeon_cs_chunk *relocs_chunk; 655 struct radeon_cs_packet p3reloc; 656 unsigned idx; 657 int r; 658 659 if (p->chunk_relocs_idx == -1) { 660 DRM_ERROR("No relocation chunk !\n"); 661 return -EINVAL; 662 } 663 *cs_reloc = NULL; 664 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 665 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 666 if (r) { 667 return r; 668 } 669 p->idx += p3reloc.count + 2; 670 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 671 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 672 p3reloc.idx); 673 return -EINVAL; 674 } 675 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 676 if (idx >= relocs_chunk->length_dw) { 677 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 678 idx, relocs_chunk->length_dw); 679 return -EINVAL; 680 } 681 /* FIXME: we assume reloc size is 4 dwords */ 682 *cs_reloc = p->relocs_ptr[(idx / 4)]; 683 return 0; 684 } 685 686 /** 687 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3 688 * @parser: parser structure holding parsing context. 689 * @data: pointer to relocation data 690 * @offset_start: starting offset 691 * @offset_mask: offset mask (to align start offset on) 692 * @reloc: reloc informations 693 * 694 * Check next packet is relocation packet3, do bo validation and compute 695 * GPU offset using the provided start. 696 **/ 697 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 698 struct radeon_cs_reloc **cs_reloc) 699 { 700 struct radeon_cs_chunk *relocs_chunk; 701 struct radeon_cs_packet p3reloc; 702 unsigned idx; 703 int r; 704 705 if (p->chunk_relocs_idx == -1) { 706 DRM_ERROR("No relocation chunk !\n"); 707 return -EINVAL; 708 } 709 *cs_reloc = NULL; 710 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 711 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 712 if (r) { 713 return r; 714 } 715 p->idx += p3reloc.count + 2; 716 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 717 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 718 p3reloc.idx); 719 return -EINVAL; 720 } 721 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 722 if (idx >= relocs_chunk->length_dw) { 723 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 724 idx, relocs_chunk->length_dw); 725 return -EINVAL; 726 } 727 *cs_reloc = p->relocs; 728 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 729 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 730 return 0; 731 } 732 733 /** 734 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc 735 * @parser: parser structure holding parsing context. 736 * 737 * Check next packet is relocation packet3, do bo validation and compute 738 * GPU offset using the provided start. 739 **/ 740 static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) 741 { 742 struct radeon_cs_packet p3reloc; 743 int r; 744 745 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 746 if (r) { 747 return 0; 748 } 749 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 750 return 0; 751 } 752 return 1; 753 } 754 755 /** 756 * r600_cs_packet_next_vline() - parse userspace VLINE packet 757 * @parser: parser structure holding parsing context. 758 * 759 * Userspace sends a special sequence for VLINE waits. 760 * PACKET0 - VLINE_START_END + value 761 * PACKET3 - WAIT_REG_MEM poll vline status reg 762 * RELOC (P3) - crtc_id in reloc. 763 * 764 * This function parses this and relocates the VLINE START END 765 * and WAIT_REG_MEM packets to the correct crtc. 766 * It also detects a switched off crtc and nulls out the 767 * wait in that case. 768 */ 769 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) 770 { 771 struct drm_mode_object *obj; 772 struct drm_crtc *crtc; 773 struct radeon_crtc *radeon_crtc; 774 struct radeon_cs_packet p3reloc, wait_reg_mem; 775 int crtc_id; 776 int r; 777 uint32_t header, h_idx, reg, wait_reg_mem_info; 778 volatile uint32_t *ib; 779 780 ib = p->ib->ptr; 781 782 /* parse the WAIT_REG_MEM */ 783 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 784 if (r) 785 return r; 786 787 /* check its a WAIT_REG_MEM */ 788 if (wait_reg_mem.type != PACKET_TYPE3 || 789 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 790 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 791 return -EINVAL; 792 } 793 794 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 795 /* bit 4 is reg (0) or mem (1) */ 796 if (wait_reg_mem_info & 0x10) { 797 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); 798 return -EINVAL; 799 } 800 /* waiting for value to be equal */ 801 if ((wait_reg_mem_info & 0x7) != 0x3) { 802 DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 803 return -EINVAL; 804 } 805 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { 806 DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 807 return -EINVAL; 808 } 809 810 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { 811 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 812 return -EINVAL; 813 } 814 815 /* jump over the NOP */ 816 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); 817 if (r) 818 return r; 819 820 h_idx = p->idx - 2; 821 p->idx += wait_reg_mem.count + 2; 822 p->idx += p3reloc.count + 2; 823 824 header = radeon_get_ib_value(p, h_idx); 825 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 826 reg = CP_PACKET0_GET_REG(header); 827 828 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 829 if (!obj) { 830 DRM_ERROR("cannot find crtc %d\n", crtc_id); 831 return -EINVAL; 832 } 833 crtc = obj_to_crtc(obj); 834 radeon_crtc = to_radeon_crtc(crtc); 835 crtc_id = radeon_crtc->crtc_id; 836 837 if (!crtc->enabled) { 838 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ 839 ib[h_idx + 2] = PACKET2(0); 840 ib[h_idx + 3] = PACKET2(0); 841 ib[h_idx + 4] = PACKET2(0); 842 ib[h_idx + 5] = PACKET2(0); 843 ib[h_idx + 6] = PACKET2(0); 844 ib[h_idx + 7] = PACKET2(0); 845 ib[h_idx + 8] = PACKET2(0); 846 } else if (crtc_id == 1) { 847 switch (reg) { 848 case AVIVO_D1MODE_VLINE_START_END: 849 header &= ~R600_CP_PACKET0_REG_MASK; 850 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 851 break; 852 default: 853 DRM_ERROR("unknown crtc reloc\n"); 854 return -EINVAL; 855 } 856 ib[h_idx] = header; 857 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 858 } 859 860 return 0; 861 } 862 863 static int r600_packet0_check(struct radeon_cs_parser *p, 864 struct radeon_cs_packet *pkt, 865 unsigned idx, unsigned reg) 866 { 867 int r; 868 869 switch (reg) { 870 case AVIVO_D1MODE_VLINE_START_END: 871 r = r600_cs_packet_parse_vline(p); 872 if (r) { 873 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 874 idx, reg); 875 return r; 876 } 877 break; 878 default: 879 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 880 reg, idx); 881 return -EINVAL; 882 } 883 return 0; 884 } 885 886 static int r600_cs_parse_packet0(struct radeon_cs_parser *p, 887 struct radeon_cs_packet *pkt) 888 { 889 unsigned reg, i; 890 unsigned idx; 891 int r; 892 893 idx = pkt->idx + 1; 894 reg = pkt->reg; 895 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { 896 r = r600_packet0_check(p, pkt, idx, reg); 897 if (r) { 898 return r; 899 } 900 } 901 return 0; 902 } 903 904 /** 905 * r600_cs_check_reg() - check if register is authorized or not 906 * @parser: parser structure holding parsing context 907 * @reg: register we are testing 908 * @idx: index into the cs buffer 909 * 910 * This function will test against r600_reg_safe_bm and return 0 911 * if register is safe. If register is not flag as safe this function 912 * will test it against a list of register needind special handling. 913 */ 914 static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 915 { 916 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 917 struct radeon_cs_reloc *reloc; 918 u32 m, i, tmp, *ib; 919 int r; 920 921 i = (reg >> 7); 922 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 923 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 924 return -EINVAL; 925 } 926 m = 1 << ((reg >> 2) & 31); 927 if (!(r600_reg_safe_bm[i] & m)) 928 return 0; 929 ib = p->ib->ptr; 930 switch (reg) { 931 /* force following reg to 0 in an attempt to disable out buffer 932 * which will need us to better understand how it works to perform 933 * security check on it (Jerome) 934 */ 935 case R_0288A8_SQ_ESGS_RING_ITEMSIZE: 936 case R_008C44_SQ_ESGS_RING_SIZE: 937 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: 938 case R_008C54_SQ_ESTMP_RING_SIZE: 939 case R_0288C0_SQ_FBUF_RING_ITEMSIZE: 940 case R_008C74_SQ_FBUF_RING_SIZE: 941 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: 942 case R_008C5C_SQ_GSTMP_RING_SIZE: 943 case R_0288AC_SQ_GSVS_RING_ITEMSIZE: 944 case R_008C4C_SQ_GSVS_RING_SIZE: 945 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: 946 case R_008C6C_SQ_PSTMP_RING_SIZE: 947 case R_0288C4_SQ_REDUC_RING_ITEMSIZE: 948 case R_008C7C_SQ_REDUC_RING_SIZE: 949 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: 950 case R_008C64_SQ_VSTMP_RING_SIZE: 951 case R_0288C8_SQ_GS_VERT_ITEMSIZE: 952 /* get value to populate the IB don't remove */ 953 tmp =radeon_get_ib_value(p, idx); 954 ib[idx] = 0; 955 break; 956 case SQ_CONFIG: 957 track->sq_config = radeon_get_ib_value(p, idx); 958 break; 959 case R_028800_DB_DEPTH_CONTROL: 960 track->db_depth_control = radeon_get_ib_value(p, idx); 961 break; 962 case R_028010_DB_DEPTH_INFO: 963 if (r600_cs_packet_next_is_pkt3_nop(p)) { 964 r = r600_cs_packet_next_reloc(p, &reloc); 965 if (r) { 966 dev_warn(p->dev, "bad SET_CONTEXT_REG " 967 "0x%04X\n", reg); 968 return -EINVAL; 969 } 970 track->db_depth_info = radeon_get_ib_value(p, idx); 971 ib[idx] &= C_028010_ARRAY_MODE; 972 track->db_depth_info &= C_028010_ARRAY_MODE; 973 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 974 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 975 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 976 } else { 977 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 978 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 979 } 980 } else 981 track->db_depth_info = radeon_get_ib_value(p, idx); 982 break; 983 case R_028004_DB_DEPTH_VIEW: 984 track->db_depth_view = radeon_get_ib_value(p, idx); 985 break; 986 case R_028000_DB_DEPTH_SIZE: 987 track->db_depth_size = radeon_get_ib_value(p, idx); 988 track->db_depth_size_idx = idx; 989 break; 990 case R_028AB0_VGT_STRMOUT_EN: 991 track->vgt_strmout_en = radeon_get_ib_value(p, idx); 992 break; 993 case R_028B20_VGT_STRMOUT_BUFFER_EN: 994 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); 995 break; 996 case R_028238_CB_TARGET_MASK: 997 track->cb_target_mask = radeon_get_ib_value(p, idx); 998 break; 999 case R_02823C_CB_SHADER_MASK: 1000 track->cb_shader_mask = radeon_get_ib_value(p, idx); 1001 break; 1002 case R_028C04_PA_SC_AA_CONFIG: 1003 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1004 track->nsamples = 1 << tmp; 1005 break; 1006 case R_0280A0_CB_COLOR0_INFO: 1007 case R_0280A4_CB_COLOR1_INFO: 1008 case R_0280A8_CB_COLOR2_INFO: 1009 case R_0280AC_CB_COLOR3_INFO: 1010 case R_0280B0_CB_COLOR4_INFO: 1011 case R_0280B4_CB_COLOR5_INFO: 1012 case R_0280B8_CB_COLOR6_INFO: 1013 case R_0280BC_CB_COLOR7_INFO: 1014 if (r600_cs_packet_next_is_pkt3_nop(p)) { 1015 r = r600_cs_packet_next_reloc(p, &reloc); 1016 if (r) { 1017 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1018 return -EINVAL; 1019 } 1020 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1021 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1022 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1023 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1024 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1025 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1026 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1027 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1028 } 1029 } else { 1030 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1031 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1032 } 1033 break; 1034 case R_028060_CB_COLOR0_SIZE: 1035 case R_028064_CB_COLOR1_SIZE: 1036 case R_028068_CB_COLOR2_SIZE: 1037 case R_02806C_CB_COLOR3_SIZE: 1038 case R_028070_CB_COLOR4_SIZE: 1039 case R_028074_CB_COLOR5_SIZE: 1040 case R_028078_CB_COLOR6_SIZE: 1041 case R_02807C_CB_COLOR7_SIZE: 1042 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; 1043 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); 1044 track->cb_color_size_idx[tmp] = idx; 1045 break; 1046 /* This register were added late, there is userspace 1047 * which does provide relocation for those but set 1048 * 0 offset. In order to avoid breaking old userspace 1049 * we detect this and set address to point to last 1050 * CB_COLOR0_BASE, note that if userspace doesn't set 1051 * CB_COLOR0_BASE before this register we will report 1052 * error. Old userspace always set CB_COLOR0_BASE 1053 * before any of this. 1054 */ 1055 case R_0280E0_CB_COLOR0_FRAG: 1056 case R_0280E4_CB_COLOR1_FRAG: 1057 case R_0280E8_CB_COLOR2_FRAG: 1058 case R_0280EC_CB_COLOR3_FRAG: 1059 case R_0280F0_CB_COLOR4_FRAG: 1060 case R_0280F4_CB_COLOR5_FRAG: 1061 case R_0280F8_CB_COLOR6_FRAG: 1062 case R_0280FC_CB_COLOR7_FRAG: 1063 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; 1064 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1065 if (!track->cb_color_base_last[tmp]) { 1066 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1067 return -EINVAL; 1068 } 1069 ib[idx] = track->cb_color_base_last[tmp]; 1070 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1071 } else { 1072 r = r600_cs_packet_next_reloc(p, &reloc); 1073 if (r) { 1074 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1075 return -EINVAL; 1076 } 1077 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1078 track->cb_color_frag_bo[tmp] = reloc->robj; 1079 } 1080 break; 1081 case R_0280C0_CB_COLOR0_TILE: 1082 case R_0280C4_CB_COLOR1_TILE: 1083 case R_0280C8_CB_COLOR2_TILE: 1084 case R_0280CC_CB_COLOR3_TILE: 1085 case R_0280D0_CB_COLOR4_TILE: 1086 case R_0280D4_CB_COLOR5_TILE: 1087 case R_0280D8_CB_COLOR6_TILE: 1088 case R_0280DC_CB_COLOR7_TILE: 1089 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; 1090 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1091 if (!track->cb_color_base_last[tmp]) { 1092 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1093 return -EINVAL; 1094 } 1095 ib[idx] = track->cb_color_base_last[tmp]; 1096 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1097 } else { 1098 r = r600_cs_packet_next_reloc(p, &reloc); 1099 if (r) { 1100 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1101 return -EINVAL; 1102 } 1103 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1104 track->cb_color_tile_bo[tmp] = reloc->robj; 1105 } 1106 break; 1107 case CB_COLOR0_BASE: 1108 case CB_COLOR1_BASE: 1109 case CB_COLOR2_BASE: 1110 case CB_COLOR3_BASE: 1111 case CB_COLOR4_BASE: 1112 case CB_COLOR5_BASE: 1113 case CB_COLOR6_BASE: 1114 case CB_COLOR7_BASE: 1115 r = r600_cs_packet_next_reloc(p, &reloc); 1116 if (r) { 1117 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1118 "0x%04X\n", reg); 1119 return -EINVAL; 1120 } 1121 tmp = (reg - CB_COLOR0_BASE) / 4; 1122 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1123 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1124 track->cb_color_base_last[tmp] = ib[idx]; 1125 track->cb_color_bo[tmp] = reloc->robj; 1126 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 1127 break; 1128 case DB_DEPTH_BASE: 1129 r = r600_cs_packet_next_reloc(p, &reloc); 1130 if (r) { 1131 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1132 "0x%04X\n", reg); 1133 return -EINVAL; 1134 } 1135 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1136 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1137 track->db_bo = reloc->robj; 1138 track->db_bo_mc = reloc->lobj.gpu_offset; 1139 break; 1140 case DB_HTILE_DATA_BASE: 1141 case SQ_PGM_START_FS: 1142 case SQ_PGM_START_ES: 1143 case SQ_PGM_START_VS: 1144 case SQ_PGM_START_GS: 1145 case SQ_PGM_START_PS: 1146 case SQ_ALU_CONST_CACHE_GS_0: 1147 case SQ_ALU_CONST_CACHE_GS_1: 1148 case SQ_ALU_CONST_CACHE_GS_2: 1149 case SQ_ALU_CONST_CACHE_GS_3: 1150 case SQ_ALU_CONST_CACHE_GS_4: 1151 case SQ_ALU_CONST_CACHE_GS_5: 1152 case SQ_ALU_CONST_CACHE_GS_6: 1153 case SQ_ALU_CONST_CACHE_GS_7: 1154 case SQ_ALU_CONST_CACHE_GS_8: 1155 case SQ_ALU_CONST_CACHE_GS_9: 1156 case SQ_ALU_CONST_CACHE_GS_10: 1157 case SQ_ALU_CONST_CACHE_GS_11: 1158 case SQ_ALU_CONST_CACHE_GS_12: 1159 case SQ_ALU_CONST_CACHE_GS_13: 1160 case SQ_ALU_CONST_CACHE_GS_14: 1161 case SQ_ALU_CONST_CACHE_GS_15: 1162 case SQ_ALU_CONST_CACHE_PS_0: 1163 case SQ_ALU_CONST_CACHE_PS_1: 1164 case SQ_ALU_CONST_CACHE_PS_2: 1165 case SQ_ALU_CONST_CACHE_PS_3: 1166 case SQ_ALU_CONST_CACHE_PS_4: 1167 case SQ_ALU_CONST_CACHE_PS_5: 1168 case SQ_ALU_CONST_CACHE_PS_6: 1169 case SQ_ALU_CONST_CACHE_PS_7: 1170 case SQ_ALU_CONST_CACHE_PS_8: 1171 case SQ_ALU_CONST_CACHE_PS_9: 1172 case SQ_ALU_CONST_CACHE_PS_10: 1173 case SQ_ALU_CONST_CACHE_PS_11: 1174 case SQ_ALU_CONST_CACHE_PS_12: 1175 case SQ_ALU_CONST_CACHE_PS_13: 1176 case SQ_ALU_CONST_CACHE_PS_14: 1177 case SQ_ALU_CONST_CACHE_PS_15: 1178 case SQ_ALU_CONST_CACHE_VS_0: 1179 case SQ_ALU_CONST_CACHE_VS_1: 1180 case SQ_ALU_CONST_CACHE_VS_2: 1181 case SQ_ALU_CONST_CACHE_VS_3: 1182 case SQ_ALU_CONST_CACHE_VS_4: 1183 case SQ_ALU_CONST_CACHE_VS_5: 1184 case SQ_ALU_CONST_CACHE_VS_6: 1185 case SQ_ALU_CONST_CACHE_VS_7: 1186 case SQ_ALU_CONST_CACHE_VS_8: 1187 case SQ_ALU_CONST_CACHE_VS_9: 1188 case SQ_ALU_CONST_CACHE_VS_10: 1189 case SQ_ALU_CONST_CACHE_VS_11: 1190 case SQ_ALU_CONST_CACHE_VS_12: 1191 case SQ_ALU_CONST_CACHE_VS_13: 1192 case SQ_ALU_CONST_CACHE_VS_14: 1193 case SQ_ALU_CONST_CACHE_VS_15: 1194 r = r600_cs_packet_next_reloc(p, &reloc); 1195 if (r) { 1196 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1197 "0x%04X\n", reg); 1198 return -EINVAL; 1199 } 1200 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1201 break; 1202 case SX_MEMORY_EXPORT_BASE: 1203 r = r600_cs_packet_next_reloc(p, &reloc); 1204 if (r) { 1205 dev_warn(p->dev, "bad SET_CONFIG_REG " 1206 "0x%04X\n", reg); 1207 return -EINVAL; 1208 } 1209 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1210 break; 1211 default: 1212 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1213 return -EINVAL; 1214 } 1215 return 0; 1216 } 1217 1218 static inline unsigned mip_minify(unsigned size, unsigned level) 1219 { 1220 unsigned val; 1221 1222 val = max(1U, size >> level); 1223 if (level > 0) 1224 val = roundup_pow_of_two(val); 1225 return val; 1226 } 1227 1228 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1229 unsigned w0, unsigned h0, unsigned d0, unsigned format, 1230 unsigned block_align, unsigned height_align, unsigned base_align, 1231 unsigned *l0_size, unsigned *mipmap_size) 1232 { 1233 unsigned offset, i, level; 1234 unsigned width, height, depth, size; 1235 unsigned blocksize; 1236 unsigned nbx, nby; 1237 unsigned nlevels = llevel - blevel + 1; 1238 1239 *l0_size = -1; 1240 blocksize = fmt_get_blocksize(format); 1241 1242 w0 = mip_minify(w0, 0); 1243 h0 = mip_minify(h0, 0); 1244 d0 = mip_minify(d0, 0); 1245 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { 1246 width = mip_minify(w0, i); 1247 nbx = fmt_get_nblocksx(format, width); 1248 1249 nbx = round_up(nbx, block_align); 1250 1251 height = mip_minify(h0, i); 1252 nby = fmt_get_nblocksy(format, height); 1253 nby = round_up(nby, height_align); 1254 1255 depth = mip_minify(d0, i); 1256 1257 size = nbx * nby * blocksize; 1258 if (nfaces) 1259 size *= nfaces; 1260 else 1261 size *= depth; 1262 1263 if (i == 0) 1264 *l0_size = size; 1265 1266 if (i == 0 || i == 1) 1267 offset = round_up(offset, base_align); 1268 1269 offset += size; 1270 } 1271 *mipmap_size = offset; 1272 if (llevel == 0) 1273 *mipmap_size = *l0_size; 1274 if (!blevel) 1275 *mipmap_size -= *l0_size; 1276 } 1277 1278 /** 1279 * r600_check_texture_resource() - check if register is authorized or not 1280 * @p: parser structure holding parsing context 1281 * @idx: index into the cs buffer 1282 * @texture: texture's bo structure 1283 * @mipmap: mipmap's bo structure 1284 * 1285 * This function will check that the resource has valid field and that 1286 * the texture and mipmap bo object are big enough to cover this resource. 1287 */ 1288 static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1289 struct radeon_bo *texture, 1290 struct radeon_bo *mipmap, 1291 u64 base_offset, 1292 u64 mip_offset, 1293 u32 tiling_flags) 1294 { 1295 struct r600_cs_track *track = p->track; 1296 u32 nfaces, llevel, blevel, w0, h0, d0; 1297 u32 word0, word1, l0_size, mipmap_size, word2, word3; 1298 u32 height_align, pitch, pitch_align, depth_align; 1299 u32 array, barray, larray; 1300 u64 base_align; 1301 struct array_mode_checker array_check; 1302 u32 format; 1303 1304 /* on legacy kernel we don't perform advanced check */ 1305 if (p->rdev == NULL) 1306 return 0; 1307 1308 /* convert to bytes */ 1309 base_offset <<= 8; 1310 mip_offset <<= 8; 1311 1312 word0 = radeon_get_ib_value(p, idx + 0); 1313 if (tiling_flags & RADEON_TILING_MACRO) 1314 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1315 else if (tiling_flags & RADEON_TILING_MICRO) 1316 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1317 word1 = radeon_get_ib_value(p, idx + 1); 1318 w0 = G_038000_TEX_WIDTH(word0) + 1; 1319 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1320 d0 = G_038004_TEX_DEPTH(word1); 1321 nfaces = 1; 1322 switch (G_038000_DIM(word0)) { 1323 case V_038000_SQ_TEX_DIM_1D: 1324 case V_038000_SQ_TEX_DIM_2D: 1325 case V_038000_SQ_TEX_DIM_3D: 1326 break; 1327 case V_038000_SQ_TEX_DIM_CUBEMAP: 1328 if (p->family >= CHIP_RV770) 1329 nfaces = 8; 1330 else 1331 nfaces = 6; 1332 break; 1333 case V_038000_SQ_TEX_DIM_1D_ARRAY: 1334 case V_038000_SQ_TEX_DIM_2D_ARRAY: 1335 array = 1; 1336 break; 1337 case V_038000_SQ_TEX_DIM_2D_MSAA: 1338 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: 1339 default: 1340 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1341 return -EINVAL; 1342 } 1343 format = G_038004_DATA_FORMAT(word1); 1344 if (!fmt_is_valid_texture(format, p->family)) { 1345 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1346 __func__, __LINE__, format); 1347 return -EINVAL; 1348 } 1349 1350 /* pitch in texels */ 1351 pitch = (G_038000_PITCH(word0) + 1) * 8; 1352 array_check.array_mode = G_038000_TILE_MODE(word0); 1353 array_check.group_size = track->group_size; 1354 array_check.nbanks = track->nbanks; 1355 array_check.npipes = track->npipes; 1356 array_check.nsamples = 1; 1357 array_check.blocksize = fmt_get_blocksize(format); 1358 if (r600_get_array_mode_alignment(&array_check, 1359 &pitch_align, &height_align, &depth_align, &base_align)) { 1360 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1361 __func__, __LINE__, G_038000_TILE_MODE(word0)); 1362 return -EINVAL; 1363 } 1364 1365 /* XXX check height as well... */ 1366 1367 if (!IS_ALIGNED(pitch, pitch_align)) { 1368 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", 1369 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); 1370 return -EINVAL; 1371 } 1372 if (!IS_ALIGNED(base_offset, base_align)) { 1373 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", 1374 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); 1375 return -EINVAL; 1376 } 1377 if (!IS_ALIGNED(mip_offset, base_align)) { 1378 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", 1379 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); 1380 return -EINVAL; 1381 } 1382 1383 word2 = radeon_get_ib_value(p, idx + 2) << 8; 1384 word3 = radeon_get_ib_value(p, idx + 3) << 8; 1385 1386 word0 = radeon_get_ib_value(p, idx + 4); 1387 word1 = radeon_get_ib_value(p, idx + 5); 1388 blevel = G_038010_BASE_LEVEL(word0); 1389 llevel = G_038014_LAST_LEVEL(word1); 1390 if (array == 1) { 1391 barray = G_038014_BASE_ARRAY(word1); 1392 larray = G_038014_LAST_ARRAY(word1); 1393 1394 nfaces = larray - barray + 1; 1395 } 1396 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, 1397 pitch_align, height_align, base_align, 1398 &l0_size, &mipmap_size); 1399 /* using get ib will give us the offset into the texture bo */ 1400 if ((l0_size + word2) > radeon_bo_size(texture)) { 1401 dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", 1402 w0, h0, format, word2, l0_size, radeon_bo_size(texture)); 1403 dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); 1404 return -EINVAL; 1405 } 1406 /* using get ib will give us the offset into the mipmap bo */ 1407 word3 = radeon_get_ib_value(p, idx + 3) << 8; 1408 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { 1409 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1410 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ 1411 } 1412 return 0; 1413 } 1414 1415 static int r600_packet3_check(struct radeon_cs_parser *p, 1416 struct radeon_cs_packet *pkt) 1417 { 1418 struct radeon_cs_reloc *reloc; 1419 struct r600_cs_track *track; 1420 volatile u32 *ib; 1421 unsigned idx; 1422 unsigned i; 1423 unsigned start_reg, end_reg, reg; 1424 int r; 1425 u32 idx_value; 1426 1427 track = (struct r600_cs_track *)p->track; 1428 ib = p->ib->ptr; 1429 idx = pkt->idx + 1; 1430 idx_value = radeon_get_ib_value(p, idx); 1431 1432 switch (pkt->opcode) { 1433 case PACKET3_SET_PREDICATION: 1434 { 1435 int pred_op; 1436 int tmp; 1437 if (pkt->count != 1) { 1438 DRM_ERROR("bad SET PREDICATION\n"); 1439 return -EINVAL; 1440 } 1441 1442 tmp = radeon_get_ib_value(p, idx + 1); 1443 pred_op = (tmp >> 16) & 0x7; 1444 1445 /* for the clear predicate operation */ 1446 if (pred_op == 0) 1447 return 0; 1448 1449 if (pred_op > 2) { 1450 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); 1451 return -EINVAL; 1452 } 1453 1454 r = r600_cs_packet_next_reloc(p, &reloc); 1455 if (r) { 1456 DRM_ERROR("bad SET PREDICATION\n"); 1457 return -EINVAL; 1458 } 1459 1460 ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1461 ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff); 1462 } 1463 break; 1464 1465 case PACKET3_START_3D_CMDBUF: 1466 if (p->family >= CHIP_RV770 || pkt->count) { 1467 DRM_ERROR("bad START_3D\n"); 1468 return -EINVAL; 1469 } 1470 break; 1471 case PACKET3_CONTEXT_CONTROL: 1472 if (pkt->count != 1) { 1473 DRM_ERROR("bad CONTEXT_CONTROL\n"); 1474 return -EINVAL; 1475 } 1476 break; 1477 case PACKET3_INDEX_TYPE: 1478 case PACKET3_NUM_INSTANCES: 1479 if (pkt->count) { 1480 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); 1481 return -EINVAL; 1482 } 1483 break; 1484 case PACKET3_DRAW_INDEX: 1485 if (pkt->count != 3) { 1486 DRM_ERROR("bad DRAW_INDEX\n"); 1487 return -EINVAL; 1488 } 1489 r = r600_cs_packet_next_reloc(p, &reloc); 1490 if (r) { 1491 DRM_ERROR("bad DRAW_INDEX\n"); 1492 return -EINVAL; 1493 } 1494 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1495 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1496 r = r600_cs_track_check(p); 1497 if (r) { 1498 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1499 return r; 1500 } 1501 break; 1502 case PACKET3_DRAW_INDEX_AUTO: 1503 if (pkt->count != 1) { 1504 DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1505 return -EINVAL; 1506 } 1507 r = r600_cs_track_check(p); 1508 if (r) { 1509 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1510 return r; 1511 } 1512 break; 1513 case PACKET3_DRAW_INDEX_IMMD_BE: 1514 case PACKET3_DRAW_INDEX_IMMD: 1515 if (pkt->count < 2) { 1516 DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1517 return -EINVAL; 1518 } 1519 r = r600_cs_track_check(p); 1520 if (r) { 1521 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1522 return r; 1523 } 1524 break; 1525 case PACKET3_WAIT_REG_MEM: 1526 if (pkt->count != 5) { 1527 DRM_ERROR("bad WAIT_REG_MEM\n"); 1528 return -EINVAL; 1529 } 1530 /* bit 4 is reg (0) or mem (1) */ 1531 if (idx_value & 0x10) { 1532 r = r600_cs_packet_next_reloc(p, &reloc); 1533 if (r) { 1534 DRM_ERROR("bad WAIT_REG_MEM\n"); 1535 return -EINVAL; 1536 } 1537 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1538 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1539 } 1540 break; 1541 case PACKET3_SURFACE_SYNC: 1542 if (pkt->count != 3) { 1543 DRM_ERROR("bad SURFACE_SYNC\n"); 1544 return -EINVAL; 1545 } 1546 /* 0xffffffff/0x0 is flush all cache flag */ 1547 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || 1548 radeon_get_ib_value(p, idx + 2) != 0) { 1549 r = r600_cs_packet_next_reloc(p, &reloc); 1550 if (r) { 1551 DRM_ERROR("bad SURFACE_SYNC\n"); 1552 return -EINVAL; 1553 } 1554 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1555 } 1556 break; 1557 case PACKET3_EVENT_WRITE: 1558 if (pkt->count != 2 && pkt->count != 0) { 1559 DRM_ERROR("bad EVENT_WRITE\n"); 1560 return -EINVAL; 1561 } 1562 if (pkt->count) { 1563 r = r600_cs_packet_next_reloc(p, &reloc); 1564 if (r) { 1565 DRM_ERROR("bad EVENT_WRITE\n"); 1566 return -EINVAL; 1567 } 1568 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1569 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1570 } 1571 break; 1572 case PACKET3_EVENT_WRITE_EOP: 1573 if (pkt->count != 4) { 1574 DRM_ERROR("bad EVENT_WRITE_EOP\n"); 1575 return -EINVAL; 1576 } 1577 r = r600_cs_packet_next_reloc(p, &reloc); 1578 if (r) { 1579 DRM_ERROR("bad EVENT_WRITE\n"); 1580 return -EINVAL; 1581 } 1582 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1583 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1584 break; 1585 case PACKET3_SET_CONFIG_REG: 1586 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; 1587 end_reg = 4 * pkt->count + start_reg - 4; 1588 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 1589 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 1590 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 1591 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 1592 return -EINVAL; 1593 } 1594 for (i = 0; i < pkt->count; i++) { 1595 reg = start_reg + (4 * i); 1596 r = r600_cs_check_reg(p, reg, idx+1+i); 1597 if (r) 1598 return r; 1599 } 1600 break; 1601 case PACKET3_SET_CONTEXT_REG: 1602 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; 1603 end_reg = 4 * pkt->count + start_reg - 4; 1604 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || 1605 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 1606 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { 1607 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); 1608 return -EINVAL; 1609 } 1610 for (i = 0; i < pkt->count; i++) { 1611 reg = start_reg + (4 * i); 1612 r = r600_cs_check_reg(p, reg, idx+1+i); 1613 if (r) 1614 return r; 1615 } 1616 break; 1617 case PACKET3_SET_RESOURCE: 1618 if (pkt->count % 7) { 1619 DRM_ERROR("bad SET_RESOURCE\n"); 1620 return -EINVAL; 1621 } 1622 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; 1623 end_reg = 4 * pkt->count + start_reg - 4; 1624 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 1625 (start_reg >= PACKET3_SET_RESOURCE_END) || 1626 (end_reg >= PACKET3_SET_RESOURCE_END)) { 1627 DRM_ERROR("bad SET_RESOURCE\n"); 1628 return -EINVAL; 1629 } 1630 for (i = 0; i < (pkt->count / 7); i++) { 1631 struct radeon_bo *texture, *mipmap; 1632 u32 size, offset, base_offset, mip_offset; 1633 1634 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 1635 case SQ_TEX_VTX_VALID_TEXTURE: 1636 /* tex base */ 1637 r = r600_cs_packet_next_reloc(p, &reloc); 1638 if (r) { 1639 DRM_ERROR("bad SET_RESOURCE\n"); 1640 return -EINVAL; 1641 } 1642 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1643 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1644 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1645 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1646 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1647 texture = reloc->robj; 1648 /* tex mip base */ 1649 r = r600_cs_packet_next_reloc(p, &reloc); 1650 if (r) { 1651 DRM_ERROR("bad SET_RESOURCE\n"); 1652 return -EINVAL; 1653 } 1654 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1655 mipmap = reloc->robj; 1656 r = r600_check_texture_resource(p, idx+(i*7)+1, 1657 texture, mipmap, 1658 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), 1659 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), 1660 reloc->lobj.tiling_flags); 1661 if (r) 1662 return r; 1663 ib[idx+1+(i*7)+2] += base_offset; 1664 ib[idx+1+(i*7)+3] += mip_offset; 1665 break; 1666 case SQ_TEX_VTX_VALID_BUFFER: 1667 /* vtx base */ 1668 r = r600_cs_packet_next_reloc(p, &reloc); 1669 if (r) { 1670 DRM_ERROR("bad SET_RESOURCE\n"); 1671 return -EINVAL; 1672 } 1673 offset = radeon_get_ib_value(p, idx+1+(i*7)+0); 1674 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1; 1675 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { 1676 /* force size to size of the buffer */ 1677 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 1678 size + offset, radeon_bo_size(reloc->robj)); 1679 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj); 1680 } 1681 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 1682 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1683 break; 1684 case SQ_TEX_VTX_INVALID_TEXTURE: 1685 case SQ_TEX_VTX_INVALID_BUFFER: 1686 default: 1687 DRM_ERROR("bad SET_RESOURCE\n"); 1688 return -EINVAL; 1689 } 1690 } 1691 break; 1692 case PACKET3_SET_ALU_CONST: 1693 if (track->sq_config & DX9_CONSTS) { 1694 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; 1695 end_reg = 4 * pkt->count + start_reg - 4; 1696 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 1697 (start_reg >= PACKET3_SET_ALU_CONST_END) || 1698 (end_reg >= PACKET3_SET_ALU_CONST_END)) { 1699 DRM_ERROR("bad SET_ALU_CONST\n"); 1700 return -EINVAL; 1701 } 1702 } 1703 break; 1704 case PACKET3_SET_BOOL_CONST: 1705 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; 1706 end_reg = 4 * pkt->count + start_reg - 4; 1707 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 1708 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 1709 (end_reg >= PACKET3_SET_BOOL_CONST_END)) { 1710 DRM_ERROR("bad SET_BOOL_CONST\n"); 1711 return -EINVAL; 1712 } 1713 break; 1714 case PACKET3_SET_LOOP_CONST: 1715 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; 1716 end_reg = 4 * pkt->count + start_reg - 4; 1717 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 1718 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 1719 (end_reg >= PACKET3_SET_LOOP_CONST_END)) { 1720 DRM_ERROR("bad SET_LOOP_CONST\n"); 1721 return -EINVAL; 1722 } 1723 break; 1724 case PACKET3_SET_CTL_CONST: 1725 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; 1726 end_reg = 4 * pkt->count + start_reg - 4; 1727 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 1728 (start_reg >= PACKET3_SET_CTL_CONST_END) || 1729 (end_reg >= PACKET3_SET_CTL_CONST_END)) { 1730 DRM_ERROR("bad SET_CTL_CONST\n"); 1731 return -EINVAL; 1732 } 1733 break; 1734 case PACKET3_SET_SAMPLER: 1735 if (pkt->count % 3) { 1736 DRM_ERROR("bad SET_SAMPLER\n"); 1737 return -EINVAL; 1738 } 1739 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; 1740 end_reg = 4 * pkt->count + start_reg - 4; 1741 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 1742 (start_reg >= PACKET3_SET_SAMPLER_END) || 1743 (end_reg >= PACKET3_SET_SAMPLER_END)) { 1744 DRM_ERROR("bad SET_SAMPLER\n"); 1745 return -EINVAL; 1746 } 1747 break; 1748 case PACKET3_SURFACE_BASE_UPDATE: 1749 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { 1750 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 1751 return -EINVAL; 1752 } 1753 if (pkt->count) { 1754 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 1755 return -EINVAL; 1756 } 1757 break; 1758 case PACKET3_NOP: 1759 break; 1760 default: 1761 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 1762 return -EINVAL; 1763 } 1764 return 0; 1765 } 1766 1767 int r600_cs_parse(struct radeon_cs_parser *p) 1768 { 1769 struct radeon_cs_packet pkt; 1770 struct r600_cs_track *track; 1771 int r; 1772 1773 if (p->track == NULL) { 1774 /* initialize tracker, we are in kms */ 1775 track = kzalloc(sizeof(*track), GFP_KERNEL); 1776 if (track == NULL) 1777 return -ENOMEM; 1778 r600_cs_track_init(track); 1779 if (p->rdev->family < CHIP_RV770) { 1780 track->npipes = p->rdev->config.r600.tiling_npipes; 1781 track->nbanks = p->rdev->config.r600.tiling_nbanks; 1782 track->group_size = p->rdev->config.r600.tiling_group_size; 1783 } else if (p->rdev->family <= CHIP_RV740) { 1784 track->npipes = p->rdev->config.rv770.tiling_npipes; 1785 track->nbanks = p->rdev->config.rv770.tiling_nbanks; 1786 track->group_size = p->rdev->config.rv770.tiling_group_size; 1787 } 1788 p->track = track; 1789 } 1790 do { 1791 r = r600_cs_packet_parse(p, &pkt, p->idx); 1792 if (r) { 1793 kfree(p->track); 1794 p->track = NULL; 1795 return r; 1796 } 1797 p->idx += pkt.count + 2; 1798 switch (pkt.type) { 1799 case PACKET_TYPE0: 1800 r = r600_cs_parse_packet0(p, &pkt); 1801 break; 1802 case PACKET_TYPE2: 1803 break; 1804 case PACKET_TYPE3: 1805 r = r600_packet3_check(p, &pkt); 1806 break; 1807 default: 1808 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 1809 kfree(p->track); 1810 p->track = NULL; 1811 return -EINVAL; 1812 } 1813 if (r) { 1814 kfree(p->track); 1815 p->track = NULL; 1816 return r; 1817 } 1818 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1819 #if 0 1820 for (r = 0; r < p->ib->length_dw; r++) { 1821 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); 1822 mdelay(1); 1823 } 1824 #endif 1825 kfree(p->track); 1826 p->track = NULL; 1827 return 0; 1828 } 1829 1830 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) 1831 { 1832 if (p->chunk_relocs_idx == -1) { 1833 return 0; 1834 } 1835 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); 1836 if (p->relocs == NULL) { 1837 return -ENOMEM; 1838 } 1839 return 0; 1840 } 1841 1842 /** 1843 * cs_parser_fini() - clean parser states 1844 * @parser: parser structure holding parsing context. 1845 * @error: error number 1846 * 1847 * If error is set than unvalidate buffer, otherwise just free memory 1848 * used by parsing context. 1849 **/ 1850 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) 1851 { 1852 unsigned i; 1853 1854 kfree(parser->relocs); 1855 for (i = 0; i < parser->nchunks; i++) { 1856 kfree(parser->chunks[i].kdata); 1857 kfree(parser->chunks[i].kpage[0]); 1858 kfree(parser->chunks[i].kpage[1]); 1859 } 1860 kfree(parser->chunks); 1861 kfree(parser->chunks_array); 1862 } 1863 1864 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, 1865 unsigned family, u32 *ib, int *l) 1866 { 1867 struct radeon_cs_parser parser; 1868 struct radeon_cs_chunk *ib_chunk; 1869 struct radeon_ib fake_ib; 1870 struct r600_cs_track *track; 1871 int r; 1872 1873 /* initialize tracker */ 1874 track = kzalloc(sizeof(*track), GFP_KERNEL); 1875 if (track == NULL) 1876 return -ENOMEM; 1877 r600_cs_track_init(track); 1878 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size); 1879 /* initialize parser */ 1880 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 1881 parser.filp = filp; 1882 parser.dev = &dev->pdev->dev; 1883 parser.rdev = NULL; 1884 parser.family = family; 1885 parser.ib = &fake_ib; 1886 parser.track = track; 1887 fake_ib.ptr = ib; 1888 r = radeon_cs_parser_init(&parser, data); 1889 if (r) { 1890 DRM_ERROR("Failed to initialize parser !\n"); 1891 r600_cs_parser_fini(&parser, r); 1892 return r; 1893 } 1894 r = r600_cs_parser_relocs_legacy(&parser); 1895 if (r) { 1896 DRM_ERROR("Failed to parse relocation !\n"); 1897 r600_cs_parser_fini(&parser, r); 1898 return r; 1899 } 1900 /* Copy the packet into the IB, the parser will read from the 1901 * input memory (cached) and write to the IB (which can be 1902 * uncached). */ 1903 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 1904 parser.ib->length_dw = ib_chunk->length_dw; 1905 *l = parser.ib->length_dw; 1906 r = r600_cs_parse(&parser); 1907 if (r) { 1908 DRM_ERROR("Invalid command stream !\n"); 1909 r600_cs_parser_fini(&parser, r); 1910 return r; 1911 } 1912 r = radeon_cs_finish_pages(&parser); 1913 if (r) { 1914 DRM_ERROR("Invalid command stream !\n"); 1915 r600_cs_parser_fini(&parser, r); 1916 return r; 1917 } 1918 r600_cs_parser_fini(&parser, r); 1919 return r; 1920 } 1921 1922 void r600_cs_legacy_init(void) 1923 { 1924 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm; 1925 } 1926