/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <drm/drm_util.h>

#define ATOM_DEBUG

#include "atomfirmware.h"
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "amdgpu.h"

#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

#define ATOM_PORT_ATI	0
#define ATOM_PORT_PCI	1
#define ATOM_PORT_SYSIO	2

#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

#define PLL_INDEX	2
#define PLL_DATA	3

#define ATOM_CMD_TIMEOUT_SEC	20

typedef struct {
	struct atom_context *ctx;
	uint32_t *ps, *ws;
	int ps_shift;
	uint16_t start;
	unsigned last_jump;
	unsigned long last_jump_jiffies;
	bool abort;
} atom_exec_context;

int amdgpu_atom_debug;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);

static uint32_t atom_arg_mask[8] = {
	0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000,
	0xFF, 0xFF00, 0xFF0000, 0xFF000000
};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };

static int debug_depth;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 3));
			temp |= ((index >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
				CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 3));
			temp |= ((data >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
				CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 3));
			temp |= ((ctx->io_attr >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
				CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}

static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val = atom_iio_execute(gctx,
					       gctx->iio[gctx->io_mode & 0x7F],
					       idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha.
		 */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}

static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t align = (attr >> 3) & 7, arg = attr & 7;

	switch (arg) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		(*ptr) += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		(*ptr)++;
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			(*ptr) += 4;
			return;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			(*ptr) += 2;
			return;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			(*ptr)++;
			return;
		}
		return;
	}
}

static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}

static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}

static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}

static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
			  ptr);
}

static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align =
	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val = val, idx;
	struct atom_context *gctx = ctx->ctx;

	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx,
						      val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return;
			}
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				pr_info("Undefined indirect IO write method %d\n",
					gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}

static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst += src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}

static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}

static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}

static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = (dst == src);
	ctx->ctx->cs_above = (dst > src);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}

static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);

	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}

static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		ctx->ctx->divmul[0] = dst / src;
		ctx->ctx->divmul[1] = dst % src;
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		val64 = dst;
		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
		do_div(val64, src);
		ctx->ctx->divmul[0] = lower_32_bits(val64);
		ctx->ctx->divmul[1] = upper_32_bits(val64);
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}

static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
					DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
						  ATOM_CMD_TIMEOUT_SEC);
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around; just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;

	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	val64 = (uint64_t)dst * (uint64_t)src;
	ctx->ctx->divmul[0] = lower_32_bits(val64);
	ctx->ctx->divmul[1] = upper_32_bits(val64);
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);

	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);

	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);

	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;

	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;

	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					   ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);

	SDEBUG("DEBUG output: 0x%02X\n", val);
}

static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
{
	uint16_t val = U16(*ptr);

	(*ptr) += val + 2;
	SDEBUG("PROCESSDS output: 0x%02X\n", val);
}

/*
 * Opcode dispatch table, indexed by AtomBIOS opcode.  The arg field selects
 * the destination operand type, jump condition, port or delay unit that is
 * passed to the handler.
 */
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
	{ atom_op_processds, 0 },
	{ atom_op_mul32, ATOM_ARG_PS },
	{ atom_op_mul32, ATOM_ARG_WS },
	{ atom_op_div32, ATOM_ARG_PS },
	{ atom_op_div32, ATOM_ARG_WS },
};

static int amdgpu_atom_execute_table_locked(struct atom_context *ctx,
					    int index, uint32_t *params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws)
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
	else
		ectx.ws = NULL;

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	if (ws)
		kfree(ectx.ws);
	return ret;
}

int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = amdgpu_atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
	return r;
}

static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

static void atom_get_vbios_name(struct atom_context *ctx)
{
	unsigned char *p_rom;
	unsigned char str_num;
	unsigned short off_to_vbios_str;
	unsigned char *c_ptr;
	int name_size;
	int i;

	const char *na = "--N/A--";
	char *back;

	p_rom = ctx->bios;

	str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
	if (str_num != 0) {
		off_to_vbios_str =
			*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);

		c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
	} else {
		/* do not know where to find name */
		memcpy(ctx->name, na, 7);
		ctx->name[7] = 0;
		return;
	}

	/*
	 * skip the atombios strings, usually 4
	 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
	 */
	for (i = 0; i < str_num; i++) {
		while (*c_ptr != 0)
			c_ptr++;
		c_ptr++;
	}

	/* skip the following 2 chars: 0x0D 0x0A */
	c_ptr += 2;

	name_size = strnlen(c_ptr, STRLEN_LONG - 1);
	memcpy(ctx->name, c_ptr, name_size);
	back = ctx->name + name_size;
	while ((*--back) == ' ')
		;
	*(back + 1) = '\0';
}

static void atom_get_vbios_date(struct atom_context *ctx)
{
	unsigned char *p_rom;
	unsigned char *date_in_rom;

	p_rom = ctx->bios;

	date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;

	ctx->date[0] = '2';
	ctx->date[1] = '0';
	ctx->date[2] = date_in_rom[6];
	ctx->date[3] = date_in_rom[7];
	ctx->date[4] = '/';
	ctx->date[5] = date_in_rom[0];
	ctx->date[6] = date_in_rom[1];
	ctx->date[7] = '/';
	ctx->date[8] = date_in_rom[3];
	ctx->date[9] = date_in_rom[4];
	ctx->date[10] = ' ';
	ctx->date[11] = date_in_rom[9];
	ctx->date[12] = date_in_rom[10];
	ctx->date[13] = date_in_rom[11];
	ctx->date[14] = date_in_rom[12];
	ctx->date[15] = date_in_rom[13];
	ctx->date[16] = '\0';
}

static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
					   int end, int maxlen)
{
	unsigned long str_off;
	unsigned char *p_rom;
	unsigned short str_len;

	str_off = 0;
	str_len = strnlen(str, maxlen);
	p_rom = ctx->bios;

	for (; start <= end; ++start) {
		for (str_off = 0; str_off < str_len; ++str_off) {
			if (str[str_off] != *(p_rom + start + str_off))
				break;
		}

		if (str_off == str_len || str[str_off] == 0)
			return p_rom + start;
	}
	return NULL;
}

static void atom_get_vbios_pn(struct atom_context *ctx)
{
	unsigned char *p_rom;
	unsigned short off_to_vbios_str;
	unsigned char *vbios_str;
	int count;

	off_to_vbios_str = 0;
	p_rom = ctx->bios;

	if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
		off_to_vbios_str =
			*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);

		vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
	} else {
		vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
	}

	if (*vbios_str == 0) {
		vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
		if (vbios_str == NULL)
			vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
	}
	if (vbios_str != NULL && *vbios_str == 0)
		vbios_str++;

	if (vbios_str != NULL) {
		count = 0;
		while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
		       vbios_str[count] <= 'z') {
			ctx->vbios_pn[count] = vbios_str[count];
			count++;
		}

		ctx->vbios_pn[count] = 0;
	}
}

static void atom_get_vbios_version(struct atom_context *ctx)
{
	unsigned char *vbios_ver;

	/* find anchor ATOMBIOSBK-AMD */
	vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64);
	if (vbios_ver != NULL) {
		/* skip ATOMBIOSBK-AMD VER */
		vbios_ver += 18;
		memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
	} else {
		ctx->vbios_ver_str[0] = '\0';
	}
}

struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	struct _ATOM_ROM_HEADER *atom_rom_header;
	struct _ATOM_MASTER_DATA_TABLE *master_table;
	struct _ATOM_FIRMWARE_INFO *atom_fw_info;
	u16 idx;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
		    strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
		    strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		amdgpu_atom_destroy(ctx);
		return NULL;
	}

	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
	if (idx == 0)
		idx = 0x80;

	str = CSTR(idx);
	if (*str != '\0') {
		pr_info("ATOM BIOS: %s\n", str);
		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
	}

	atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
	if (atom_rom_header->usMasterDataTableOffset != 0) {
		master_table = (struct _ATOM_MASTER_DATA_TABLE *)
			       CSTR(atom_rom_header->usMasterDataTableOffset);
		if (master_table->ListOfDataTables.FirmwareInfo != 0) {
			atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
				       CSTR(master_table->ListOfDataTables.FirmwareInfo);
			ctx->version = atom_fw_info->ulFirmwareRevision;
		}
	}

	atom_get_vbios_name(ctx);
	atom_get_vbios_pn(ctx);
	atom_get_vbios_date(ctx);
	atom_get_vbios_version(ctx);

	return ctx;
}

int amdgpu_atom_asic_init(struct atom_context *ctx)
{
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, 64);

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
	if (ret)
		return ret;

	memset(ps, 0, 64);

	return ret;
}

void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
				   uint16_t *size, uint8_t *frev, uint8_t *crev,
				   uint16_t *data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
				  uint8_t *crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}