/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"

/**
 * DOC: DMC Firmware Support
 *
 * From gen9 onwards we have newly added DMC (Display microcontroller) in display
 * engine to save and restore the state of display engine when it enters into
 * a low-power state and comes back to normal.
 */

/* Firmware version is packed as major in the high 16 bits, minor in the low. */
#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)	((version) >> 16)
#define DMC_VERSION_MINOR(version)	((version) & 0xffff)

/* Builds the per-platform firmware blob path, e.g. "i915/tgl_dmc_ver2_12.bin". */
#define DMC_PATH(platform, major, minor) \
	"i915/" \
	__stringify(platform) "_dmc_ver" \
	__stringify(major) "_" \
	__stringify(minor) ".bin"

#define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000

#define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

#define DG2_DMC_PATH			DMC_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);

#define ADLP_DMC_PATH			DMC_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);

#define ADLS_DMC_PATH			DMC_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);

#define DG1_DMC_PATH			DMC_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);

#define RKL_DMC_PATH			DMC_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);

#define TGL_DMC_PATH			DMC_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);

#define ICL_DMC_PATH			DMC_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);

#define GLK_DMC_PATH			DMC_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE		0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);

#define KBL_DMC_PATH			DMC_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);

#define SKL_DMC_PATH			DMC_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);

#define BXT_DMC_PATH			DMC_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE		0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);

#define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20
#define DMC_V1_MMIO_START_RANGE		0x80000

/* CSS header at the very start of the firmware blob. */
struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* Always 0x10000 for DMC firmware */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;

/* One entry in the package header's FWInfo table, selecting a DMC binary. */
struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	u32 offset;
	u32 reserved2;
} __packed;

/* Package header, immediately after the CSS header, followed by the FWInfo table. */
struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;

/* Fields common to the v1 and v3 per-binary DMC headers. */
struct intel_dmc_header_base {
	/* always value would be 0x40403E3E */
	u32 signature;

	/* DMC binary header length */
	u8 header_len;

	/* 0x01 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;

struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;

struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;

/* Display stepping as a (stepping, substepping) character pair; '*' wildcards. */
struct stepping_info {
	char stepping;
	char substepping;
};

/* True if a payload has been parsed and cached for the given DMC id. */
static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
{
	return i915->display.dmc.dmc_info[dmc_id].payload;
}

bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
	return has_dmc_id_fw(i915, DMC_FW_MAIN);
}

/*
 * Fill @si from the runtime display step name and return it.
 * Reads the first two characters of the step name (e.g. "A0");
 * assumes intel_step_name() returns at least two characters.
 */
static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
			struct stepping_info *si)
{
	const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);

	si->stepping = step_name[0];
	si->substepping = step_name[1];
	return si;
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	/* The below bit doesn't need to be cleared ever afterwards */
	intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
	intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}

/* Reset one DMC event handler: false event id, cleared handler pointer. */
static void disable_event_handler(struct drm_i915_private *i915,
				  i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	intel_de_write(i915, ctl_reg,
		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE));
	intel_de_write(i915, htp_reg, 0);
}

/*
 * Disable a flip queue event handler, but only if its current register
 * state matches the expected firmware-programmed configuration; bail out
 * (with a debug message) on anything unexpected.
 */
static void
disable_flip_queue_event(struct drm_i915_private *i915,
			 i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	u32 event_ctl;
	u32 event_htp;

	event_ctl = intel_de_read(i915, ctl_reg);
	event_htp = intel_de_read(i915, htp_reg);
	if (event_ctl != (DMC_EVT_CTL_ENABLE |
			  DMC_EVT_CTL_RECURRING |
			  REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
					 DMC_EVT_CTL_TYPE_EDGE_0_1) |
			  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
					 DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
	    !event_htp) {
		drm_dbg_kms(&i915->drm,
			    "Unexpected DMC event configuration (control %08x htp %08x)\n",
			    event_ctl, event_htp);
		return;
	}

	disable_event_handler(i915, ctl_reg, htp_reg);
}

/*
 * Look up the control/pointer registers of the flip queue event for the
 * given DMC id. Returns false when the platform/firmware combination has
 * no flip queue event to disable.
 */
static bool
get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
			  i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
	switch (dmc_id) {
	case DMC_FW_MAIN:
		if (DISPLAY_VER(i915) == 12) {
			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);

			return true;
		}
		break;
	case DMC_FW_PIPEA ... DMC_FW_PIPED:
		if (IS_DG2(i915)) {
			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);

			return true;
		}
		break;
	}

	return false;
}

static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
	int dmc_id;

	/* TODO: check if the following applies to all D13+ platforms. */
	if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
		return;

	for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
		i915_reg_t ctl_reg;
		i915_reg_t htp_reg;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
			continue;

		disable_flip_queue_event(i915, ctl_reg, htp_reg);
	}
}

static void disable_all_event_handlers(struct drm_i915_private *i915)
{
	int id;

	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
	if (DISPLAY_VER(i915) < 12)
		return;

	for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
		int handler;

		if (!has_dmc_id_fw(i915, id))
			continue;

		for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
			disable_event_handler(i915,
					      DMC_EVT_CTL(i915, id, handler),
					      DMC_EVT_HTP(i915, id, handler));
	}
}

/* Toggle the pipe DMC clock gating workaround (no-op before display ver 13). */
static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	enum pipe pipe;

	if (DISPLAY_VER(i915) < 13)
		return;

	/*
	 * Wa_16015201720:adl-p,dg2, mtl
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 * For pipe C and D clock gating needs to be disabled only
	 * during initializing the firmware.
	 */
	if (enable)
		for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     0, PIPEDMC_GATING_DIS);
	else
		for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     PIPEDMC_GATING_DIS, 0);
}

/**
 * intel_dmc_load_program() - write the firmware from memory to register.
 * @dev_priv: i915 drm device.
 *
 * DMC firmware is read from a .bin file and kept in internal memory one time.
 * Every time display comes back from low power state this function is called to
 * copy the firmware from internal memory to registers.
 */
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
	struct intel_dmc *dmc = &dev_priv->display.dmc;
	u32 id, i;

	if (!intel_dmc_has_payload(dev_priv))
		return;

	pipedmc_clock_gating_wa(dev_priv, true);

	disable_all_event_handlers(dev_priv);

	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	/*
	 * The payload is written with raw (_fw) register writes under
	 * preempt_disable(), presumably to keep the programming sequence
	 * uninterrupted -- NOTE(review): confirm the exact requirement.
	 */
	preempt_disable();

	for (id = 0; id < DMC_FW_MAX; id++) {
		for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
			intel_de_write_fw(dev_priv,
					  DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
					  dmc->dmc_info[id].payload[i]);
		}
	}

	preempt_enable();

	/* Replay the MMIO (address, data) pairs captured from the fw header. */
	for (id = 0; id < DMC_FW_MAX; id++) {
		for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
			intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
				       dmc->dmc_info[id].mmiodata[i]);
		}
	}

	dev_priv->display.dmc.dc_state = 0;

	gen9_set_dc_state_debugmask(dev_priv);

	/*
	 * Flip queue events need to be disabled before enabling DC5/6.
	 * i915 doesn't use the flip queue feature, so disable it already
	 * here.
	 */
	disable_all_flip_queue_events(dev_priv);

	pipedmc_clock_gating_wa(dev_priv, false);
}

/**
 * intel_dmc_disable_program() - disable the firmware
 * @i915: i915 drm device
 *
 * Disable all event handlers in the firmware, making sure the firmware is
 * inactive after the display is uninitialized.
 */
void intel_dmc_disable_program(struct drm_i915_private *i915)
{
	if (!intel_dmc_has_payload(i915))
		return;

	pipedmc_clock_gating_wa(i915, true);
	disable_all_event_handlers(i915);
	pipedmc_clock_gating_wa(i915, false);
}

void assert_dmc_loaded(struct drm_i915_private *i915)
{
	drm_WARN_ONCE(&i915->drm,
		      !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

/* Match a FWInfo entry against the running stepping, honouring '*' wildcards. */
static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
				     const struct stepping_info *si)
{
	if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
	    (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
	    /*
	     * If we don't find a more specific one from above two checks, we
	     * then check for the generic one to be sure to work even with
	     * "broken firmware"
	     */
	    (si->stepping == '*' && si->substepping == fw_info->substepping) ||
	    (fw_info->stepping == '*' && fw_info->substepping == '*'))
		return true;

	return false;
}

/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized.
 */
static void dmc_set_fw_offset(struct intel_dmc *dmc,
			      const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	unsigned int i, id;

	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);

	for (i = 0; i < num_entries; i++) {
		/* Package v1 carries only the main firmware; dmc_id is reserved. */
		id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (id >= DMC_FW_MAX) {
			drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
			continue;
		}

		/* More specific versions come first, so we don't even have to
		 * check for the stepping since we already found a previous FW
		 * for this id.
		 */
		if (dmc->dmc_info[id].present)
			continue;

		if (fw_info_matches_stepping(&fw_info[i], si)) {
			dmc->dmc_info[id].present = true;
			dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
		}
	}
}

/*
 * Validate that every MMIO address the firmware wants written falls within
 * the range expected for this header version / DMC id / platform.
 */
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, u8 dmc_id)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
	u32 start_range, end_range;
	int i;

	if (dmc_id >= DMC_FW_MAX) {
		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
		return false;
	}

	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
		return false;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}

/*
 * Parse one per-binary DMC header (v1 or v3), cache its MMIO writes and
 * payload into dmc->dmc_info[dmc_id]. Returns the number of bytes consumed
 * (header + payload) or 0 on any validation failure.
 */
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
			       const struct intel_dmc_header_base *dmc_header,
			       size_t rem_size, u8 dmc_id)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max, start_mmioaddr;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access common fields, we will check again below
	 * after we have read the version
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		start_mmioaddr = v3->start_mmioaddr;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		/* v1 header_len is already in bytes, unlike v3 */
		header_len_bytes = dmc_header->header_len;
		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
		dmc_header_size = sizeof(*v1);
	} else {
		drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
			dmc_header->header_ver);
		return 0;
	}

	if (header_len_bytes != dmc_header_size) {
		drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
			"(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
					dmc_header->header_ver, dmc_id)) {
		drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
		return 0;
	}

	for (i = 0; i < mmio_count; i++) {
		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
		dmc_info->mmiodata[i] = mmiodata[i];
	}
	dmc_info->mmio_count = mmio_count;
	dmc_info->start_mmioaddr = start_mmioaddr;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > dmc->max_fw_size) {
		drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	dmc_info->dmc_fw_size = dmc_header->fw_size;

	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
	if (!dmc_info->payload)
		return 0;

	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(dmc_info->payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

/*
 * Parse the package header and its FWInfo table, recording the per-id
 * firmware offsets via dmc_set_fw_offset(). Returns the total package
 * header size in bytes, or 0 on error.
 */
static u32
parse_dmc_fw_package(struct intel_dmc *dmc,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
			package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		drm_err(&i915->drm, "DMC firmware has wrong package header length "
			"(%u bytes)\n", package_size);
		return 0;
	}

	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
			  package_header->header_ver);

	/* dmc_offset is in dwords */
	return package_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

/* Return number of bytes parsed or 0 on error */
static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);

	if (rem_size < sizeof(struct intel_css_header)) {
		drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
		return 0;
	}

	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
			"(%u bytes)\n",
			(css_header->header_len * 4));
		return 0;
	}

	dmc->version = css_header->version;

	return sizeof(struct intel_css_header);
}

/*
 * Walk the firmware blob: CSS header, then package header, then one DMC
 * header + payload per present firmware id. Errors in a per-id header only
 * skip that id; a bad CSS or package header aborts the whole parse.
 */
static void parse_dmc_fw(struct drm_i915_private *dev_priv,
			 const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header_base *dmc_header;
	struct intel_dmc *dmc = &dev_priv->display.dmc;
	struct stepping_info display_info = { '*', '*'};
	const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
	u32 readcount = 0;
	u32 r, offset;
	int id;

	if (!fw)
		return;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	r = parse_dmc_fw_css(dmc, css_header, fw->size);
	if (!r)
		return;

	readcount += r;

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)&fw->data[readcount];
	r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
	if (!r)
		return;

	readcount += r;

	for (id = 0; id < DMC_FW_MAX; id++) {
		if (!dev_priv->display.dmc.dmc_info[id].present)
			continue;

		offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
		if (offset > fw->size) {
			drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
			continue;
		}

		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
	}
}

static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
	dev_priv->display.dmc.wakeref =
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}

static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&dev_priv->display.dmc.wakeref);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

/*
 * Deferred firmware load (scheduled from intel_dmc_ucode_init()): request
 * the blob, parse it, program it and, on success, drop the init-time rpm
 * reference. On failure the reference is kept, disabling runtime PM.
 */
static void dmc_load_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv;
	struct intel_dmc *dmc;
	const struct firmware *fw = NULL;

	dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
	dmc = &dev_priv->display.dmc;

	/* On request_firmware() failure fw stays NULL; parse_dmc_fw() copes. */
	request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
	parse_dmc_fw(dev_priv, fw);

	if (intel_dmc_has_payload(dev_priv)) {
		intel_dmc_load_program(dev_priv);
		intel_dmc_runtime_pm_put(dev_priv);

		drm_info(&dev_priv->drm,
			 "Finished loading DMC firmware %s (v%u.%u)\n",
			 dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
			 DMC_VERSION_MINOR(dmc->version));
	} else {
		drm_notice(&dev_priv->drm,
			   "Failed to load DMC firmware %s."
			   " Disabling runtime power management.\n",
			   dmc->fw_path);
		drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
			   INTEL_UC_FIRMWARE_URL);
	}

	release_firmware(fw);
}

/**
 * intel_dmc_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copied into internal memory.
 */
void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_dmc *dmc = &dev_priv->display.dmc;

	INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);

	if (!HAS_DMC(dev_priv))
		return;

	/*
	 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working DMC for whatever
	 * reason.
	 */
	intel_dmc_runtime_pm_get(dev_priv);

	/* Select firmware path and size limit; most-specific platforms first. */
	if (IS_DG2(dev_priv)) {
		dmc->fw_path = DG2_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		dmc->fw_path = ADLP_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		dmc->fw_path = ADLS_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_DG1(dev_priv)) {
		dmc->fw_path = DG1_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dmc->fw_path = RKL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_TIGERLAKE(dev_priv)) {
		dmc->fw_path = TGL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (DISPLAY_VER(dev_priv) == 11) {
		dmc->fw_path = ICL_DMC_PATH;
		dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(dev_priv)) {
		dmc->fw_path = GLK_DMC_PATH;
		dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(dev_priv) ||
		   IS_COFFEELAKE(dev_priv) ||
		   IS_COMETLAKE(dev_priv)) {
		dmc->fw_path = KBL_DMC_PATH;
		dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(dev_priv)) {
		dmc->fw_path = SKL_DMC_PATH;
		dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
	} else if (IS_BROXTON(dev_priv)) {
		dmc->fw_path = BXT_DMC_PATH;
		dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
	}

	/* The modparam overrides the platform default; "" disables DMC. */
	if (dev_priv->params.dmc_firmware_path) {
		if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
			dmc->fw_path = NULL;
			drm_info(&dev_priv->drm,
				 "Disabling DMC firmware and runtime PM\n");
			return;
		}

		dmc->fw_path = dev_priv->params.dmc_firmware_path;
	}

	if (!dmc->fw_path) {
		drm_dbg_kms(&dev_priv->drm,
			    "No known DMC firmware for platform, disabling runtime PM\n");
		return;
	}

	drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
	schedule_work(&dev_priv->display.dmc.work);
}

/**
 * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
 * @dev_priv: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
{
	if (!HAS_DMC(dev_priv))
		return;

	flush_work(&dev_priv->display.dmc.work);

	/* Drop the reference held in case DMC isn't loaded. */
	if (!intel_dmc_has_payload(dev_priv))
		intel_dmc_runtime_pm_put(dev_priv);
}

/**
 * intel_dmc_ucode_resume() - init DMC firmware during system resume
 * @dev_priv: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_dmc_ucode_suspend().
 */
void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
{
	if (!HAS_DMC(dev_priv))
		return;

	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
	if (!intel_dmc_has_payload(dev_priv))
		intel_dmc_runtime_pm_get(dev_priv);
}

/**
 * intel_dmc_ucode_fini() - unload the DMC firmware.
 * @dev_priv: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and reset the
 * firmware loading status.
 */
void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
{
	int id;

	if (!HAS_DMC(dev_priv))
		return;

	intel_dmc_ucode_suspend(dev_priv);
	drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);

	for (id = 0; id < DMC_FW_MAX; id++)
		kfree(dev_priv->display.dmc.dmc_info[id].payload);
}

/* Dump DMC load state and version into a captured GPU error state. */
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
				 struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = &i915->display.dmc;

	if (!HAS_DMC(i915))
		return;

	i915_error_printf(m, "DMC loaded: %s\n",
			  str_yes_no(intel_dmc_has_payload(i915)));
	i915_error_printf(m, "DMC fw version: %d.%d\n",
			  DMC_VERSION_MAJOR(dmc->version),
			  DMC_VERSION_MINOR(dmc->version));
}

/* debugfs: i915_dmc_info — firmware status and DC state transition counters. */
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	intel_wakeref_t wakeref;
	struct intel_dmc *dmc;
	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;

	if (!HAS_DMC(i915))
		return -ENODEV;

	dmc = &i915->display.dmc;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "fw loaded: %s\n",
		   str_yes_no(intel_dmc_has_payload(i915)));
	seq_printf(m, "path: %s\n", dmc->fw_path);
	seq_printf(m, "Pipe A fw needed: %s\n",
		   str_yes_no(GRAPHICS_VER(i915) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n",
		   str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
	seq_printf(m, "Pipe B fw needed: %s\n",
		   str_yes_no(IS_ALDERLAKE_P(i915) ||
			      DISPLAY_VER(i915) >= 14));
	seq_printf(m, "Pipe B fw loaded: %s\n",
		   str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));

	if (!intel_dmc_has_payload(i915))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	/* Counter register locations vary by platform generation. */
	if (DISPLAY_VER(i915) >= 12) {
		i915_reg_t dc3co_reg;

		if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
			dc3co_reg = DG1_DMC_DEBUG3;
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc3co_reg = TGL_DMC_DEBUG3;
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(i915, dc3co_reg));
	} else {
		dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
			SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
	if (i915_mmio_reg_valid(dc6_reg))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(i915, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(i915, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);

void intel_dmc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
			    i915, &intel_dmc_debugfs_status_fops);
}