/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"

/**
 * DOC: DMC Firmware Support
 *
 * Since gen9 the display engine has included the DMC (Display
 * Microcontroller), which saves and restores display engine state when the
 * engine enters a low-power state and comes back to normal.
 */

enum intel_dmc_id {
	DMC_FW_MAIN = 0,
	DMC_FW_PIPEA,
	DMC_FW_PIPEB,
	DMC_FW_PIPEC,
	DMC_FW_PIPED,
	DMC_FW_MAX
};

struct intel_dmc {
	struct drm_i915_private *i915;
	struct work_struct work;
	const char *fw_path;
	u32 max_fw_size; /* bytes */
	u32 version;
	struct dmc_fw_info {
		u32 mmio_count;
		i915_reg_t mmioaddr[20];
		u32 mmiodata[20];
		u32 dmc_offset;
		u32 start_mmioaddr;
		u32 dmc_fw_size; /* dwords */
		u32 *payload;
		bool present;
	} dmc_info[DMC_FW_MAX];
};

/* Note: This may be NULL. */
static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
{
	return i915->display.dmc.dmc;
}

#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)	((version) >> 16)
#define DMC_VERSION_MINOR(version)	((version) & 0xffff)
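
/*
 * For illustration: the packed version keeps the major number in the high
 * 16 bits and the minor number in the low 16 bits, so firmware v2.12
 * round-trips as
 *
 *	DMC_VERSION(2, 12) == 0x0002000c
 *	DMC_VERSION_MAJOR(0x0002000c) == 2
 *	DMC_VERSION_MINOR(0x0002000c) == 12
 */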

#define DMC_PATH(platform) \
	"i915/" __stringify(platform) "_dmc.bin"

/*
 * New DMC additions should not use this. This is used solely to remain
 * compatible with systems that have not yet updated DMC blobs to use
 * unversioned file names.
 */
#define DMC_LEGACY_PATH(platform, major, minor) \
	"i915/"					\
	__stringify(platform) "_dmc_ver"	\
	__stringify(major) "_"			\
	__stringify(minor) ".bin"

#define XELPDP_DMC_MAX_FW_SIZE		0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

#define MTL_DMC_PATH			DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);

#define DG2_DMC_PATH			DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);

#define ADLP_DMC_PATH			DMC_PATH(adlp)
#define ADLP_DMC_FALLBACK_PATH		DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);

#define ADLS_DMC_PATH			DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);

#define DG1_DMC_PATH			DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);

#define RKL_DMC_PATH			DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);

#define TGL_DMC_PATH			DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);

#define ICL_DMC_PATH			DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);

#define GLK_DMC_PATH			DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE		0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);

#define KBL_DMC_PATH			DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);

#define SKL_DMC_PATH			DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);

#define BXT_DMC_PATH			DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE		0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);

#define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20
#define DMC_V1_MMIO_START_RANGE		0x80000

#define PIPE_TO_DMC_ID(pipe)		(DMC_FW_PIPEA + ((pipe) - PIPE_A))

struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* Always 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;

struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	u32 offset;
	u32 reserved2;
} __packed;

struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;

struct intel_dmc_header_base {
	/* Always 0x40403E3E */
	u32 signature;

	/* DMC binary header length */
	u8 header_len;

	/* 0x01 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;

struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;

struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;
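
/*
 * A rough sketch of how the structures above are laid out in a firmware
 * blob, as assumed by the parsing code below (not verified against any
 * particular .bin):
 *
 *	+----------------------------+  offset 0
 *	| intel_css_header           |
 *	+----------------------------+  css_header->header_len * 4
 *	| intel_package_header       |
 *	| intel_fw_info[max_entries] |  (per-id/stepping offsets, in dwords)
 *	+----------------------------+  + package_header->header_len * 4
 *	| intel_dmc_header_v1/v3     |  one per selected fw_info entry,
 *	| payload (fw_size dwords)   |  located at fw_info->offset * 4
 *	+----------------------------+
 */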

struct stepping_info {
	char stepping;
	char substepping;
};

#define for_each_dmc_id(__dmc_id) \
	for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)

static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
	return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
}

static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	return dmc && dmc->dmc_info[dmc_id].payload;
}

bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
	return has_dmc_id_fw(i915, DMC_FW_MAIN);
}

static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
			struct stepping_info *si)
{
	const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);

	si->stepping = step_name[0];
	si->substepping = step_name[1];
	return si;
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
	/* The below bit doesn't need to be cleared ever afterwards */
	intel_de_rmw(i915, DC_STATE_DEBUG, 0,
		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
	intel_de_posting_read(i915, DC_STATE_DEBUG);
}

static void disable_event_handler(struct drm_i915_private *i915,
				  i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	intel_de_write(i915, ctl_reg,
		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE));
	intel_de_write(i915, htp_reg, 0);
}

static void
disable_flip_queue_event(struct drm_i915_private *i915,
			 i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	u32 event_ctl;
	u32 event_htp;

	event_ctl = intel_de_read(i915, ctl_reg);
	event_htp = intel_de_read(i915, htp_reg);
	if (event_ctl != (DMC_EVT_CTL_ENABLE |
			  DMC_EVT_CTL_RECURRING |
			  REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
					 DMC_EVT_CTL_TYPE_EDGE_0_1) |
			  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
					 DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
	    !event_htp) {
		drm_dbg_kms(&i915->drm,
			    "Unexpected DMC event configuration (control %08x htp %08x)\n",
			    event_ctl, event_htp);
		return;
	}

	disable_event_handler(i915, ctl_reg, htp_reg);
}

static bool
get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
			  i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
	if (dmc_id == DMC_FW_MAIN) {
		if (DISPLAY_VER(i915) == 12) {
			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);

			return true;
		}
	} else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
		if (IS_DG2(i915)) {
			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);

			return true;
		}
	}

	return false;
}

static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
	enum intel_dmc_id dmc_id;

	/* TODO: check if the following applies to all D13+ platforms. */
	if (!IS_TIGERLAKE(i915))
		return;

	for_each_dmc_id(dmc_id) {
		i915_reg_t ctl_reg;
		i915_reg_t htp_reg;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
			continue;

		disable_flip_queue_event(i915, ctl_reg, htp_reg);
	}
}

static void disable_all_event_handlers(struct drm_i915_private *i915)
{
	enum intel_dmc_id dmc_id;

	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
	if (DISPLAY_VER(i915) < 12)
		return;

	for_each_dmc_id(dmc_id) {
		int handler;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
			disable_event_handler(i915,
					      DMC_EVT_CTL(i915, dmc_id, handler),
					      DMC_EVT_HTP(i915, dmc_id, handler));
	}
}

static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	enum pipe pipe;

	/*
	 * Wa_16015201720:adl-p,dg2
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 * For pipe C and D clock gating needs to be disabled only
	 * during initializing the firmware.
	 */
	if (enable)
		for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     0, PIPEDMC_GATING_DIS);
	else
		for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     PIPEDMC_GATING_DIS, 0);
}

static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
{
	/*
	 * Wa_16015201720
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 */
	intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
		     MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
}

static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	if (DISPLAY_VER(i915) >= 14 && enable)
		mtl_pipedmc_clock_gating_wa(i915);
	else if (DISPLAY_VER(i915) == 13)
		adlp_pipedmc_clock_gating_wa(i915, enable);
}

void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
}

void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}

static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
			       enum intel_dmc_id dmc_id, i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
	u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));

	return offset >= start && offset < end;
}

static bool disable_dmc_evt(struct drm_i915_private *i915,
			    enum intel_dmc_id dmc_id,
			    i915_reg_t reg, u32 data)
{
	if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
		return false;

	/* keep all pipe DMC events disabled by default */
	if (dmc_id != DMC_FW_MAIN)
		return true;

	return false;
}

static u32 dmc_mmiodata(struct drm_i915_private *i915,
			struct intel_dmc *dmc,
			enum intel_dmc_id dmc_id, int i)
{
	if (disable_dmc_evt(i915, dmc_id,
			    dmc->dmc_info[dmc_id].mmioaddr[i],
			    dmc->dmc_info[dmc_id].mmiodata[i]))
		return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
			REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				       DMC_EVT_CTL_EVENT_ID_FALSE);
	else
		return dmc->dmc_info[dmc_id].mmiodata[i];
}

/**
 * intel_dmc_load_program() - write the firmware from memory to registers.
 * @i915: i915 drm device.
 *
 * DMC firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low-power state this function is
 * called to copy the firmware from internal memory to the registers.
 */
void intel_dmc_load_program(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_dmc *dmc = i915_to_dmc(i915);
	enum intel_dmc_id dmc_id;
	u32 i;

	if (!intel_dmc_has_payload(i915))
		return;

	pipedmc_clock_gating_wa(i915, true);

	disable_all_event_handlers(i915);

	assert_rpm_wakelock_held(&i915->runtime_pm);

	preempt_disable();

	for_each_dmc_id(dmc_id) {
		for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
			intel_de_write_fw(i915,
					  DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
					  dmc->dmc_info[dmc_id].payload[i]);
		}
	}

	preempt_enable();

	for_each_dmc_id(dmc_id) {
		for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
			intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
				       dmc_mmiodata(i915, dmc, dmc_id, i));
		}
	}

	power_domains->dc_state = 0;

	gen9_set_dc_state_debugmask(i915);

	/*
	 * Flip queue events need to be disabled before enabling DC5/6.
	 * i915 doesn't use the flip queue feature, so disable it already
	 * here.
	 */
	disable_all_flip_queue_events(i915);

	pipedmc_clock_gating_wa(i915, false);
}

/**
 * intel_dmc_disable_program() - disable the firmware
 * @i915: i915 drm device
 *
 * Disable all event handlers in the firmware, making sure the firmware is
 * inactive after the display is uninitialized.
 */
void intel_dmc_disable_program(struct drm_i915_private *i915)
{
	if (!intel_dmc_has_payload(i915))
		return;

	pipedmc_clock_gating_wa(i915, true);
	disable_all_event_handlers(i915);
	pipedmc_clock_gating_wa(i915, false);
}

void assert_dmc_loaded(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
	drm_WARN_ONCE(&i915->drm, dmc &&
		      !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
				     const struct stepping_info *si)
{
	if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
	    (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
	    /*
	     * If we don't find a more specific one from above two checks, we
	     * then check for the generic one to be sure to work even with
	     * "broken firmware"
	     */
	    (si->stepping == '*' && si->substepping == fw_info->substepping) ||
	    (fw_info->stepping == '*' && fw_info->substepping == '*'))
		return true;

	return false;
}
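
/*
 * For illustration, with a device stepping of "B0" the checks above accept
 * fw_info entries of "B*" (stepping match, substepping wildcard), "B0"
 * (exact match) and "**" (full wildcard), while e.g. "A0" or "B1" are
 * rejected. Which accepted entry wins is decided by dmc_set_fw_offset()
 * below, which keeps the first matching entry for each id.
 */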

/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized.
 */
static void dmc_set_fw_offset(struct intel_dmc *dmc,
			      const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	struct drm_i915_private *i915 = dmc->i915;
	enum intel_dmc_id dmc_id;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (!is_valid_dmc_id(dmc_id)) {
			drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
			continue;
		}

		/*
		 * More specific versions come first, so we don't even have to
		 * check for the stepping since we already found a previous FW
		 * for this id.
		 */
		if (dmc->dmc_info[dmc_id].present)
			continue;

		if (fw_info_matches_stepping(&fw_info[i], si)) {
			dmc->dmc_info[dmc_id].present = true;
			dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
		}
	}
}

static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, enum intel_dmc_id dmc_id)
{
	struct drm_i915_private *i915 = dmc->i915;
	u32 start_range, end_range;
	int i;

	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
		return false;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}

static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
			       const struct intel_dmc_header_base *dmc_header,
			       size_t rem_size, enum intel_dmc_id dmc_id)
{
	struct drm_i915_private *i915 = dmc->i915;
	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max, start_mmioaddr;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access common fields, we will check again below
	 * after we have read the version
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		start_mmioaddr = v3->start_mmioaddr;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		header_len_bytes = dmc_header->header_len;
		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
		dmc_header_size = sizeof(*v1);
	} else {
		drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
			dmc_header->header_ver);
		return 0;
	}
	if (header_len_bytes != dmc_header_size) {
		drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
			"(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
					dmc_header->header_ver, dmc_id)) {
		drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
		return 0;
	}

	for (i = 0; i < mmio_count; i++) {
		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
		dmc_info->mmiodata[i] = mmiodata[i];
	}
	dmc_info->mmio_count = mmio_count;
	dmc_info->start_mmioaddr = start_mmioaddr;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > dmc->max_fw_size) {
		drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	dmc_info->dmc_fw_size = dmc_header->fw_size;

	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
	if (!dmc_info->payload)
		return 0;

	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(dmc_info->payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

static u32
parse_dmc_fw_package(struct intel_dmc *dmc,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	struct drm_i915_private *i915 = dmc->i915;
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
			package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		drm_err(&i915->drm, "DMC firmware has wrong package header length "
			"(%u bytes)\n", package_size);
		return 0;
	}

	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
			  package_header->header_ver);

	/* dmc_offset is in dwords */
	return package_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

/* Return number of bytes parsed or 0 on error */
static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	struct drm_i915_private *i915 = dmc->i915;

	if (rem_size < sizeof(struct intel_css_header)) {
		drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
		return 0;
	}

	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
			"(%u bytes)\n",
			(css_header->header_len * 4));
		return 0;
	}

	dmc->version = css_header->version;

	return sizeof(struct intel_css_header);
}
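
/*
 * A sketch of the parse below, under the blob layout sketched earlier:
 * readcount tracks how far into fw->data we have parsed, first past the CSS
 * header, then past the package header and its fw_info table; each selected
 * per-id DMC header is then parsed at readcount + dmc_offset * 4.
 */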
static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
	struct drm_i915_private *i915 = dmc->i915;
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header_base *dmc_header;
	struct stepping_info display_info = { '*', '*' };
	const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
	enum intel_dmc_id dmc_id;
	u32 readcount = 0;
	u32 r, offset;

	if (!fw)
		return;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	r = parse_dmc_fw_css(dmc, css_header, fw->size);
	if (!r)
		return;

	readcount += r;

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)&fw->data[readcount];
	r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
	if (!r)
		return;

	readcount += r;

	for_each_dmc_id(dmc_id) {
		if (!dmc->dmc_info[dmc_id].present)
			continue;

		offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
		if (offset > fw->size) {
			drm_err(&i915->drm, "Reading beyond the fw_size\n");
			continue;
		}

		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
	}
}

static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
	i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}

static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->display.dmc.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}

static const char *dmc_fallback_path(struct drm_i915_private *i915)
{
	if (IS_ALDERLAKE_P(i915))
		return ADLP_DMC_FALLBACK_PATH;

	return NULL;
}

static void dmc_load_work_fn(struct work_struct *work)
{
	struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
	struct drm_i915_private *i915 = dmc->i915;
	const struct firmware *fw = NULL;
	const char *fallback_path;
	int err;

	err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);

	if (err == -ENOENT && !i915->params.dmc_firmware_path) {
		fallback_path = dmc_fallback_path(i915);
		if (fallback_path) {
			drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
				    dmc->fw_path, fallback_path);
			err = request_firmware(&fw, fallback_path, i915->drm.dev);
			if (err == 0)
				dmc->fw_path = fallback_path;
		}
	}

	parse_dmc_fw(dmc, fw);

	if (intel_dmc_has_payload(i915)) {
		intel_dmc_load_program(i915);
		intel_dmc_runtime_pm_put(i915);

		drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
			 dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
			 DMC_VERSION_MINOR(dmc->version));
999 " Disabling runtime power management.\n", 1000 dmc->fw_path); 1001 drm_notice(&i915->drm, "DMC firmware homepage: %s", 1002 INTEL_UC_FIRMWARE_URL); 1003 } 1004 1005 release_firmware(fw); 1006 } 1007 1008 /** 1009 * intel_dmc_init() - initialize the firmware loading. 1010 * @i915: i915 drm device. 1011 * 1012 * This function is called at the time of loading the display driver to read 1013 * firmware from a .bin file and copied into a internal memory. 1014 */ 1015 void intel_dmc_init(struct drm_i915_private *i915) 1016 { 1017 struct intel_dmc *dmc; 1018 1019 if (!HAS_DMC(i915)) 1020 return; 1021 1022 /* 1023 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering 1024 * runtime-suspend. 1025 * 1026 * On error, we return with the rpm wakeref held to prevent runtime 1027 * suspend as runtime suspend *requires* a working DMC for whatever 1028 * reason. 1029 */ 1030 intel_dmc_runtime_pm_get(i915); 1031 1032 dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); 1033 if (!dmc) 1034 return; 1035 1036 dmc->i915 = i915; 1037 1038 INIT_WORK(&dmc->work, dmc_load_work_fn); 1039 1040 if (IS_METEORLAKE(i915)) { 1041 dmc->fw_path = MTL_DMC_PATH; 1042 dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE; 1043 } else if (IS_DG2(i915)) { 1044 dmc->fw_path = DG2_DMC_PATH; 1045 dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 1046 } else if (IS_ALDERLAKE_P(i915)) { 1047 dmc->fw_path = ADLP_DMC_PATH; 1048 dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 1049 } else if (IS_ALDERLAKE_S(i915)) { 1050 dmc->fw_path = ADLS_DMC_PATH; 1051 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 1052 } else if (IS_DG1(i915)) { 1053 dmc->fw_path = DG1_DMC_PATH; 1054 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 1055 } else if (IS_ROCKETLAKE(i915)) { 1056 dmc->fw_path = RKL_DMC_PATH; 1057 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 1058 } else if (IS_TIGERLAKE(i915)) { 1059 dmc->fw_path = TGL_DMC_PATH; 1060 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 1061 } else if (DISPLAY_VER(i915) == 11) { 1062 dmc->fw_path = ICL_DMC_PATH; 1063 dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE; 1064 } else if (IS_GEMINILAKE(i915)) { 1065 dmc->fw_path = GLK_DMC_PATH; 1066 dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE; 1067 } else if (IS_KABYLAKE(i915) || 1068 IS_COFFEELAKE(i915) || 1069 IS_COMETLAKE(i915)) { 1070 dmc->fw_path = KBL_DMC_PATH; 1071 dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE; 1072 } else if (IS_SKYLAKE(i915)) { 1073 dmc->fw_path = SKL_DMC_PATH; 1074 dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE; 1075 } else if (IS_BROXTON(i915)) { 1076 dmc->fw_path = BXT_DMC_PATH; 1077 dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE; 1078 } 1079 1080 if (i915->params.dmc_firmware_path) { 1081 if (strlen(i915->params.dmc_firmware_path) == 0) { 1082 drm_info(&i915->drm, 1083 "Disabling DMC firmware and runtime PM\n"); 1084 goto out; 1085 } 1086 1087 dmc->fw_path = i915->params.dmc_firmware_path; 1088 } 1089 1090 if (!dmc->fw_path) { 1091 drm_dbg_kms(&i915->drm, 1092 "No known DMC firmware for platform, disabling runtime PM\n"); 1093 goto out; 1094 } 1095 1096 i915->display.dmc.dmc = dmc; 1097 1098 drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); 1099 queue_work(i915->unordered_wq, &dmc->work); 1100 1101 return; 1102 1103 out: 1104 kfree(dmc); 1105 } 1106 1107 /** 1108 * intel_dmc_suspend() - prepare DMC firmware before system suspend 1109 * @i915: i915 drm device 1110 * 1111 * Prepare the DMC firmware before entering system suspend. This includes 1112 * flushing pending work items and releasing any resources acquired during 1113 * init. 

/**
 * intel_dmc_suspend() - prepare DMC firmware before system suspend
 * @i915: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_dmc_suspend(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	if (!HAS_DMC(i915))
		return;

	if (dmc)
		flush_work(&dmc->work);

	/* Drop the reference held in case DMC isn't loaded. */
	if (!intel_dmc_has_payload(i915))
		intel_dmc_runtime_pm_put(i915);
}

/**
 * intel_dmc_resume() - init DMC firmware during system resume
 * @i915: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_dmc_suspend().
 */
void intel_dmc_resume(struct drm_i915_private *i915)
{
	if (!HAS_DMC(i915))
		return;

	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
	if (!intel_dmc_has_payload(i915))
		intel_dmc_runtime_pm_get(i915);
}

/**
 * intel_dmc_fini() - unload the DMC firmware.
 * @i915: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
 */
void intel_dmc_fini(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);
	enum intel_dmc_id dmc_id;

	if (!HAS_DMC(i915))
		return;

	intel_dmc_suspend(i915);
	drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);

	if (dmc) {
		for_each_dmc_id(dmc_id)
			kfree(dmc->dmc_info[dmc_id].payload);

		kfree(dmc);
		i915->display.dmc.dmc = NULL;
	}
}

void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
				 struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	if (!HAS_DMC(i915))
		return;

	i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
	i915_error_printf(m, "DMC loaded: %s\n",
			  str_yes_no(intel_dmc_has_payload(i915)));
	if (dmc)
		i915_error_printf(m, "DMC fw version: %d.%d\n",
				  DMC_VERSION_MAJOR(dmc->version),
				  DMC_VERSION_MINOR(dmc->version));
}

static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	struct intel_dmc *dmc = i915_to_dmc(i915);
	intel_wakeref_t wakeref;
	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;

	if (!HAS_DMC(i915))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
	seq_printf(m, "fw loaded: %s\n",
		   str_yes_no(intel_dmc_has_payload(i915)));
	seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
	seq_printf(m, "Pipe A fw needed: %s\n",
		   str_yes_no(GRAPHICS_VER(i915) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n",
		   str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
	seq_printf(m, "Pipe B fw needed: %s\n",
		   str_yes_no(IS_ALDERLAKE_P(i915) ||
			      DISPLAY_VER(i915) >= 14));
	seq_printf(m, "Pipe B fw loaded: %s\n",
		   str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));

	if (!intel_dmc_has_payload(i915))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	if (DISPLAY_VER(i915) >= 12) {
		i915_reg_t dc3co_reg;

		if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
			dc3co_reg = DG1_DMC_DEBUG3;
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc3co_reg = TGL_DMC_DEBUG3;
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(i915, dc3co_reg));
	} else {
		dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
			SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
	if (i915_mmio_reg_valid(dc6_reg))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(i915, dc6_reg));

	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));

out:
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(i915, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);

void intel_dmc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
			    i915, &intel_dmc_debugfs_status_fops);
}