// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD SoC Power Management Controller Driver
 *
 * Copyright (c) 2020, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/serio.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

#include "pmc.h"

/* SMU communication registers */
#define AMD_PMC_REGISTER_MESSAGE	0x538
#define AMD_PMC_REGISTER_RESPONSE	0x980
#define AMD_PMC_REGISTER_ARGUMENT	0x9BC

/* PMC Scratch Registers */
#define AMD_PMC_SCRATCH_REG_CZN		0x94
#define AMD_PMC_SCRATCH_REG_YC		0xD14

/* STB Registers */
#define AMD_PMC_STB_PMI_0		0x03E30600
#define AMD_PMC_STB_S2IDLE_PREPARE	0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE	0xC6000002
#define AMD_PMC_STB_S2IDLE_CHECK	0xC6000003
#define AMD_PMC_STB_DUMMY_PC		0xC6000007

/* STB S2D(Spill to DRAM) has different message port offset */
#define AMD_S2D_REGISTER_MESSAGE	0xA20
#define AMD_S2D_REGISTER_RESPONSE	0xA80
#define AMD_S2D_REGISTER_ARGUMENT	0xA88

/* STB Spill to DRAM Parameters */
#define S2D_TELEMETRY_BYTES_MAX		0x100000
#define S2D_TELEMETRY_DRAMBYTES_MAX	0x1000000

/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_MAPPING_SIZE		0x01000
#define AMD_PMC_BASE_ADDR_OFFSET	0x10000
#define AMD_PMC_BASE_ADDR_LO		0x13B102E8
#define AMD_PMC_BASE_ADDR_HI		0x13B102EC
#define AMD_PMC_BASE_ADDR_LO_MASK	GENMASK(15, 0)
#define AMD_PMC_BASE_ADDR_HI_MASK	GENMASK(31, 20)

/* SMU Response Codes */
#define AMD_PMC_RESULT_OK		0x01
#define AMD_PMC_RESULT_CMD_REJECT_BUSY	0xFC
#define AMD_PMC_RESULT_CMD_REJECT_PREREQ	0xFD
#define AMD_PMC_RESULT_CMD_UNKNOWN	0xFE
#define AMD_PMC_RESULT_FAILED		0xFF

/* FCH SSC Registers */
#define FCH_S0I3_ENTRY_TIME_L_OFFSET	0x30
#define FCH_S0I3_ENTRY_TIME_H_OFFSET	0x34
#define FCH_S0I3_EXIT_TIME_L_OFFSET	0x38
#define FCH_S0I3_EXIT_TIME_H_OFFSET	0x3C
#define FCH_SSC_MAPPING_SIZE		0x800
#define FCH_BASE_PHY_ADDR_LOW		0xFED81100
#define FCH_BASE_PHY_ADDR_HIGH		0x00000000

/* SMU Message Definitions */
#define SMU_MSG_GETSMUVERSION		0x02
#define SMU_MSG_LOG_GETDRAM_ADDR_HI	0x04
#define SMU_MSG_LOG_GETDRAM_ADDR_LO	0x05
#define SMU_MSG_LOG_START		0x06
#define SMU_MSG_LOG_RESET		0x07
#define SMU_MSG_LOG_DUMP_DATA		0x08
#define SMU_MSG_GET_SUP_CONSTRAINTS	0x09

/* List of supported CPU ids */
#define AMD_CPU_ID_RV			0x15D0
#define AMD_CPU_ID_RN			0x1630
#define AMD_CPU_ID_PCO			AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN			AMD_CPU_ID_RN
#define AMD_CPU_ID_YC			0x14B5
#define AMD_CPU_ID_CB			0x14D8
#define AMD_CPU_ID_PS			0x14E8
#define AMD_CPU_ID_SP			0x14A4
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT	0x1507

#define PMC_MSG_DELAY_MIN_US		50
#define RESPONSE_REGISTER_LOOP_MAX	20000

#define DELAY_MIN_US		2000
#define DELAY_MAX_US		3000
#define FIFO_SIZE		4096

enum amd_pmc_def {
	MSG_TEST = 0x01,
	MSG_OS_HINT_PCO,
	MSG_OS_HINT_RN,
};

/* Argument IDs for the Spill-to-DRAM (S2D) SMU message port */
enum s2d_arg {
	S2D_TELEMETRY_SIZE = 0x01,
	S2D_PHYS_ADDR_LOW,
	S2D_PHYS_ADDR_HIGH,
	S2D_NUM_SAMPLES,
	S2D_DRAM_SIZE,
};

/* Maps an IP-block name to its bit in the SMU active-IP bitmask */
struct amd_pmc_bit_map {
	const char *name;
	u32 bit_mask;
};

static const struct amd_pmc_bit_map soc15_ip_blk[] = {
	{"DISPLAY",	BIT(0)},
	{"CPU",		BIT(1)},
	{"GFX",		BIT(2)},
	{"VDD",		BIT(3)},
	{"ACP",		BIT(4)},
	{"VCN",		BIT(5)},
	{"ISP",		BIT(6)},
	{"NBIO",	BIT(7)},
	{"DF",		BIT(8)},
	{"USB3_0",	BIT(9)},
	{"USB3_1",	BIT(10)},
	{"LAPIC",	BIT(11)},
	{"USB3_2",	BIT(12)},
	{"USB3_3",	BIT(13)},
	{"USB3_4",	BIT(14)},
	{"USB4_0",	BIT(15)},
	{"USB4_1",	BIT(16)},
	{"MPM",		BIT(17)},
	{"JPEG",	BIT(18)},
	{"IPU",		BIT(19)},
	{"UMSCH",	BIT(20)},
	{}
};

static bool enable_stb;
module_param(enable_stb, bool, 0644);
MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");

static bool disable_workarounds;
module_param(disable_workarounds, bool, 0644);
MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");

/* Single device instance; this driver supports one PMC per system */
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);

/* MMIO read of a PMC register relative to the mapped SMU base */
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}

/* MMIO write of a PMC register relative to the mapped SMU base */
static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}

/*
 * Layout of the SMU metrics table in DRAM; read via memcpy_fromio() in
 * get_metrics_table(), hence __packed. Times are in microseconds per the
 * debugfs output that prints them.
 */
struct smu_metrics {
	u32 table_version;
	u32 hint_count;
	u32 s0i3_last_entry_status;
	u32 timein_s0i2;
	u64 timeentering_s0i3_lastcapture;
	u64 timeentering_s0i3_totaltime;
	u64 timeto_resume_to_os_lastcapture;
	u64 timeto_resume_to_os_totaltime;
	u64 timein_s0i3_lastcapture;
	u64 timein_s0i3_totaltime;
	u64 timein_swdrips_lastcapture;
	u64 timein_swdrips_totaltime;
	u64 timecondition_notmet_lastcapture[32];
	u64 timecondition_notmet_totaltime[32];
} __packed;

/*
 * Snapshot the STB FIFO (FIFO_SIZE u32 words) into a kernel buffer at
 * open time; reads are then served from that buffer. Buffer is freed
 * in ->release.
 */
static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amd_pmc_dev *dev = filp->f_inode->i_private;
	u32 size = FIFO_SIZE * sizeof(u32);
	u32 *buf;
	int rc;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = amd_pmc_read_stb(dev, buf);
	if (rc) {
		kfree(buf);
		return rc;
	}

	filp->private_data = buf;
	return rc;
}

static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
					loff_t *pos)
{
	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, filp->private_data,
				       FIFO_SIZE * sizeof(u32));
}

static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations amd_pmc_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = amd_pmc_stb_debugfs_open,
	.read = amd_pmc_stb_debugfs_read,
	.release = amd_pmc_stb_debugfs_release,
};

/*
 * v2 open path for platforms with Spill-to-DRAM: copy the most recent
 * telemetry window (up to S2D_TELEMETRY_BYTES_MAX) out of the mapped
 * STB DRAM region, starting from the last push location reported by
 * the SMU via S2D_NUM_SAMPLES.
 */
static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
{
	struct amd_pmc_dev *dev = filp->f_inode->i_private;
	u32 *buf, fsize, num_samples, stb_rdptr_offset = 0;
	int ret;

	/* Write dummy postcode while reading the STB buffer */
	ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
	if (ret)
		dev_err(dev->dev, "error writing to STB: %d\n", ret);

	buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Spill to DRAM num_samples uses separate SMU message port */
	dev->msg_port = 1;

	/* Get the num_samples to calculate the last push location */
	ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true);
	/* Clear msg_port for other SMU operation */
	dev->msg_port = 0;
	if (ret) {
		dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
		kfree(buf);
		return ret;
	}

	/* Start capturing data from the last push location */
	if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
		fsize  = S2D_TELEMETRY_BYTES_MAX;
		stb_rdptr_offset = num_samples - fsize;
	} else {
		fsize = num_samples;
		stb_rdptr_offset = 0;
	}

	memcpy_fromio(buf, dev->stb_virt_addr + stb_rdptr_offset, fsize);
	filp->private_data = buf;

	return 0;
}

static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
					   loff_t *pos)
{
	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, filp->private_data,
				       S2D_TELEMETRY_BYTES_MAX);
}

static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
	.owner = THIS_MODULE,
	.open = amd_pmc_stb_debugfs_open_v2,
	.read = amd_pmc_stb_debugfs_read_v2,
	.release = amd_pmc_stb_debugfs_release_v2,
};

/*
 * Per-SoC parameters: number of IP blocks reported in the metrics table
 * and the SMU message ID used for S2D queries. CPUs not listed here
 * leave num_ips/s2d_msg_id untouched.
 */
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
	switch (dev->cpu_id) {
	case AMD_CPU_ID_PCO:
	case AMD_CPU_ID_RN:
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
		dev->num_ips = 12;
		dev->s2d_msg_id = 0xBE;
		break;
	case AMD_CPU_ID_PS:
		dev->num_ips = 21;
		dev->s2d_msg_id = 0x85;
		break;
	}
}

/*
 * Lazily set up SMU logging: fetch the active-IP bitmask, map the DRAM
 * metrics table (address obtained from the SMU), then reset and start
 * logging. Safe to call repeatedly; the queries are only issued once.
 */
static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
{
	if (dev->cpu_id == AMD_CPU_ID_PCO) {
		dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n");
		return -EINVAL;
	}

	/* Get Active devices list from SMU */
	if (!dev->active_ips)
		amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, true);

	/* Get dram address */
	if (!dev->smu_virt_addr) {
		u32 phys_addr_low, phys_addr_hi;
		u64 smu_phys_addr;

		amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, true);
		amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, true);
		smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);

		dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr,
						  sizeof(struct smu_metrics));
		if (!dev->smu_virt_addr)
			return -ENOMEM;
	}

	/* Start the logging */
	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false);
	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false);

	return 0;
}

/* Copy the current SMU metrics table out of the mapped DRAM region */
static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
{
	if (!pdev->smu_virt_addr) {
		int ret = amd_pmc_setup_smu_logging(pdev);

		if (ret)
			return ret;
	}

	if (pdev->cpu_id == AMD_CPU_ID_PCO)
		return -ENODEV;
	memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
	return 0;
}

/*
 * After resume, warn if the last suspend never reached the deepest
 * state and report the hardware sleep time (0 when entry failed).
 */
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
	struct smu_metrics table;

	if (get_metrics_table(pdev, &table))
		return;

	if (!table.s0i3_last_entry_status)
		dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n");
	pm_report_hw_sleep_time(table.s0i3_last_entry_status ?
				table.timein_s0i3_lastcapture : 0);
}

/* Query and cache the SMU firmware version (program.major.minor.rev) */
static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
{
	int rc;
	u32 val;

	if (dev->cpu_id == AMD_CPU_ID_PCO)
		return -ENODEV;

	rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, true);
	if (rc)
		return rc;

	/* Version word is packed as program:major:minor:rev, one byte each */
	dev->smu_program = (val >> 24) & GENMASK(7, 0);
	dev->major = (val >> 16) & GENMASK(7, 0);
	dev->minor = (val >> 8) & GENMASK(7, 0);
	dev->rev = (val >> 0) & GENMASK(7, 0);

	dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
		dev->smu_program, dev->major, dev->minor, dev->rev);

	return 0;
}

static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
				   char *buf)
{
	struct amd_pmc_dev *dev = dev_get_drvdata(d);

	/* dev->major == 0 means the version has not been read yet */
	if (!dev->major) {
		int rc = amd_pmc_get_smu_version(dev);

		if (rc)
			return rc;
	}
	return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
}

static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct amd_pmc_dev *dev = dev_get_drvdata(d);

	/* dev->major == 0 means the version has not been read yet */
	if (!dev->major) {
		int rc = amd_pmc_get_smu_version(dev);

		if (rc)
			return rc;
	}
	return sysfs_emit(buf, "%u\n", dev->smu_program);
}

static DEVICE_ATTR_RO(smu_fw_version);
static DEVICE_ATTR_RO(smu_program);

/* Hide the SMU sysfs attributes on Picasso, which has no SMU support here */
static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);

	if (pdev->cpu_id == AMD_CPU_ID_PCO)
		return 0;
	return 0444;
}

static struct attribute *pmc_attrs[] = {
	&dev_attr_smu_fw_version.attr,
	&dev_attr_smu_program.attr,
	NULL,
};

static struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = pmc_attr_is_visible,
};

static const struct attribute_group *pmc_groups[] = {
	&pmc_attr_group,
	NULL,
};

/* debugfs: dump SMU statistics and per-IP active times from the metrics table */
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
	struct amd_pmc_dev *dev = s->private;
	struct smu_metrics table;
	int idx;

	if (get_metrics_table(dev, &table))
		return -EINVAL;

	seq_puts(s, "\n=== SMU Statistics ===\n");
	seq_printf(s, "Table Version: %d\n", table.table_version);
	seq_printf(s, "Hint Count: %d\n", table.hint_count);
	seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
		   "Unknown/Fail");
	seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
	seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
	seq_printf(s, "Time (in us) to resume from S0i3: %lld\n",
		   table.timeto_resume_to_os_lastcapture);

	seq_puts(s, "\n=== Active time (in us) ===\n");
	for (idx = 0 ; idx < dev->num_ips ; idx++) {
		if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
			seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
				   table.timecondition_notmet_lastcapture[idx]);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(smu_fw_info);

/* debugfs: S0ix entry/exit timestamps and residency read from FCH registers */
static int s0ix_stats_show(struct seq_file *s, void *unused)
{
	struct amd_pmc_dev *dev = s->private;
	u64 entry_time, exit_time, residency;

	/* Use FCH registers to get the S0ix stats */
	if (!dev->fch_virt_addr) {
		u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
		u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
		u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

		dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
		if (!dev->fch_virt_addr)
			return -ENOMEM;
	}

	entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
	entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);

	exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
	exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);

	/* It's in 48MHz. We need to convert it */
	residency = exit_time - entry_time;
	do_div(residency, 48);

	seq_puts(s, "=== S0ix statistics ===\n");
	seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
	seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
	seq_printf(s, "Residency Time: %lld\n", residency);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(s0ix_stats);

/*
 * Read the SMU idle mask from the per-SoC scratch register and emit it
 * to the pm debug log (when @dev is set) and/or a seq_file (when @s is
 * set). On CZN the scratch register is only valid on SMU FW >= 55.37.
 */
static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
				 struct seq_file *s)
{
	u32 val;
	int rc;

	switch (pdev->cpu_id) {
	case AMD_CPU_ID_CZN:
		/* we haven't yet read SMU version */
		if (!pdev->major) {
			rc = amd_pmc_get_smu_version(pdev);
			if (rc)
				return rc;
		}
		if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
			val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
		else
			return -EINVAL;
		break;
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
	case AMD_CPU_ID_PS:
		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
		break;
	default:
		return -EINVAL;
	}

	if (dev)
		pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val);

	if (s)
		seq_printf(s, "SMU idlemask : 0x%x\n", val);

	return 0;
}

static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
{
	return amd_pmc_idlemask_read(s->private, NULL, s);
}
DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);

static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}

/* STB (and hence S2D) is only available on Yellow Carp, Cezanne-B and PS */
static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
{
	switch (dev->cpu_id) {
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
	case AMD_CPU_ID_PS:
		return true;
	default:
		return false;
	}
}

static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
{
	dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
	debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
			    &smu_fw_info_fops);
	debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
			    &s0ix_stats_fops);
	debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
			    &amd_pmc_idlemask_fops);
	/* Enable STB only when the module_param is set */
	if (enable_stb) {
		if (amd_pmc_is_stb_supported(dev))
			debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
					    &amd_pmc_stb_debugfs_fops_v2);
		else
			debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
					    &amd_pmc_stb_debugfs_fops);
	}
}

/* Dump the mailbox registers of the currently selected message port */
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
	u32 value, message, argument, response;

	if (dev->msg_port) {
		message = AMD_S2D_REGISTER_MESSAGE;
		argument = AMD_S2D_REGISTER_ARGUMENT;
		response = AMD_S2D_REGISTER_RESPONSE;
	} else {
		message = AMD_PMC_REGISTER_MESSAGE;
		argument = AMD_PMC_REGISTER_ARGUMENT;
		response = AMD_PMC_REGISTER_RESPONSE;
	}

	value = amd_pmc_reg_read(dev, response);
	dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value);

	value = amd_pmc_reg_read(dev, argument);
	dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value);

	value = amd_pmc_reg_read(dev, message);
	dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
}

/*
 * Send one mailbox command to the SMU, serialized by dev->lock:
 * wait until the response register is non-zero (SMU idle), clear it,
 * write argument then message ID, poll for a response, and decode the
 * result code. When @ret is true the argument register is read back
 * into @data after a successful command.
 */
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
	int rc;
	u32 val, message, argument, response;

	mutex_lock(&dev->lock);

	/* dev->msg_port selects the S2D mailbox instead of the PMC one */
	if (dev->msg_port) {
		message = AMD_S2D_REGISTER_MESSAGE;
		argument = AMD_S2D_REGISTER_ARGUMENT;
		response = AMD_S2D_REGISTER_RESPONSE;
	} else {
		message = AMD_PMC_REGISTER_MESSAGE;
		argument = AMD_PMC_REGISTER_ARGUMENT;
		response = AMD_PMC_REGISTER_RESPONSE;
	}

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + response,
				val, val != 0, PMC_MSG_DELAY_MIN_US,
				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmc_reg_write(dev, response, 0);

	/* Write argument into response register */
	amd_pmc_reg_write(dev, argument, arg);

	/* Write message ID to message ID register */
	amd_pmc_reg_write(dev, message, msg);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + response,
				val, val != 0, PMC_MSG_DELAY_MIN_US,
				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	switch (val) {
	case AMD_PMC_RESULT_OK:
		if (ret) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmc_reg_read(dev, argument);
		}
		break;
	case AMD_PMC_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMC_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMC_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	amd_pmc_dump_registers(dev);
	return rc;
}

/* Pick the OS_HINT message ID appropriate for this SoC */
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
	switch (dev->cpu_id) {
	case AMD_CPU_ID_PCO:
		return MSG_OS_HINT_PCO;
	case AMD_CPU_ID_RN:
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
	case AMD_CPU_ID_PS:
		return MSG_OS_HINT_RN;
	}
	return -EINVAL;
}

/*
 * CZN platform firmware bug workaround: on SMU FW <= 64.65, disable the
 * keyboard (serio0 / IRQ1) as a wakeup source before suspend.
 * NOTE(review): IRQ number 1 is hard-coded here — presumably the legacy
 * i8042 keyboard IRQ; confirm this matches serio0 on all affected boards.
 */
static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
{
	struct device *d;
	int rc;

	if (!pdev->major) {
		rc = amd_pmc_get_smu_version(pdev);
		if (rc)
			return rc;
	}

	if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
		return 0;

	d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
	if (!d)
		return 0;
	if (device_may_wakeup(d)) {
		dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
		disable_irq_wake(1);
		device_set_wakeup_enable(d, false);
	}
	put_device(d);

	return 0;
}

/*
 * CZN workaround (SMU FW >= 64.53): if an RTC alarm is pending, disable
 * the RTC alarm IRQ and instead encode the seconds-until-alarm in the
 * upper 16 bits of the s0i3 OS-hint argument so the SMU performs the
 * timer wakeup itself.
 */
static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
{
	struct rtc_device *rtc_device;
	time64_t then, now, duration;
	struct rtc_wkalrm alarm;
	struct rtc_time tm;
	int rc;

	/* we haven't yet read SMU version */
	if (!pdev->major) {
		rc = amd_pmc_get_smu_version(pdev);
		if (rc)
			return rc;
	}

	if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
		return 0;

	rtc_device = rtc_class_open("rtc0");
	if (!rtc_device)
		return 0;
	rc = rtc_read_alarm(rtc_device, &alarm);
	if (rc)
		return rc;
	if (!alarm.enabled) {
		dev_dbg(pdev->dev, "alarm not enabled\n");
		return 0;
	}
	rc = rtc_read_time(rtc_device, &tm);
	if (rc)
		return rc;
	then = rtc_tm_to_time64(&alarm.time);
	now = rtc_tm_to_time64(&tm);
	duration = then-now;

	/* in the past */
	if (then < now)
		return 0;

	/* will be stored in upper 16 bits of s0i3 hint argument,
	 * so timer wakeup from s0i3 is limited to ~18 hours or less
	 */
	if (duration <= 4 || duration > U16_MAX)
		return -EINVAL;

	*arg |= (duration << 16);
	rc = rtc_alarm_irq_enable(rtc_device, 0);
	pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration);

	return rc;
}

/* acpi_s2idle_dev_ops ->prepare: start SMU logging and send the s0i3 OS hint */
static void amd_pmc_s2idle_prepare(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	int rc;
	u8 msg;
	u32 arg = 1;

	/* Reset and Start SMU logging - to monitor the s0i3 stats */
	amd_pmc_setup_smu_logging(pdev);

	/* Activate CZN specific platform bug workarounds */
	if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
		rc = amd_pmc_verify_czn_rtc(pdev, &arg);
		if (rc) {
			dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
			return;
		}
	}

	msg = amd_pmc_get_os_hint(pdev);
	rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, false);
	if (rc) {
		dev_err(pdev->dev, "suspend failed: %d\n", rc);
		return;
	}

	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
	if (rc)
		dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}

/* acpi_s2idle_dev_ops ->check: rate-limit CZN re-entry and log the idle mask */
static void amd_pmc_s2idle_check(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	struct smu_metrics table;
	int rc;

	/* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
	if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
	    table.s0i3_last_entry_status)
		usleep_range(10000, 20000);

	/* Dump the IdleMask before we add to the STB */
	amd_pmc_idlemask_read(pdev, pdev->dev, NULL);

	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
	if (rc)
		dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}

static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
{
	if (pdev->cpu_id == AMD_CPU_ID_PCO)
		return -ENODEV;

	return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, false);
}

/* acpi_s2idle_dev_ops ->restore: clear the OS hint and collect post-resume stats */
static void amd_pmc_s2idle_restore(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	int rc;
	u8 msg;

	msg = amd_pmc_get_os_hint(pdev);
	rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, false);
	if (rc)
		dev_err(pdev->dev, "resume failed: %d\n", rc);

	/* Let SMU know that we are looking for stats */
	amd_pmc_dump_data(pdev);

	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
	if (rc)
		dev_err(pdev->dev, "error writing to STB: %d\n", rc);

	/* Notify on failed entry */
	amd_pmc_validate_deepest(pdev);

	amd_pmc_process_restore_quirks(pdev);
}

static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
	.prepare = amd_pmc_s2idle_prepare,
	.check = amd_pmc_s2idle_check,
	.restore = amd_pmc_s2idle_restore,
};

static int amd_pmc_suspend_handler(struct device *dev)
{
	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);

	if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
		int rc = amd_pmc_czn_wa_irq1(pdev);

		if (rc) {
			dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
			return rc;
		}
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);

/* Root-complex device IDs used to identify the SoC (see probe) */
static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ }
};

/*
 * Query the S2D DRAM buffer size from the SMU. Only supported on YC
 * with SMU FW > 90.39; callers fall back to S2D_TELEMETRY_DRAMBYTES_MAX
 * on failure.
 */
static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
{
	int ret;

	switch (dev->cpu_id) {
	case AMD_CPU_ID_YC:
		if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
			ret = -EINVAL;
			goto err_dram_size;
		}
		break;
	default:
		ret = -EINVAL;
		goto err_dram_size;
	}

	ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
	if (ret || !dev->dram_size)
		goto err_dram_size;

	return 0;

err_dram_size:
	dev_err(dev->dev, "DRAM size command not supported for this platform\n");
	return ret;
}

/*
 * Initialize Spill-to-DRAM: query telemetry size and DRAM buffer
 * address via the S2D message port, then map the buffer for the STB
 * debugfs v2 path.
 */
static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
{
	u32 phys_addr_low, phys_addr_hi;
	u64 stb_phys_addr;
	u32 size = 0;
	int ret;

	/* Spill to DRAM feature uses separate SMU message port */
	dev->msg_port = 1;

	/* Get num of IP blocks within the SoC */
	amd_pmc_get_ip_info(dev);

	amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
	if (size != S2D_TELEMETRY_BYTES_MAX)
		return -EIO;

	/* Get DRAM size */
	ret = amd_pmc_get_dram_size(dev);
	if (ret)
		dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;

	/* Get STB DRAM address */
	amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
	amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);

	stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);

	/* Clear msg_port for other SMU operation */
	dev->msg_port = 0;

	dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
	if (!dev->stb_virt_addr)
		return -ENOMEM;

	return 0;
}

/* Push one postcode word into the STB FIFO over SMN */
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
	int err;

	err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
	if (err) {
		dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
		return pcibios_err_to_errno(err);
	}

	return 0;
}

/* Drain FIFO_SIZE words from the STB FIFO over SMN into @buf */
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
	int i, err;

	for (i = 0; i < FIFO_SIZE; i++) {
		err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
		if (err) {
			dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
			return pcibios_err_to_errno(err);
		}
	}

	return 0;
}

/*
 * Probe: identify the SoC from the root-complex PCI ID, read the SMU
 * base address over SMN, map the mailbox registers, and register the
 * S2D buffer, LPS0 callbacks, sysfs group and debugfs entries.
 */
static int amd_pmc_probe(struct platform_device *pdev)
{
	struct amd_pmc_dev *dev = &pmc;
	struct pci_dev *rdev;
	u32 base_addr_lo, base_addr_hi;
	u64 base_addr;
	int err;
	u32 val;

	dev->dev = &pdev->dev;

	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
		err = -ENODEV;
		goto err_pci_dev_put;
	}

	dev->cpu_id = rdev->device;

	if (dev->cpu_id == AMD_CPU_ID_SP) {
		dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n");
		err = -ENODEV;
		goto err_pci_dev_put;
	}

	dev->rdev = rdev;
	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
	if (err) {
		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
		err = pcibios_err_to_errno(err);
		goto err_pci_dev_put;
	}

	/*
	 * NOTE(review): the LO register is masked with the HI mask and vice
	 * versa below; this mirrors how the masks are applied here but the
	 * naming looks swapped — confirm against the register description.
	 */
	base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;

	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
	if (err) {
		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
		err = pcibios_err_to_errno(err);
		goto err_pci_dev_put;
	}

	base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
				    AMD_PMC_MAPPING_SIZE);
	if (!dev->regbase) {
		err = -ENOMEM;
		goto err_pci_dev_put;
	}

	mutex_init(&dev->lock);

	if (enable_stb && amd_pmc_is_stb_supported(dev)) {
		err = amd_pmc_s2d_init(dev);
		if (err)
			goto err_pci_dev_put;
	}

	platform_set_drvdata(pdev, dev);
	if (IS_ENABLED(CONFIG_SUSPEND)) {
		err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
		if (err)
			dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
		if (!disable_workarounds)
			amd_pmc_quirks_init(dev);
	}

	amd_pmc_dbgfs_register(dev);
	pm_report_max_hw_sleep(U64_MAX);
	return 0;

err_pci_dev_put:
	pci_dev_put(rdev);
	return err;
}

static void amd_pmc_remove(struct platform_device *pdev)
{
	struct amd_pmc_dev *dev = platform_get_drvdata(pdev);

	if (IS_ENABLED(CONFIG_SUSPEND))
		acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
	amd_pmc_dbgfs_unregister(dev);
	pci_dev_put(dev->rdev);
	mutex_destroy(&dev->lock);
}

static const struct acpi_device_id amd_pmc_acpi_ids[] = {
	{"AMDI0005", 0},
	{"AMDI0006", 0},
	{"AMDI0007", 0},
	{"AMDI0008", 0},
	{"AMDI0009", 0},
	{"AMDI000A", 0},
	{"AMD0004", 0},
	{"AMD0005", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);

static struct platform_driver amd_pmc_driver = {
	.driver = {
		.name = "amd_pmc",
		.acpi_match_table = amd_pmc_acpi_ids,
		.dev_groups = pmc_groups,
		.pm = pm_sleep_ptr(&amd_pmc_pm),
	},
	.probe = amd_pmc_probe,
	.remove_new = amd_pmc_remove,
};
module_platform_driver(amd_pmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("AMD PMC Driver");