// SPDX-License-Identifier: GPL-2.0
/*
 * Enable PCIe link L0s/L1 state and Clock Power Management
 *
 * Copyright (C) 2007 Intel
 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/pci-aspm.h>
#include "../pci.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."

/* Note: those are not register definitions */
#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
#define ASPM_STATE_L1		(4)	/* L1 state */
#define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
#define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
				 ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
				 ASPM_STATE_L1SS)

struct aspm_latency {
	u32 l0s;			/* L0s latency (nsec) */
	u32 l1;				/* L1 latency (nsec) */
};

struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */
	struct list_head children;	/* list of child link states */
	struct list_head link;		/* node in parent's children list */

	/* ASPM state */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */

	/* Exit latencies */
	struct aspm_latency latency_up;	/* Upstream direction exit latency */
	struct aspm_latency latency_dw;	/* Downstream direction exit latency */
	/*
	 * Endpoint acceptable latencies. A pcie downstream port only
	 * has one slot under it, so at most there are 8 functions.
	 */
	struct aspm_latency acceptable[8];

	/* L1 PM Substate info */
	struct {
		u32 up_cap_ptr;		/* L1SS cap ptr in upstream dev */
		u32 dw_cap_ptr;		/* L1SS cap ptr in downstream dev */
		u32 ctl1;		/* value to be programmed in ctl1 */
		u32 ctl2;		/* value to be programmed in ctl2 */
	} l1ss;
};

static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */

#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

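/* Allow a retraining link roughly one second (HZ jiffies) before giving up */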
#define LINK_RETRAIN_TIMEOUT HZ

static int policy_to_aspm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
		/* Enable ASPM L0s/L1 */
		return (ASPM_STATE_L0S | ASPM_STATE_L1);
	case POLICY_POWER_SUPERSAVE:
		/* Enable Everything */
		return ASPM_STATE_ALL;
	case POLICY_DEFAULT:
		return link->aspm_default;
	}
	return 0;
}

static int policy_to_clkpm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
	case POLICY_POWER_SUPERSAVE:
		/* Enable Clock PM */
		return 1;
	case POLICY_DEFAULT:
		return link->clkpm_default;
	}
	return 0;
}

static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;
	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN,
						   val);
	link->clkpm_enabled = !!enable;
}

static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
	/* Don't enable Clock PM if the link is not Clock PM capable */
	if (!link->clkpm_capable)
		enable = 0;
	/* Nothing to do if the requested state equals the current state */
	if (link->clkpm_enabled == enable)
		return;
	pcie_set_clkpm_nocheck(link, enable);
}

static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			capable = 0;
			enabled = 0;
			break;
		}
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;
	link->clkpm_capable = (blacklist) ? 0 : capable;
}

/*
 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
 * could use a common clock.  If they can, configure them to use the
 * common clock.  That will reduce the ASPM state exit latency.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, parent_reg, child_reg[8];
	unsigned long start_jiffies;
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check whether the downstream component sets Slot Clock Configuration */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check whether the upstream component sets Slot Clock Configuration */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might already be in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
	}

	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		child_reg[PCI_FUNC(child->devfn)] = reg16;
		if (same_clock)
			reg16 |= PCI_EXP_LNKCTL_CCC;
		else
			reg16 &= ~PCI_EXP_LNKCTL_CCC;
		pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
	}

	/* Configure upstream component */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_reg = reg16;
	if (same_clock)
		reg16 |= PCI_EXP_LNKCTL_CCC;
	else
		reg16 &= ~PCI_EXP_LNKCTL_CCC;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);

	/* Retrain link */
	reg16 |= PCI_EXP_LNKCTL_RL;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);

	/* Wait for link training to end.  Break out after the timeout. */
	start_jiffies = jiffies;
	for (;;) {
		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			break;
		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
			break;
		msleep(1);
	}
	if (!(reg16 & PCI_EXP_LNKSTA_LT))
		return;

	/* Training failed.  Restore common clock configurations. */
	pci_err(parent, "ASPM: Could not configure common clock\n");
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
					   child_reg[PCI_FUNC(child->devfn)]);
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
}

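/*
 * The LNKCAP exit latency fields are 3-bit encodings.  The helpers below
 * treat encoding n (0..6) as 64 << n ns for L0s and 1000 << n ns for L1,
 * with 0x7 meaning "more than the maximum".  For example, an L0s encoding
 * of 3 is treated as 64 << 3 = 512 ns.
 */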
/* Convert L0s latency encoding to ns */
static u32 calc_l0s_latency(u32 encoding)
{
	if (encoding == 0x7)
		return (5 * 1000);	/* > 4us */
	return (64 << encoding);
}

/* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return -1U;
	return (64 << encoding);
}

/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 encoding)
{
	if (encoding == 0x7)
		return (65 * 1000);	/* > 64us */
	return (1000 << encoding);
}

/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return -1U;
	return (1000 << encoding);
}

/* Convert L1SS T_pwr encoding to usec */
static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
{
	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	}
	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
	return 0;
}

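/*
 * LTR_L1.2_THRESHOLD is a 10-bit value plus a 3-bit scale, where scale n
 * means units of 2^(5*n) ns (1 ns, 32 ns, 1024 ns, ...).  For example, a
 * 165 us threshold is 165000 ns, which lands in the 32768..1048575 ns
 * range, so scale = 3 and value = 165000 >> 15 = 5 (i.e. 163840 ns).
 */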
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u32 threshold_ns = threshold_us * 1000;

	/* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
	if (threshold_ns < 32) {
		*scale = 0;
		*value = threshold_ns;
	} else if (threshold_ns < 1024) {
		*scale = 1;
		*value = threshold_ns >> 5;
	} else if (threshold_ns < 32768) {
		*scale = 2;
		*value = threshold_ns >> 10;
	} else if (threshold_ns < 1048576) {
		*scale = 3;
		*value = threshold_ns >> 15;
	} else if (threshold_ns < 33554432) {
		*scale = 4;
		*value = threshold_ns >> 20;
	} else {
		*scale = 5;
		*value = threshold_ns >> 25;
	}
}

struct aspm_register_info {
	u32 support:2;
	u32 enabled:2;
	u32 latency_encoding_l0s;
	u32 latency_encoding_l1;

	/* L1 substates */
	u32 l1ss_cap_ptr;
	u32 l1ss_cap;
	u32 l1ss_ctl1;
	u32 l1ss_ctl2;
};

static void pcie_get_aspm_reg(struct pci_dev *pdev,
			      struct aspm_register_info *info)
{
	u16 reg16;
	u32 reg32;

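	/*
	 * LNKCAP: ASPM Support is bits 11:10, L0s Exit Latency is bits
	 * 14:12 and L1 Exit Latency is bits 17:15, hence the shifts below.
	 */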
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
	info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
	info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
	info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
	info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;

	/* Read L1 PM substate capabilities */
	info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
	info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!info->l1ss_cap_ptr)
		return;
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
			      &info->l1ss_cap);
	if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
		info->l1ss_cap = 0;
		return;
	}

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!pdev->ltr_path)
		info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
			      &info->l1ss_ctl1);
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
			      &info->l1ss_ctl2);
}

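/*
 * Worked example: for an endpoint behind one switch, the link nearest
 * the endpoint is checked against acceptable->l1 directly, while the
 * link above the switch must fit within acceptable->l1 minus 1 us of
 * switch forwarding delay (l1_switch_latency grows by 1000 ns per hop).
 */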
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, l1_switch_latency = 0;
	struct aspm_latency *acceptable;
	struct pcie_link_state *link;

	/* A device not in D0 doesn't need a latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;
	acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];

	while (link) {
		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    (link->latency_up.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    (link->latency_dw.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to the root complex needs 1
		 * more microsecond for L1.  The spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device.  Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check.  We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable->l1))
			link->aspm_capable &= ~ASPM_STATE_L1;
		l1_switch_latency += 1000;

		link = link->parent;
	}
}

/*
 * The L1 PM substate capability is only implemented in function 0 in a
 * multi-function device.
 */
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
{
	struct pci_dev *child;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		if (PCI_FUNC(child->devfn) == 0)
			return child;
	return NULL;
}

/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
				struct aspm_register_info *upreg,
				struct aspm_register_info *dwreg)
{
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;

	link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
	link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
	link->l1ss.ctl1 = link->l1ss.ctl2 = 0;

	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
		return;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	if (calc_l1ss_pwron(link->pdev, scale1, val1) >
	    calc_l1ss_pwron(link->downstream, scale2, val2)) {
		link->l1ss.ctl2 |= scale1 | (val1 << 3);
		t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
	} else {
		link->l1ss.ctl2 |= scale2 | (val2 << 3);
		t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
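	/*
	 * PCI_L1SS_CTL1 layout: Common_Mode_Restore_Time lives in bits
	 * 15:8, LTR_L1.2_THRESHOLD Value in bits 25:16 and Scale in bits
	 * 31:29, which is why the fields are shifted by 8, 16 and 29.
	 */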
	link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
}

static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	struct aspm_register_info upreg, dwreg;

	if (blacklist) {
		/* Set enabled/disable so that we will disable ASPM later */
		link->aspm_enabled = ASPM_STATE_ALL;
		link->aspm_disable = ASPM_STATE_ALL;
		return;
	}

	/* Get upstream/downstream components' register state */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	if (!(upreg.support & dwreg.support))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state
	 * after clock configuration
	 */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
		link->aspm_support |= ASPM_STATE_L0S;
	if (dwreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_UP;
	if (upreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_DW;
	link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
	link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);

	/* Setup L1 state */
	if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
		link->aspm_support |= ASPM_STATE_L1;
	if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
		link->aspm_enabled |= ASPM_STATE_L1;
	link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
	link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);

	/* Setup L1 substate */
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;

	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;

	if (link->aspm_support & ASPM_STATE_L1SS)
		aspm_calc_l1ss_info(link, &upreg, &dwreg);

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;
	/*
	 * If the downstream component has a PCI bridge function, don't
	 * do ASPM for now.
	 */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
			link->aspm_disable = ASPM_STATE_ALL;
			break;
		}
	}

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		u32 reg32, encoding;
		struct aspm_latency *acceptable =
			&link->acceptable[PCI_FUNC(child->devfn)];

		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		/* Calculate endpoint L0s acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
		acceptable->l0s = calc_l0s_acceptable(encoding);
		/* Calculate endpoint L1 acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
		acceptable->l1 = calc_l1_acceptable(encoding);

		pcie_aspm_check_latency(child);
	}
}

static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
				    u32 clear, u32 set)
{
	u32 val;

	pci_read_config_dword(pdev, pos, &val);
	val &= ~clear;
	val |= set;
	pci_write_config_dword(pdev, pos, val);
}

/* Configure the ASPM L1 substates */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val, enable_req;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 up_cap_ptr = link->l1ss.up_cap_ptr;
	u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;

	enable_req = (link->aspm_enabled ^ state) & state;

	/*
	 * Here are the rules specified in the PCIe spec for enabling L1SS:
	 * - When enabling L1.x, enable the bit at the parent first, then
	 *   at the child
	 * - When disabling L1.x, disable the bit at the child first, then
	 *   at the parent
	 * - When enabling ASPM L1.x, L1 must be disabled first
	 *   (at the child followed by the parent)
	 * - ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 *
	 * To keep it simple, disable all L1SS bits first, and later enable
	 * what is needed.
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	/*
	 * If needed, disable L1; it gets enabled later in
	 * pcie_config_aspm_link().
	 */
	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
	}

	if (enable_req & ASPM_STATE_L1_2_MASK) {

		/* Program T_POWER_ON times in both ports */
		pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);
		pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);

		/* Program Common_Mode_Restore_Time in upstream device */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_CM_RESTORE_TIME,
					link->l1ss.ctl1);

		/* Program LTR_L1.2_THRESHOLD time in both ports */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
					link->l1ss.ctl1);
		pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
					link->l1ss.ctl1);
	}

	val = 0;
	if (state & ASPM_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & ASPM_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & ASPM_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & ASPM_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/* Enable what we need to enable */
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
}

static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}

static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & ASPM_STATE_L1))
		state &= ~ASPM_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates */
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~ASPM_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
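	/*
	 * Note the cross-mapping: upstream-direction L0s is driven by the
	 * downstream component's transmitter, so ASPM_STATE_L0S_UP maps to
	 * the downstream component's LNKCTL (dwstream), and vice versa.
	 */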
	if (state & ASPM_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	if (link->aspm_capable & ASPM_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	/*
	 * Spec 2.0 suggests all functions should be configured with the
	 * same ASPM setting.  Enabling ASPM L1 should be done in the
	 * upstream component first and then the downstream one, and vice
	 * versa for disabling ASPM L1.  The spec doesn't mention L0s.
	 */
	if (state & ASPM_STATE_L1)
		pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);
	if (!(state & ASPM_STATE_L1))
		pcie_config_aspm_dev(parent, upstream);

	link->aspm_enabled = state;
}

static void pcie_config_aspm_path(struct pcie_link_state *link)
{
	while (link) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		link = link->parent;
	}
}

static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}

static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not all be PCIe functions,
	 * which is very strange.  Disable ASPM for the whole slot.
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state.  It's safe to continue even if it's a
		 * pre-1.1 device.
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe devices; like Windows, we
		 * use the RBER bit to determine whether a function is a
		 * 1.1 device.
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}

static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	INIT_LIST_HEAD(&link->children);
	INIT_LIST_HEAD(&link->link);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies.  Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
		list_add(&link->link, &parent->children);
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}

/*
 * pcie_aspm_init_link_state: Initialize PCIe link state.
 * It is called after the PCIe port and its child devices are scanned.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;
	int blacklist = !!pcie_aspm_sanity_check(pdev);

	if (!aspm_support_enabled)
		return;

	if (pdev->link_state)
		return;

	/*
	 * We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device has a
	 * Link on its secondary side.
	 */
	if (!pdev->has_secondary_link)
		return;

	/* VIA has a strange chipset, root port is under a bridge */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
	    pdev->bus->self)
		return;

	down_read(&pci_bus_sem);
	if (list_empty(&pdev->subordinate->devices))
		goto out;

	mutex_lock(&aspm_lock);
	link = alloc_pcie_link_state(pdev);
	if (!link)
		goto unlock;
	/*
	 * Setup initial ASPM state.  Note that we need to configure
	 * upstream links as well, because their capable state can be
	 * updated through pcie_aspm_cap_init().
	 */
	pcie_aspm_cap_init(link, blacklist);

	/* Setup initial Clock PM state */
	pcie_clkpm_cap_init(link, blacklist);

	/*
	 * At this stage drivers haven't had an opportunity to change the
	 * link policy setting.  Enabling ASPM on broken hardware can cripple
	 * it even before the driver has had a chance to disable ASPM, so
	 * default to a safe level right now.  If we're enabling ASPM beyond
	 * the BIOS's expectation, we'll do so once pci_enable_device() is
	 * called.
	 */
	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE) {
		pcie_config_aspm_path(link);
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}

unlock:
	mutex_unlock(&aspm_lock);
out:
	up_read(&pci_bus_sem);
}

/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
	struct pcie_link_state *link;
	BUG_ON(root->parent);
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		link->aspm_capable = link->aspm_support;
	}
	list_for_each_entry(link, &link_list, sibling) {
		struct pci_dev *child;
		struct pci_bus *linkbus = link->pdev->subordinate;
		if (link->root != root)
			continue;
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
				continue;
			pcie_aspm_check_latency(child);
		}
	}
}

/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	/*
	 * All PCIe functions are in one slot; removing one function
	 * removes the whole slot, so just wait until we are the last
	 * function left.
	 */
	if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
		goto out;

	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/* All functions are removed, so just disable ASPM for the link */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	list_del(&link->link);
	/* Clock PM is for endpoint device */
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}
out:
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

/* @pdev: the root port or switch downstream port */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;
	/*
	 * A device changed its PM state; recheck whether the latencies
	 * still meet every function's requirements.
	 */
	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_update_aspm_capable(link->root);
	pcie_config_aspm_path(link);
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;

	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_config_aspm_path(link);
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link;

	if (!pci_is_pcie(pdev))
		return;

	if (pdev->has_secondary_link)
		parent = pdev;
	if (!parent || !parent->link_state)
		return;

	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.  Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return;
	}

	if (sem)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link = parent->link_state;
	if (state & PCIE_LINK_STATE_L0S)
		link->aspm_disable |= ASPM_STATE_L0S;
	if (state & PCIE_LINK_STATE_L1)
		link->aspm_disable |= ASPM_STATE_L1;
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM) {
		link->clkpm_capable = 0;
		pcie_set_clkpm(link, 0);
	}
	mutex_unlock(&aspm_lock);
	if (sem)
		up_read(&pci_bus_sem);
}

void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	__pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);

/**
 * pci_disable_link_state - Disable device's link state, so the link will
 * never enter specific states.  Note that if the BIOS didn't grant ASPM
 * control to the OS, this does nothing because we can't touch the LNKCTL
 * register.
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
	__pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);

static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
{
	int i;
	struct pcie_link_state *link;

	if (aspm_disabled)
		return -EPERM;
	i = sysfs_match_string(policy_str, val);
	if (i < 0)
		return i;
	if (i == aspm_policy)
		return 0;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	aspm_policy = i;
	list_for_each_entry(link, &link_list, sibling) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return 0;
}

static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
	int i, cnt = 0;
	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
		if (i == aspm_policy)
			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
		else
			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
	return cnt;
}

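/*
 * Exposed as /sys/module/pcie_aspm/parameters/policy (see the
 * MODULE_PARAM_PREFIX above); writing one of the policy_str[] names,
 * e.g. "powersave", reconfigures every link in link_list.
 */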
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
		  NULL, 0644);

#ifdef CONFIG_PCIEASPM_DEBUG
static ssize_t link_state_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct pci_dev *pci_device = to_pci_dev(dev);
	struct pcie_link_state *link_state = pci_device->link_state;

	return sprintf(buf, "%d\n", link_state->aspm_enabled);
}

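/*
 * The value written here is an ASPM_STATE_* bitmask (e.g. 0x4 requests
 * L1 only); pcie_config_aspm_link() still gates it by aspm_capable and
 * aspm_disable.
 */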
static ssize_t link_state_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t n)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link, *root = pdev->link_state->root;
	u32 state;

	if (aspm_disabled)
		return -EPERM;

	if (kstrtouint(buf, 10, &state))
		return -EINVAL;
	if ((state & ~ASPM_STATE_ALL) != 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		pcie_config_aspm_link(link, state);
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return n;
}

static ssize_t clk_ctl_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pci_device = to_pci_dev(dev);
	struct pcie_link_state *link_state = pci_device->link_state;

	return sprintf(buf, "%d\n", link_state->clkpm_enabled);
}

static ssize_t clk_ctl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t n)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool state;

	if (strtobool(buf, &state))
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_set_clkpm_nocheck(pdev->link_state, state);
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return n;
}

static DEVICE_ATTR_RW(link_state);
static DEVICE_ATTR_RW(clk_ctl);

static char power_group[] = "power";
void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
{
	struct pcie_link_state *link_state = pdev->link_state;

	if (!link_state)
		return;

	if (link_state->aspm_support)
		sysfs_add_file_to_group(&pdev->dev.kobj,
					&dev_attr_link_state.attr, power_group);
	if (link_state->clkpm_capable)
		sysfs_add_file_to_group(&pdev->dev.kobj,
					&dev_attr_clk_ctl.attr, power_group);
}

void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	struct pcie_link_state *link_state = pdev->link_state;

	if (!link_state)
		return;

	if (link_state->aspm_support)
		sysfs_remove_file_from_group(&pdev->dev.kobj,
					     &dev_attr_link_state.attr, power_group);
	if (link_state->clkpm_capable)
		sysfs_remove_file_from_group(&pdev->dev.kobj,
					     &dev_attr_clk_ctl.attr, power_group);
}
#endif

static int __init pcie_aspm_disable(char *str)
{
	if (!strcmp(str, "off")) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
		aspm_support_enabled = false;
		printk(KERN_INFO "PCIe ASPM is disabled\n");
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
		printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
	}
	return 1;
}

__setup("pcie_aspm=", pcie_aspm_disable);

void pcie_no_aspm(void)
{
	/*
	 * Disabling ASPM is intended to prevent the kernel from modifying
	 * existing hardware state, not to clear existing state.  To that end:
	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
	 * (b) prevent userspace from changing policy
	 */
	if (!aspm_force) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
	}
}

bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}
EXPORT_SYMBOL(pcie_aspm_support_enabled);