xref: /openbmc/linux/drivers/pci/pcie/aspm.c (revision e620a1e0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Enable PCIe link L0s/L1 state and Clock Power Management
4  *
5  * Copyright (C) 2007 Intel
6  * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7  * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/pci_regs.h>
15 #include <linux/errno.h>
16 #include <linux/pm.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/jiffies.h>
20 #include <linux/delay.h>
21 #include "../pci.h"
22 
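/*
 * Give module parameters defined here an explicit "pcie_aspm." prefix
 * (e.g. "pcie_aspm.policy"), since this code is built into the PCI core
 * rather than as a module named pcie_aspm.
 */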
23 #ifdef MODULE_PARAM_PREFIX
24 #undef MODULE_PARAM_PREFIX
25 #endif
26 #define MODULE_PARAM_PREFIX "pcie_aspm."
27 
28 /* Note: these are not register definitions */
29 #define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
30 #define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
31 #define ASPM_STATE_L1		(4)	/* L1 state */
32 #define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
33 #define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
34 #define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
35 #define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
36 #define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
37 #define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
38 #define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
39 				 ASPM_STATE_L1_2_MASK)
40 #define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41 #define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
42 				 ASPM_STATE_L1SS)
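/*
 * These seven flags fit in the 7-bit aspm_support/aspm_enabled/
 * aspm_capable/aspm_default/aspm_disable bitfields of struct
 * pcie_link_state below, so one mask describes a link's whole
 * L0s/L1/L1SS configuration.
 */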
43 
44 struct aspm_latency {
45 	u32 l0s;			/* L0s latency (nsec) */
46 	u32 l1;				/* L1 latency (nsec) */
47 };
48 
49 struct pcie_link_state {
50 	struct pci_dev *pdev;		/* Upstream component of the Link */
51 	struct pci_dev *downstream;	/* Downstream component, function 0 */
52 	struct pcie_link_state *root;	/* pointer to the root port link */
53 	struct pcie_link_state *parent;	/* pointer to the parent Link state */
54 	struct list_head sibling;	/* node in link_list */
55 
56 	/* ASPM state */
57 	u32 aspm_support:7;		/* Supported ASPM state */
58 	u32 aspm_enabled:7;		/* Enabled ASPM state */
59 	u32 aspm_capable:7;		/* Capable ASPM state with latency */
60 	u32 aspm_default:7;		/* Default ASPM state by BIOS */
61 	u32 aspm_disable:7;		/* Disabled ASPM state */
62 
63 	/* Clock PM state */
64 	u32 clkpm_capable:1;		/* Clock PM capable? */
65 	u32 clkpm_enabled:1;		/* Current Clock PM state */
66 	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
67 
68 	/* Exit latencies */
69 	struct aspm_latency latency_up;	/* Upstream direction exit latency */
70 	struct aspm_latency latency_dw;	/* Downstream direction exit latency */
71 	/*
72 	 * Endpoint acceptable latencies. A PCIe downstream port has only
73 	 * one slot under it, so there are at most 8 functions.
74 	 */
75 	struct aspm_latency acceptable[8];
76 
77 	/* L1 PM Substate info */
78 	struct {
79 		u32 up_cap_ptr;		/* L1SS cap ptr in upstream dev */
80 		u32 dw_cap_ptr;		/* L1SS cap ptr in downstream dev */
81 		u32 ctl1;		/* value to be programmed in ctl1 */
82 		u32 ctl2;		/* value to be programmed in ctl2 */
83 	} l1ss;
84 };
85 
86 static int aspm_disabled, aspm_force;
87 static bool aspm_support_enabled = true;
88 static DEFINE_MUTEX(aspm_lock);
89 static LIST_HEAD(link_list);
90 
91 #define POLICY_DEFAULT 0	/* BIOS default setting */
92 #define POLICY_PERFORMANCE 1	/* high performance */
93 #define POLICY_POWERSAVE 2	/* high power saving */
94 #define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
95 
96 #ifdef CONFIG_PCIEASPM_PERFORMANCE
97 static int aspm_policy = POLICY_PERFORMANCE;
98 #elif defined CONFIG_PCIEASPM_POWERSAVE
99 static int aspm_policy = POLICY_POWERSAVE;
100 #elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
101 static int aspm_policy = POLICY_POWER_SUPERSAVE;
102 #else
103 static int aspm_policy;
104 #endif
105 
106 static const char *policy_str[] = {
107 	[POLICY_DEFAULT] = "default",
108 	[POLICY_PERFORMANCE] = "performance",
109 	[POLICY_POWERSAVE] = "powersave",
110 	[POLICY_POWER_SUPERSAVE] = "powersupersave"
111 };
112 
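/* One second worth of jiffies: how long pcie_retrain_link() polls LNKSTA */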
113 #define LINK_RETRAIN_TIMEOUT HZ
114 
115 static int policy_to_aspm_state(struct pcie_link_state *link)
116 {
117 	switch (aspm_policy) {
118 	case POLICY_PERFORMANCE:
119 		/* Disable ASPM and Clock PM */
120 		return 0;
121 	case POLICY_POWERSAVE:
122 		/* Enable ASPM L0s/L1 */
123 		return (ASPM_STATE_L0S | ASPM_STATE_L1);
124 	case POLICY_POWER_SUPERSAVE:
125 		/* Enable Everything */
126 		return ASPM_STATE_ALL;
127 	case POLICY_DEFAULT:
128 		return link->aspm_default;
129 	}
130 	return 0;
131 }
132 
133 static int policy_to_clkpm_state(struct pcie_link_state *link)
134 {
135 	switch (aspm_policy) {
136 	case POLICY_PERFORMANCE:
137 		/* Disable ASPM and Clock PM */
138 		return 0;
139 	case POLICY_POWERSAVE:
140 	case POLICY_POWER_SUPERSAVE:
141 		/* Enable Clock PM */
142 		return 1;
143 	case POLICY_DEFAULT:
144 		return link->clkpm_default;
145 	}
146 	return 0;
147 }
148 
149 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
150 {
151 	struct pci_dev *child;
152 	struct pci_bus *linkbus = link->pdev->subordinate;
153 	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
154 
155 	list_for_each_entry(child, &linkbus->devices, bus_list)
156 		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
157 						   PCI_EXP_LNKCTL_CLKREQ_EN,
158 						   val);
159 	link->clkpm_enabled = !!enable;
160 }
161 
162 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
163 {
164 	/* Don't enable Clock PM if the link is not Clock PM capable */
165 	if (!link->clkpm_capable)
166 		enable = 0;
167 	/* Nothing to do if the requested state equals the current state */
168 	if (link->clkpm_enabled == enable)
169 		return;
170 	pcie_set_clkpm_nocheck(link, enable);
171 }
172 
173 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
174 {
175 	int capable = 1, enabled = 1;
176 	u32 reg32;
177 	u16 reg16;
178 	struct pci_dev *child;
179 	struct pci_bus *linkbus = link->pdev->subordinate;
180 
181 	/* All functions should have the same cap and state; take the worst */
182 	list_for_each_entry(child, &linkbus->devices, bus_list) {
183 		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
184 		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
185 			capable = 0;
186 			enabled = 0;
187 			break;
188 		}
189 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
190 		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
191 			enabled = 0;
192 	}
193 	link->clkpm_enabled = enabled;
194 	link->clkpm_default = enabled;
195 	link->clkpm_capable = (blacklist) ? 0 : capable;
196 }
197 
198 static bool pcie_retrain_link(struct pcie_link_state *link)
199 {
200 	struct pci_dev *parent = link->pdev;
201 	unsigned long end_jiffies;
202 	u16 reg16;
203 
204 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
205 	reg16 |= PCI_EXP_LNKCTL_RL;
206 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
207 	if (parent->clear_retrain_link) {
208 		/*
209 		 * Due to an erratum in some devices the Retrain Link bit
210 		 * needs to be cleared again manually to allow the link
211 		 * training to succeed.
212 		 */
213 		reg16 &= ~PCI_EXP_LNKCTL_RL;
214 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
215 	}
216 
217 	/* Wait for link training to end. Break out after the timeout */
218 	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
219 	do {
220 		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
221 		if (!(reg16 & PCI_EXP_LNKSTA_LT))
222 			break;
223 		msleep(1);
224 	} while (time_before(jiffies, end_jiffies));
225 	return !(reg16 & PCI_EXP_LNKSTA_LT);
226 }
227 
228 /*
229  * pcie_aspm_configure_common_clock: check if the two ends of a link
230  *   can use a common clock. If so, configure them to use the common
231  *   clock, which reduces the ASPM state exit latency.
232  */
233 static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
234 {
235 	int same_clock = 1;
236 	u16 reg16, parent_reg, child_reg[8];
237 	struct pci_dev *child, *parent = link->pdev;
238 	struct pci_bus *linkbus = parent->subordinate;
239 	/*
240 	 * All functions of a slot should have the same Slot Clock
241 	 * Configuration, so just check one function
242 	 */
243 	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
244 	BUG_ON(!pci_is_pcie(child));
245 
246 	/* Check whether the downstream component's Slot Clock Configuration is 1 */
247 	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
248 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
249 		same_clock = 0;
250 
251 	/* Check whether the upstream component's Slot Clock Configuration is 1 */
252 	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
253 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
254 		same_clock = 0;
255 
256 	/* Port might already be in common clock mode */
257 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
258 	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
259 		bool consistent = true;
260 
261 		list_for_each_entry(child, &linkbus->devices, bus_list) {
262 			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
263 						  &reg16);
264 			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
265 				consistent = false;
266 				break;
267 			}
268 		}
269 		if (consistent)
270 			return;
271 		pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
272 	}
273 
274 	/* Configure downstream component, all functions */
275 	list_for_each_entry(child, &linkbus->devices, bus_list) {
276 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
277 		child_reg[PCI_FUNC(child->devfn)] = reg16;
278 		if (same_clock)
279 			reg16 |= PCI_EXP_LNKCTL_CCC;
280 		else
281 			reg16 &= ~PCI_EXP_LNKCTL_CCC;
282 		pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
283 	}
284 
285 	/* Configure upstream component */
286 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
287 	parent_reg = reg16;
288 	if (same_clock)
289 		reg16 |= PCI_EXP_LNKCTL_CCC;
290 	else
291 		reg16 &= ~PCI_EXP_LNKCTL_CCC;
292 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
293 
294 	if (pcie_retrain_link(link))
295 		return;
296 
297 	/* Training failed. Restore common clock configurations */
298 	pci_err(parent, "ASPM: Could not configure common clock\n");
299 	list_for_each_entry(child, &linkbus->devices, bus_list)
300 		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
301 					   child_reg[PCI_FUNC(child->devfn)]);
302 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
303 }
304 
305 /* Convert L0s latency encoding to ns */
306 static u32 calc_l0s_latency(u32 encoding)
307 {
308 	if (encoding == 0x7)
309 		return (5 * 1000);	/* > 4us */
310 	return (64 << encoding);
311 }
312 
313 /* Convert L0s acceptable latency encoding to ns */
314 static u32 calc_l0s_acceptable(u32 encoding)
315 {
316 	if (encoding == 0x7)
317 		return -1U;
318 	return (64 << encoding);
319 }
320 
321 /* Convert L1 latency encoding to ns */
322 static u32 calc_l1_latency(u32 encoding)
323 {
324 	if (encoding == 0x7)
325 		return (65 * 1000);	/* > 64us */
326 	return (1000 << encoding);
327 }
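/*
 * Example: a LNKCAP L1 Exit Latency encoding of 3 means the 4-8 us
 * range, for which calc_l1_latency() above returns the 8000 ns
 * upper bound.
 */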
328 
329 /* Convert L1 acceptable latency encoding to ns */
330 static u32 calc_l1_acceptable(u32 encoding)
331 {
332 	if (encoding == 0x7)
333 		return -1U;
334 	return (1000 << encoding);
335 }
336 
337 /* Convert L1SS T_pwr encoding to usec */
338 static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
339 {
340 	switch (scale) {
341 	case 0:
342 		return val * 2;
343 	case 1:
344 		return val * 10;
345 	case 2:
346 		return val * 100;
347 	}
348 	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
349 	return 0;
350 }
351 
352 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
353 {
354 	u32 threshold_ns = threshold_us * 1000;
355 
356 	/* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
357 	if (threshold_ns < 32) {
358 		*scale = 0;
359 		*value = threshold_ns;
360 	} else if (threshold_ns < 1024) {
361 		*scale = 1;
362 		*value = threshold_ns >> 5;
363 	} else if (threshold_ns < 32768) {
364 		*scale = 2;
365 		*value = threshold_ns >> 10;
366 	} else if (threshold_ns < 1048576) {
367 		*scale = 3;
368 		*value = threshold_ns >> 15;
369 	} else if (threshold_ns < 33554432) {
370 		*scale = 4;
371 		*value = threshold_ns >> 20;
372 	} else {
373 		*scale = 5;
374 		*value = threshold_ns >> 25;
375 	}
376 }
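/*
 * Example: threshold_us = 55 gives threshold_ns = 55000, which falls in
 * the [32768, 1048576) bucket above, so *scale = 3 (32.768 us units) and
 * *value = 55000 >> 15 = 1; the encoding rounds down.
 */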
377 
378 struct aspm_register_info {
379 	u32 support:2;
380 	u32 enabled:2;
381 	u32 latency_encoding_l0s;
382 	u32 latency_encoding_l1;
383 
384 	/* L1 substates */
385 	u32 l1ss_cap_ptr;
386 	u32 l1ss_cap;
387 	u32 l1ss_ctl1;
388 	u32 l1ss_ctl2;
389 };
390 
391 static void pcie_get_aspm_reg(struct pci_dev *pdev,
392 			      struct aspm_register_info *info)
393 {
394 	u16 reg16;
395 	u32 reg32;
396 
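	/*
	 * LNKCAP: ASPM Support is bits 11:10, L0s Exit Latency bits 14:12,
	 * L1 Exit Latency bits 17:15.  LNKCTL: ASPM Control is bits 1:0.
	 * The shifts below leave each field right-justified.
	 */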
397 	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
398 	info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
399 	info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
400 	info->latency_encoding_l1  = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
401 	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
402 	info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
403 
404 	/* Read L1 PM substate capabilities */
405 	info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
406 	info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
407 	if (!info->l1ss_cap_ptr)
408 		return;
409 	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
410 			      &info->l1ss_cap);
411 	if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
412 		info->l1ss_cap = 0;
413 		return;
414 	}
415 
416 	/*
417 	 * If we don't have LTR for the entire path from the Root Complex
418 	 * to this device, we can't use ASPM L1.2 because it relies on the
419 	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
420 	 */
421 	if (!pdev->ltr_path)
422 		info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
423 
424 	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
425 			      &info->l1ss_ctl1);
426 	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
427 			      &info->l1ss_ctl2);
428 }
429 
430 static void pcie_aspm_check_latency(struct pci_dev *endpoint)
431 {
432 	u32 latency, l1_switch_latency = 0;
433 	struct aspm_latency *acceptable;
434 	struct pcie_link_state *link;
435 
436 	/* A device not in D0 doesn't need a latency check */
437 	if ((endpoint->current_state != PCI_D0) &&
438 	    (endpoint->current_state != PCI_UNKNOWN))
439 		return;
440 
441 	link = endpoint->bus->self->link_state;
442 	acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
443 
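	/*
	 * Walk from the endpoint's link up to the root port, pruning any
	 * ASPM state whose accumulated exit latency exceeds what this
	 * endpoint advertised it can accept.
	 */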
444 	while (link) {
445 		/* Check upstream direction L0s latency */
446 		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
447 		    (link->latency_up.l0s > acceptable->l0s))
448 			link->aspm_capable &= ~ASPM_STATE_L0S_UP;
449 
450 		/* Check downstream direction L0s latency */
451 		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
452 		    (link->latency_dw.l0s > acceptable->l0s))
453 			link->aspm_capable &= ~ASPM_STATE_L0S_DW;
454 		/*
455 		 * Check L1 latency.
456 		 * Every switch on the path to the root complex needs 1
457 		 * more microsecond for L1. The spec doesn't mention L0s.
458 		 *
459 		 * The exit latencies for L1 substates are not advertised
460 		 * by a device.  Since the spec also doesn't mention a way
461 		 * to determine max latencies introduced by enabling L1
462 		 * substates on the components, it is not clear how to do
463 		 * an L1 substate exit latency check.  We assume that the
464 		 * L1 exit latencies advertised by a device include L1
465 		 * substate latencies (and hence do not do any check).
466 		 */
467 		latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
468 		if ((link->aspm_capable & ASPM_STATE_L1) &&
469 		    (latency + l1_switch_latency > acceptable->l1))
470 			link->aspm_capable &= ~ASPM_STATE_L1;
471 		l1_switch_latency += 1000;
472 
473 		link = link->parent;
474 	}
475 }
476 
477 /*
478  * The L1 PM substate capability is only implemented in function 0 in a
479  * The L1 PM substate capability is only implemented in function 0 of a
480  * multi-function device.
481 static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
482 {
483 	struct pci_dev *child;
484 
485 	list_for_each_entry(child, &linkbus->devices, bus_list)
486 		if (PCI_FUNC(child->devfn) == 0)
487 			return child;
488 	return NULL;
489 }
490 
491 /* Calculate L1.2 PM substate timing parameters */
492 static void aspm_calc_l1ss_info(struct pcie_link_state *link,
493 				struct aspm_register_info *upreg,
494 				struct aspm_register_info *dwreg)
495 {
496 	u32 val1, val2, scale1, scale2;
497 	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
498 
499 	link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
500 	link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
501 	link->l1ss.ctl1 = link->l1ss.ctl2 = 0;
502 
503 	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
504 		return;
505 
506 	/* Choose the greater of the two Port Common_Mode_Restore_Times */
507 	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
508 	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
509 	t_common_mode = max(val1, val2);
510 
511 	/* Choose the greater of the two Port T_POWER_ON times */
512 	val1   = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
513 	scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
514 	val2   = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
515 	scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
516 
517 	if (calc_l1ss_pwron(link->pdev, scale1, val1) >
518 	    calc_l1ss_pwron(link->downstream, scale2, val2)) {
519 		link->l1ss.ctl2 |= scale1 | (val1 << 3);
520 		t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
521 	} else {
522 		link->l1ss.ctl2 |= scale2 | (val2 << 3);
523 		t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
524 	}
525 
526 	/*
527 	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
528 	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
529 	 * downstream devices report (via LTR) that they can tolerate at
530 	 * least that much latency.
531 	 *
532 	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
533 	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
534 	 * least 4us.
535 	 */
536 	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
537 	encode_l12_threshold(l1_2_threshold, &scale, &value);
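	/*
	 * L1SS CTL1 layout: Common_Mode_Restore_Time lives in bits 15:8,
	 * LTR_L1.2_THRESHOLD_Value in bits 25:16 and _Scale in bits 31:29,
	 * matching the shifts below.
	 */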
538 	link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
539 }
540 
541 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
542 {
543 	struct pci_dev *child = link->downstream, *parent = link->pdev;
544 	struct pci_bus *linkbus = parent->subordinate;
545 	struct aspm_register_info upreg, dwreg;
546 
547 	if (blacklist) {
548 		/* Set aspm_enabled and aspm_disable so that we disable ASPM later */
549 		link->aspm_enabled = ASPM_STATE_ALL;
550 		link->aspm_disable = ASPM_STATE_ALL;
551 		return;
552 	}
553 
554 	/* Get upstream/downstream components' register state */
555 	pcie_get_aspm_reg(parent, &upreg);
556 	pcie_get_aspm_reg(child, &dwreg);
557 
558 	/*
559 	 * If ASPM is not supported, don't mess with the clocks and link;
560 	 * bail out now.
561 	 */
562 	if (!(upreg.support & dwreg.support))
563 		return;
564 
565 	/* Configure common clock before checking latencies */
566 	pcie_aspm_configure_common_clock(link);
567 
568 	/*
569 	 * Re-read upstream/downstream components' register state
570 	 * after clock configuration
571 	 */
572 	pcie_get_aspm_reg(parent, &upreg);
573 	pcie_get_aspm_reg(child, &dwreg);
574 
575 	/*
576 	 * Setup L0s state
577 	 *
578 	 * Note that we must not enable L0s in either direction on a
579 	 * given link unless components on both sides of the link each
580 	 * support L0s.
581 	 */
582 	if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
583 		link->aspm_support |= ASPM_STATE_L0S;
584 	if (dwreg.enabled & PCIE_LINK_STATE_L0S)
585 		link->aspm_enabled |= ASPM_STATE_L0S_UP;
586 	if (upreg.enabled & PCIE_LINK_STATE_L0S)
587 		link->aspm_enabled |= ASPM_STATE_L0S_DW;
588 	link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
589 	link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
590 
591 	/* Setup L1 state */
592 	if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
593 		link->aspm_support |= ASPM_STATE_L1;
594 	if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
595 		link->aspm_enabled |= ASPM_STATE_L1;
596 	link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
597 	link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
598 
599 	/* Setup L1 substate */
600 	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
601 		link->aspm_support |= ASPM_STATE_L1_1;
602 	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
603 		link->aspm_support |= ASPM_STATE_L1_2;
604 	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
605 		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
606 	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
607 		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
608 
609 	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
610 		link->aspm_enabled |= ASPM_STATE_L1_1;
611 	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
612 		link->aspm_enabled |= ASPM_STATE_L1_2;
613 	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
614 		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
615 	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
616 		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
617 
618 	if (link->aspm_support & ASPM_STATE_L1SS)
619 		aspm_calc_l1ss_info(link, &upreg, &dwreg);
620 
621 	/* Save default state */
622 	link->aspm_default = link->aspm_enabled;
623 
624 	/* Setup initial capable state. Will be updated later */
625 	link->aspm_capable = link->aspm_support;
626 	/*
627 	 * If the downstream component has a PCI bridge function, don't
628 	 * do ASPM for now.
629 	 */
630 	list_for_each_entry(child, &linkbus->devices, bus_list) {
631 		if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
632 			link->aspm_disable = ASPM_STATE_ALL;
633 			break;
634 		}
635 	}
636 
637 	/* Get and check endpoint acceptable latencies */
638 	list_for_each_entry(child, &linkbus->devices, bus_list) {
639 		u32 reg32, encoding;
640 		struct aspm_latency *acceptable =
641 			&link->acceptable[PCI_FUNC(child->devfn)];
642 
643 		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
644 		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
645 			continue;
646 
647 		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
648 		/* Calculate endpoint L0s acceptable latency */
649 		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
650 		acceptable->l0s = calc_l0s_acceptable(encoding);
651 		/* Calculate endpoint L1 acceptable latency */
652 		encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
653 		acceptable->l1 = calc_l1_acceptable(encoding);
654 
655 		pcie_aspm_check_latency(child);
656 	}
657 }
658 
659 static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
660 				    u32 clear, u32 set)
661 {
662 	u32 val;
663 
664 	pci_read_config_dword(pdev, pos, &val);
665 	val &= ~clear;
666 	val |= set;
667 	pci_write_config_dword(pdev, pos, val);
668 }
669 
670 /* Configure the ASPM L1 substates */
671 static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
672 {
673 	u32 val, enable_req;
674 	struct pci_dev *child = link->downstream, *parent = link->pdev;
675 	u32 up_cap_ptr = link->l1ss.up_cap_ptr;
676 	u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;
677 
678 	enable_req = (link->aspm_enabled ^ state) & state;
679 
680 	/*
681 	 * Here are the rules specified in the PCIe spec for enabling L1SS:
682 	 * - When enabling L1.x, enable bit at parent first, then at child
683 	 * - When disabling L1.x, disable bit at child first, then at parent
684 	 * - When enabling ASPM L1.x, need to disable L1
685 	 *   (at child followed by parent).
686 	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
687 	 *   parameters
688 	 *
689 	 * To keep it simple, disable all L1SS bits first, and later enable
690 	 * what is needed.
691 	 */
692 
693 	/* Disable all L1 substates */
694 	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
695 				PCI_L1SS_CTL1_L1SS_MASK, 0);
696 	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
697 				PCI_L1SS_CTL1_L1SS_MASK, 0);
698 	/*
699 	 * If needed, disable L1 here; it gets re-enabled later
700 	 * in pcie_config_aspm_link().
701 	 */
702 	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
703 		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
704 						   PCI_EXP_LNKCTL_ASPM_L1, 0);
705 		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
706 						   PCI_EXP_LNKCTL_ASPM_L1, 0);
707 	}
708 
709 	if (enable_req & ASPM_STATE_L1_2_MASK) {
710 
711 		/* Program T_POWER_ON times in both ports */
712 		pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
713 				       link->l1ss.ctl2);
714 		pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
715 				       link->l1ss.ctl2);
716 
717 		/* Program Common_Mode_Restore_Time in upstream device */
718 		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
719 					PCI_L1SS_CTL1_CM_RESTORE_TIME,
720 					link->l1ss.ctl1);
721 
722 		/* Program LTR_L1.2_THRESHOLD time in both ports */
723 		pci_clear_and_set_dword(parent,	up_cap_ptr + PCI_L1SS_CTL1,
724 					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
725 					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
726 					link->l1ss.ctl1);
727 		pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
728 					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
729 					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
730 					link->l1ss.ctl1);
731 	}
732 
733 	val = 0;
734 	if (state & ASPM_STATE_L1_1)
735 		val |= PCI_L1SS_CTL1_ASPM_L1_1;
736 	if (state & ASPM_STATE_L1_2)
737 		val |= PCI_L1SS_CTL1_ASPM_L1_2;
738 	if (state & ASPM_STATE_L1_1_PCIPM)
739 		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
740 	if (state & ASPM_STATE_L1_2_PCIPM)
741 		val |= PCI_L1SS_CTL1_PCIPM_L1_2;
742 
743 	/* Enable what we need to enable */
744 	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
745 				PCI_L1SS_CAP_L1_PM_SS, val);
746 	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
747 				PCI_L1SS_CAP_L1_PM_SS, val);
748 }
749 
750 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
751 {
752 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
753 					   PCI_EXP_LNKCTL_ASPMC, val);
754 }
755 
756 static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
757 {
758 	u32 upstream = 0, dwstream = 0;
759 	struct pci_dev *child = link->downstream, *parent = link->pdev;
760 	struct pci_bus *linkbus = parent->subordinate;
761 
762 	/* Enable only the states that were not explicitly disabled */
763 	state &= (link->aspm_capable & ~link->aspm_disable);
764 
765 	/* Can't enable any substates if L1 is not enabled */
766 	if (!(state & ASPM_STATE_L1))
767 		state &= ~ASPM_STATE_L1SS;
768 
769 	/* Spec says both ports must be in D0 before enabling PCI PM substates */
770 	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
771 		state &= ~ASPM_STATE_L1_SS_PCIPM;
772 		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
773 	}
774 
775 	/* Nothing to do if the link is already in the requested state */
776 	if (link->aspm_enabled == state)
777 		return;
778 	/* Convert ASPM state to upstream/downstream ASPM register state */
779 	if (state & ASPM_STATE_L0S_UP)
780 		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
781 	if (state & ASPM_STATE_L0S_DW)
782 		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
783 	if (state & ASPM_STATE_L1) {
784 		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
785 		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
786 	}
787 
788 	if (link->aspm_capable & ASPM_STATE_L1SS)
789 		pcie_config_aspm_l1ss(link, state);
790 
791 	/*
792 	 * Spec 2.0 suggests all functions should be configured with the
793 	 * same ASPM setting. ASPM L1 should be enabled in the upstream
794 	 * component first and then in the downstream component, and vice
795 	 * versa for disabling ASPM L1. The spec doesn't mention L0s.
796 	 */
797 	if (state & ASPM_STATE_L1)
798 		pcie_config_aspm_dev(parent, upstream);
799 	list_for_each_entry(child, &linkbus->devices, bus_list)
800 		pcie_config_aspm_dev(child, dwstream);
801 	if (!(state & ASPM_STATE_L1))
802 		pcie_config_aspm_dev(parent, upstream);
803 
804 	link->aspm_enabled = state;
805 }
806 
807 static void pcie_config_aspm_path(struct pcie_link_state *link)
808 {
809 	while (link) {
810 		pcie_config_aspm_link(link, policy_to_aspm_state(link));
811 		link = link->parent;
812 	}
813 }
814 
815 static void free_link_state(struct pcie_link_state *link)
816 {
817 	link->pdev->link_state = NULL;
818 	kfree(link);
819 }
820 
821 static int pcie_aspm_sanity_check(struct pci_dev *pdev)
822 {
823 	struct pci_dev *child;
824 	u32 reg32;
825 
826 	/*
827 	 * Some functions in a slot might not be PCIe functions, which
828 	 * is strange. Disable ASPM for the whole slot in that case.
829 	 */
830 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
831 		if (!pci_is_pcie(child))
832 			return -EINVAL;
833 
834 		/*
835 		 * If ASPM is disabled then we're not going to change
836 		 * the BIOS state. It's safe to continue even if it's a
837 		 * pre-1.1 device
838 		 */
839 
840 		if (aspm_disabled)
841 			continue;
842 
843 		/*
844 		 * Disable ASPM for pre-1.1 PCIe devices; like Windows, we use the
845 		 * RBER bit to determine whether a function is a 1.1-level device.
846 		 */
847 		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
848 		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
849 			pci_info(child, "disabling ASPM on pre-1.1 PCIe device.  You can enable it with 'pcie_aspm=force'\n");
850 			return -EINVAL;
851 		}
852 	}
853 	return 0;
854 }
855 
856 static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
857 {
858 	struct pcie_link_state *link;
859 
860 	link = kzalloc(sizeof(*link), GFP_KERNEL);
861 	if (!link)
862 		return NULL;
863 
864 	INIT_LIST_HEAD(&link->sibling);
865 	link->pdev = pdev;
866 	link->downstream = pci_function_0(pdev->subordinate);
867 
868 	/*
869 	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
870 	 * hierarchies.  Note that some PCIe host implementations omit
871 	 * the root ports entirely, in which case a downstream port on
872 	 * a switch may become the root of the link state chain for all
873 	 * its subordinate endpoints.
874 	 */
875 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
876 	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
877 	    !pdev->bus->parent->self) {
878 		link->root = link;
879 	} else {
880 		struct pcie_link_state *parent;
881 
882 		parent = pdev->bus->parent->self->link_state;
883 		if (!parent) {
884 			kfree(link);
885 			return NULL;
886 		}
887 
888 		link->parent = parent;
889 		link->root = link->parent->root;
890 	}
891 
892 	list_add(&link->sibling, &link_list);
893 	pdev->link_state = link;
894 	return link;
895 }
896 
897 /*
898  * pcie_aspm_init_link_state: Initialize PCI Express link state.
899  * Called after a PCIe port and its child devices have been scanned.
900  * @pdev: the root port or switch downstream port
901  */
902 void pcie_aspm_init_link_state(struct pci_dev *pdev)
903 {
904 	struct pcie_link_state *link;
905 	int blacklist = !!pcie_aspm_sanity_check(pdev);
906 
907 	if (!aspm_support_enabled)
908 		return;
909 
910 	if (pdev->link_state)
911 		return;
912 
913 	/*
914 	 * We allocate pcie_link_state for the component on the upstream
915 	 * end of a Link, so there's nothing to do unless this device is
916 	 * a downstream port.
917 	 */
918 	if (!pcie_downstream_port(pdev))
919 		return;
920 
921 	/* VIA has a strange chipset; its root port is under a bridge */
922 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
923 	    pdev->bus->self)
924 		return;
925 
926 	down_read(&pci_bus_sem);
927 	if (list_empty(&pdev->subordinate->devices))
928 		goto out;
929 
930 	mutex_lock(&aspm_lock);
931 	link = alloc_pcie_link_state(pdev);
932 	if (!link)
933 		goto unlock;
934 	/*
935 	 * Setup initial ASPM state. Note that we need to configure
936 	 * upstream links as well, because their capable state can be
937 	 * updated through pcie_aspm_cap_init().
938 	 */
939 	pcie_aspm_cap_init(link, blacklist);
940 
941 	/* Setup initial Clock PM state */
942 	pcie_clkpm_cap_init(link, blacklist);
943 
944 	/*
945 	 * At this stage drivers haven't had an opportunity to change the
946 	 * link policy setting. Enabling ASPM on broken hardware can cripple
947 	 * it even before the driver has had a chance to disable ASPM, so
948 	 * default to a safe level right now. If we're enabling ASPM beyond
949 	 * the BIOS's expectation, we'll do so once pci_enable_device() is
950 	 * called.
951 	 */
952 	if (aspm_policy != POLICY_POWERSAVE &&
953 	    aspm_policy != POLICY_POWER_SUPERSAVE) {
954 		pcie_config_aspm_path(link);
955 		pcie_set_clkpm(link, policy_to_clkpm_state(link));
956 	}
957 
958 unlock:
959 	mutex_unlock(&aspm_lock);
960 out:
961 	up_read(&pci_bus_sem);
962 }
963 
964 /* Recheck latencies and update aspm_capable for links under the root */
965 static void pcie_update_aspm_capable(struct pcie_link_state *root)
966 {
967 	struct pcie_link_state *link;
968 	BUG_ON(root->parent);
969 	list_for_each_entry(link, &link_list, sibling) {
970 		if (link->root != root)
971 			continue;
972 		link->aspm_capable = link->aspm_support;
973 	}
974 	list_for_each_entry(link, &link_list, sibling) {
975 		struct pci_dev *child;
976 		struct pci_bus *linkbus = link->pdev->subordinate;
977 		if (link->root != root)
978 			continue;
979 		list_for_each_entry(child, &linkbus->devices, bus_list) {
980 			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
981 			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
982 				continue;
983 			pcie_aspm_check_latency(child);
984 		}
985 	}
986 }
987 
988 /* @pdev: the endpoint device */
989 void pcie_aspm_exit_link_state(struct pci_dev *pdev)
990 {
991 	struct pci_dev *parent = pdev->bus->self;
992 	struct pcie_link_state *link, *root, *parent_link;
993 
994 	if (!parent || !parent->link_state)
995 		return;
996 
997 	down_read(&pci_bus_sem);
998 	mutex_lock(&aspm_lock);
999 	/*
1000 	 * All PCIe functions are in one slot; removing one function removes
1001 	 * the whole slot, so just wait until we are the last function left.
1002 	 */
1003 	if (!list_empty(&parent->subordinate->devices))
1004 		goto out;
1005 
1006 	link = parent->link_state;
1007 	root = link->root;
1008 	parent_link = link->parent;
1009 
1010 	/* All functions are removed, so just disable ASPM for the link */
1011 	pcie_config_aspm_link(link, 0);
1012 	list_del(&link->sibling);
1013 	/* Clock PM is for endpoint device */
1014 	free_link_state(link);
1015 
1016 	/* Recheck latencies and configure upstream links */
1017 	if (parent_link) {
1018 		pcie_update_aspm_capable(root);
1019 		pcie_config_aspm_path(parent_link);
1020 	}
1021 out:
1022 	mutex_unlock(&aspm_lock);
1023 	up_read(&pci_bus_sem);
1024 }
1025 
1026 /* @pdev: the root port or switch downstream port */
1027 void pcie_aspm_pm_state_change(struct pci_dev *pdev)
1028 {
1029 	struct pcie_link_state *link = pdev->link_state;
1030 
1031 	if (aspm_disabled || !link)
1032 		return;
1033 	/*
1034 	 * A device changed PM state; recheck whether the latency still
1035 	 * meets all functions' requirements.
1036 	 */
1037 	down_read(&pci_bus_sem);
1038 	mutex_lock(&aspm_lock);
1039 	pcie_update_aspm_capable(link->root);
1040 	pcie_config_aspm_path(link);
1041 	mutex_unlock(&aspm_lock);
1042 	up_read(&pci_bus_sem);
1043 }
1044 
1045 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1046 {
1047 	struct pcie_link_state *link = pdev->link_state;
1048 
1049 	if (aspm_disabled || !link)
1050 		return;
1051 
1052 	if (aspm_policy != POLICY_POWERSAVE &&
1053 	    aspm_policy != POLICY_POWER_SUPERSAVE)
1054 		return;
1055 
1056 	down_read(&pci_bus_sem);
1057 	mutex_lock(&aspm_lock);
1058 	pcie_config_aspm_path(link);
1059 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1060 	mutex_unlock(&aspm_lock);
1061 	up_read(&pci_bus_sem);
1062 }
1063 
1064 static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
1065 {
1066 	struct pci_dev *parent = pdev->bus->self;
1067 	struct pcie_link_state *link;
1068 
1069 	if (!pci_is_pcie(pdev))
1070 		return 0;
1071 
1072 	if (pcie_downstream_port(pdev))
1073 		parent = pdev;
1074 	if (!parent || !parent->link_state)
1075 		return -EINVAL;
1076 
1077 	/*
1078 	 * A driver requested that ASPM be disabled on this device, but
1079 	 * if we don't have permission to manage ASPM (e.g., on ACPI
1080 	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1081 	 * the _OSC method), we can't honor that request.  Windows has
1082 	 * a similar mechanism using "PciASPMOptOut", which is also
1083 	 * ignored in this situation.
1084 	 */
1085 	if (aspm_disabled) {
1086 		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1087 		return -EPERM;
1088 	}
1089 
1090 	if (sem)
1091 		down_read(&pci_bus_sem);
1092 	mutex_lock(&aspm_lock);
1093 	link = parent->link_state;
1094 	if (state & PCIE_LINK_STATE_L0S)
1095 		link->aspm_disable |= ASPM_STATE_L0S;
1096 	if (state & PCIE_LINK_STATE_L1)
1097 		link->aspm_disable |= ASPM_STATE_L1;
1098 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1099 
1100 	if (state & PCIE_LINK_STATE_CLKPM) {
1101 		link->clkpm_capable = 0;
1102 		pcie_set_clkpm(link, 0);
1103 	}
1104 	mutex_unlock(&aspm_lock);
1105 	if (sem)
1106 		up_read(&pci_bus_sem);
1107 
1108 	return 0;
1109 }
1110 
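/*
 * Like pci_disable_link_state(), but does not take pci_bus_sem itself;
 * the caller is expected to already hold it.
 */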
1111 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1112 {
1113 	return __pci_disable_link_state(pdev, state, false);
1114 }
1115 EXPORT_SYMBOL(pci_disable_link_state_locked);
1116 
1117 /**
1118  * pci_disable_link_state - Disable device's link state, so the link will
1119  * never enter specific states.  Note that if the BIOS didn't grant ASPM
1120  * control to the OS, this does nothing because we can't touch the LNKCTL
1121  * register. Returns 0 or a negative errno.
1122  *
1123  * @pdev: PCI device
1124  * @state: ASPM link state to disable
1125  */
1126 int pci_disable_link_state(struct pci_dev *pdev, int state)
1127 {
1128 	return __pci_disable_link_state(pdev, state, true);
1129 }
1130 EXPORT_SYMBOL(pci_disable_link_state);
1131 
1132 static int pcie_aspm_set_policy(const char *val,
1133 				const struct kernel_param *kp)
1134 {
1135 	int i;
1136 	struct pcie_link_state *link;
1137 
1138 	if (aspm_disabled)
1139 		return -EPERM;
1140 	i = sysfs_match_string(policy_str, val);
1141 	if (i < 0)
1142 		return i;
1143 	if (i == aspm_policy)
1144 		return 0;
1145 
1146 	down_read(&pci_bus_sem);
1147 	mutex_lock(&aspm_lock);
1148 	aspm_policy = i;
1149 	list_for_each_entry(link, &link_list, sibling) {
1150 		pcie_config_aspm_link(link, policy_to_aspm_state(link));
1151 		pcie_set_clkpm(link, policy_to_clkpm_state(link));
1152 	}
1153 	mutex_unlock(&aspm_lock);
1154 	up_read(&pci_bus_sem);
1155 	return 0;
1156 }
1157 
1158 static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1159 {
1160 	int i, cnt = 0;
1161 	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1162 		if (i == aspm_policy)
1163 			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1164 		else
1165 			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1166 	return cnt;
1167 }
1168 
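/*
 * With MODULE_PARAM_PREFIX set above, this parameter shows up as
 * "pcie_aspm.policy" on the kernel command line and under
 * /sys/module/pcie_aspm/parameters/policy.
 */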
1169 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1170 	NULL, 0644);
1171 
1172 /**
1173  * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1174  * @pdev: Target device.
1175  */
1176 bool pcie_aspm_enabled(struct pci_dev *pdev)
1177 {
1178 	struct pci_dev *bridge = pci_upstream_bridge(pdev);
1179 	bool ret;
1180 
1181 	if (!bridge)
1182 		return false;
1183 
1184 	mutex_lock(&aspm_lock);
1185 	ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
1186 	mutex_unlock(&aspm_lock);
1187 
1188 	return ret;
1189 }
1190 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1191 
1192 #ifdef CONFIG_PCIEASPM_DEBUG
1193 static ssize_t link_state_show(struct device *dev,
1194 		struct device_attribute *attr,
1195 		char *buf)
1196 {
1197 	struct pci_dev *pci_device = to_pci_dev(dev);
1198 	struct pcie_link_state *link_state = pci_device->link_state;
1199 
1200 	return sprintf(buf, "%d\n", link_state->aspm_enabled);
1201 }
1202 
1203 static ssize_t link_state_store(struct device *dev,
1204 		struct device_attribute *attr,
1205 		const char *buf,
1206 		size_t n)
1207 {
1208 	struct pci_dev *pdev = to_pci_dev(dev);
1209 	struct pcie_link_state *link, *root = pdev->link_state->root;
1210 	u32 state;
1211 
1212 	if (aspm_disabled)
1213 		return -EPERM;
1214 
1215 	if (kstrtouint(buf, 10, &state))
1216 		return -EINVAL;
1217 	if ((state & ~ASPM_STATE_ALL) != 0)
1218 		return -EINVAL;
1219 
1220 	down_read(&pci_bus_sem);
1221 	mutex_lock(&aspm_lock);
1222 	list_for_each_entry(link, &link_list, sibling) {
1223 		if (link->root != root)
1224 			continue;
1225 		pcie_config_aspm_link(link, state);
1226 	}
1227 	mutex_unlock(&aspm_lock);
1228 	up_read(&pci_bus_sem);
1229 	return n;
1230 }
1231 
1232 static ssize_t clk_ctl_show(struct device *dev,
1233 		struct device_attribute *attr,
1234 		char *buf)
1235 {
1236 	struct pci_dev *pci_device = to_pci_dev(dev);
1237 	struct pcie_link_state *link_state = pci_device->link_state;
1238 
1239 	return sprintf(buf, "%d\n", link_state->clkpm_enabled);
1240 }
1241 
1242 static ssize_t clk_ctl_store(struct device *dev,
1243 		struct device_attribute *attr,
1244 		const char *buf,
1245 		size_t n)
1246 {
1247 	struct pci_dev *pdev = to_pci_dev(dev);
1248 	bool state;
1249 
1250 	if (strtobool(buf, &state))
1251 		return -EINVAL;
1252 
1253 	down_read(&pci_bus_sem);
1254 	mutex_lock(&aspm_lock);
1255 	pcie_set_clkpm_nocheck(pdev->link_state, state);
1256 	mutex_unlock(&aspm_lock);
1257 	up_read(&pci_bus_sem);
1258 
1259 	return n;
1260 }
1261 
1262 static DEVICE_ATTR_RW(link_state);
1263 static DEVICE_ATTR_RW(clk_ctl);
1264 
1265 static char power_group[] = "power";
1266 void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
1267 {
1268 	struct pcie_link_state *link_state = pdev->link_state;
1269 
1270 	if (!link_state)
1271 		return;
1272 
1273 	if (link_state->aspm_support)
1274 		sysfs_add_file_to_group(&pdev->dev.kobj,
1275 			&dev_attr_link_state.attr, power_group);
1276 	if (link_state->clkpm_capable)
1277 		sysfs_add_file_to_group(&pdev->dev.kobj,
1278 			&dev_attr_clk_ctl.attr, power_group);
1279 }
1280 
1281 void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
1282 {
1283 	struct pcie_link_state *link_state = pdev->link_state;
1284 
1285 	if (!link_state)
1286 		return;
1287 
1288 	if (link_state->aspm_support)
1289 		sysfs_remove_file_from_group(&pdev->dev.kobj,
1290 			&dev_attr_link_state.attr, power_group);
1291 	if (link_state->clkpm_capable)
1292 		sysfs_remove_file_from_group(&pdev->dev.kobj,
1293 			&dev_attr_clk_ctl.attr, power_group);
1294 }
1295 #endif
1296 
1297 static int __init pcie_aspm_disable(char *str)
1298 {
1299 	if (!strcmp(str, "off")) {
1300 		aspm_policy = POLICY_DEFAULT;
1301 		aspm_disabled = 1;
1302 		aspm_support_enabled = false;
1303 		printk(KERN_INFO "PCIe ASPM is disabled\n");
1304 	} else if (!strcmp(str, "force")) {
1305 		aspm_force = 1;
1306 		printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
1307 	}
1308 	return 1;
1309 }
1310 
1311 __setup("pcie_aspm=", pcie_aspm_disable);
1312 
1313 void pcie_no_aspm(void)
1314 {
1315 	/*
1316 	 * Disabling ASPM is intended to prevent the kernel from modifying
1317 	 * existing hardware state, not to clear existing state. To that end:
1318 	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1319 	 * (b) prevent userspace from changing policy
1320 	 */
1321 	if (!aspm_force) {
1322 		aspm_policy = POLICY_DEFAULT;
1323 		aspm_disabled = 1;
1324 	}
1325 }
1326 
1327 bool pcie_aspm_support_enabled(void)
1328 {
1329 	return aspm_support_enabled;
1330 }
1331 EXPORT_SYMBOL(pcie_aspm_support_enabled);
1332