xref: /openbmc/linux/drivers/acpi/acpi_lpss.c (revision 234489ac)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * ACPI support for Intel Lynxpoint LPSS.
4  *
5  * Copyright (C) 2013, Intel Corporation
6  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8  */
9 
10 #include <linux/acpi.h>
11 #include <linux/clkdev.h>
12 #include <linux/clk-provider.h>
13 #include <linux/dmi.h>
14 #include <linux/err.h>
15 #include <linux/io.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/platform_device.h>
19 #include <linux/platform_data/x86/clk-lpss.h>
20 #include <linux/platform_data/x86/pmc_atom.h>
21 #include <linux/pm_domain.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pwm.h>
24 #include <linux/pxa2xx_ssp.h>
25 #include <linux/suspend.h>
26 #include <linux/delay.h>
27 
28 #include "internal.h"
29 
30 #ifdef CONFIG_X86_INTEL_LPSS
31 
32 #include <asm/cpu_device_id.h>
33 #include <asm/intel-family.h>
34 #include <asm/iosf_mbi.h>
35 
36 #define LPSS_ADDR(desc) ((unsigned long)&desc)
37 
38 #define LPSS_CLK_SIZE	0x04
39 #define LPSS_LTR_SIZE	0x18
40 
41 /* Offsets relative to LPSS_PRIVATE_OFFSET */
42 #define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
43 #define LPSS_RESETS			0x04
44 #define LPSS_RESETS_RESET_FUNC		BIT(0)
45 #define LPSS_RESETS_RESET_APB		BIT(1)
46 #define LPSS_GENERAL			0x08
47 #define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
48 #define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
49 #define LPSS_SW_LTR			0x10
50 #define LPSS_AUTO_LTR			0x14
51 #define LPSS_LTR_SNOOP_REQ		BIT(15)
52 #define LPSS_LTR_SNOOP_MASK		0x0000FFFF
53 #define LPSS_LTR_SNOOP_LAT_1US		0x800
54 #define LPSS_LTR_SNOOP_LAT_32US		0xC00
55 #define LPSS_LTR_SNOOP_LAT_SHIFT	5
56 #define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
57 #define LPSS_LTR_MAX_VAL		0x3FF
58 #define LPSS_TX_INT			0x20
59 #define LPSS_TX_INT_MASK		BIT(1)
60 
61 #define LPSS_PRV_REG_COUNT		9
62 
63 /* LPSS Flags */
64 #define LPSS_CLK			BIT(0)
65 #define LPSS_CLK_GATE			BIT(1)
66 #define LPSS_CLK_DIVIDER		BIT(2)
67 #define LPSS_LTR			BIT(3)
68 #define LPSS_SAVE_CTX			BIT(4)
69 /*
70  * For some devices, the DSDT AML code for another device turns off the device
71  * before our suspend handler runs, causing us to read and save all ones
72  * (0xffffffff) as the ctx register values.
73  * Luckily these devices always use the same ctx register values, so we can
74  * work around this by saving the ctx registers once on activation.
75  */
76 #define LPSS_SAVE_CTX_ONCE		BIT(5)
77 #define LPSS_NO_D3_DELAY		BIT(6)
78 
79 struct lpss_private_data;
80 
81 struct lpss_device_desc {
82 	unsigned int flags;
83 	const char *clk_con_id;
84 	unsigned int prv_offset;
85 	size_t prv_size_override;
86 	const struct property_entry *properties;
87 	void (*setup)(struct lpss_private_data *pdata);
88 	bool resume_from_noirq;
89 };
90 
91 static const struct lpss_device_desc lpss_dma_desc = {
92 	.flags = LPSS_CLK,
93 };
94 
95 struct lpss_private_data {
96 	struct acpi_device *adev;
97 	void __iomem *mmio_base;
98 	resource_size_t mmio_size;
99 	unsigned int fixed_clk_rate;
100 	struct clk *clk;
101 	const struct lpss_device_desc *dev_desc;
102 	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
103 };
104 
105 /* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
106 static u32 pmc_atom_d3_mask = 0xfe000ffe;
107 
108 /* LPSS run time quirks */
109 static unsigned int lpss_quirks;
110 
111 /*
112  * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
113  *
114  * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
115  * it can be powered off automatically whenever the last LPSS device goes down.
116  * Without power, any access to the DMA controller will hang the system.
117  * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
118  * well as on the Asus T100TA transformer.
119  *
120  * This quirk overrides the power state of the entire LPSS island to keep the
121  * DMA controller powered on whenever at least one other LPSS device is in use.
122  */
123 #define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)
124 
125 /* UART Component Parameter Register */
126 #define LPSS_UART_CPR			0xF4
127 #define LPSS_UART_CPR_AFCE		BIT(4)
128 
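/*
 * Mask the UART TX interrupt in the private register space and, when the UART
 * lacks auto flow control (CPR.AFCE is clear), set the RTS override bit so the
 * RTS line does not depend on the unavailable hardware flow control. The AFCE
 * interpretation follows the DesignWare UART Component Parameter Register
 * layout; that is an assumption here rather than something documented in this
 * file.
 */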
129 static void lpss_uart_setup(struct lpss_private_data *pdata)
130 {
131 	unsigned int offset;
132 	u32 val;
133 
134 	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
135 	val = readl(pdata->mmio_base + offset);
136 	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
137 
138 	val = readl(pdata->mmio_base + LPSS_UART_CPR);
139 	if (!(val & LPSS_UART_CPR_AFCE)) {
140 		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
141 		val = readl(pdata->mmio_base + offset);
142 		val |= LPSS_GENERAL_UART_RTS_OVRD;
143 		writel(val, pdata->mmio_base + offset);
144 	}
145 }
146 
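/*
 * Take the device out of reset by deasserting both the function and APB
 * interface reset bits in the shared RESETS register of the private space.
 */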
147 static void lpss_deassert_reset(struct lpss_private_data *pdata)
148 {
149 	unsigned int offset;
150 	u32 val;
151 
152 	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
153 	val = readl(pdata->mmio_base + offset);
154 	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
155 	writel(val, pdata->mmio_base + offset);
156 }
157 
158 /*
159  * BYT PWM used for backlight control by the i915 driver on systems without
160  * the Crystal Cove PMIC.
161  */
162 static struct pwm_lookup byt_pwm_lookup[] = {
163 	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
164 			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
165 			       "pwm-lpss-platform"),
166 };
167 
168 static void byt_pwm_setup(struct lpss_private_data *pdata)
169 {
170 	u64 uid;
171 
172 	/* Only call pwm_add_table for the first PWM controller */
173 	if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
174 		return;
175 
176 	pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
177 }
178 
179 #define LPSS_I2C_ENABLE			0x6c
180 
181 static void byt_i2c_setup(struct lpss_private_data *pdata)
182 {
183 	acpi_handle handle = pdata->adev->handle;
184 	unsigned long long shared_host = 0;
185 	acpi_status status;
186 	u64 uid;
187 
188 	/* Expected to always be successful, but better safe than sorry */
189 	if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
190 		/* Detect an I2C bus shared with the PUNIT and ignore its D3 status */
191 		status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
192 		if (ACPI_SUCCESS(status) && shared_host)
193 			pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
194 	}
195 
196 	lpss_deassert_reset(pdata);
197 
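	/*
	 * A non-zero value in the first private register is taken to mean that
	 * the I2C controller runs from a fixed 133 MHz input clock rather than
	 * the shared LPSS clock; register_device_clock() then registers a
	 * fixed-rate clock for it. The 133 MHz figure comes from the code
	 * below, not from documentation available in this file.
	 */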
198 	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
199 		pdata->fixed_clk_rate = 133000000;
200 
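	/*
	 * Disable the controller so that its driver (presumably i2c-designware)
	 * finds it in a known, disabled state when it probes.
	 */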
201 	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
202 }
203 
204 /* BSW PWM used for backlight control by the i915 driver */
205 static struct pwm_lookup bsw_pwm_lookup[] = {
206 	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
207 			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
208 			       "pwm-lpss-platform"),
209 };
210 
211 static void bsw_pwm_setup(struct lpss_private_data *pdata)
212 {
213 	u64 uid;
214 
215 	/* Only call pwm_add_table for the first PWM controller */
216 	if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
217 		return;
218 
219 	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
220 }
221 
222 static const struct property_entry lpt_spi_properties[] = {
223 	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP),
224 	{ }
225 };
226 
227 static const struct lpss_device_desc lpt_spi_dev_desc = {
228 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
229 			| LPSS_SAVE_CTX,
230 	.prv_offset = 0x800,
231 	.properties = lpt_spi_properties,
232 };
233 
234 static const struct lpss_device_desc lpt_i2c_dev_desc = {
235 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
236 	.prv_offset = 0x800,
237 };
238 
239 static struct property_entry uart_properties[] = {
240 	PROPERTY_ENTRY_U32("reg-io-width", 4),
241 	PROPERTY_ENTRY_U32("reg-shift", 2),
242 	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
243 	{ },
244 };
245 
246 static const struct lpss_device_desc lpt_uart_dev_desc = {
247 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
248 			| LPSS_SAVE_CTX,
249 	.clk_con_id = "baudclk",
250 	.prv_offset = 0x800,
251 	.setup = lpss_uart_setup,
252 	.properties = uart_properties,
253 };
254 
255 static const struct lpss_device_desc lpt_sdio_dev_desc = {
256 	.flags = LPSS_LTR,
257 	.prv_offset = 0x1000,
258 	.prv_size_override = 0x1018,
259 };
260 
261 static const struct lpss_device_desc byt_pwm_dev_desc = {
262 	.flags = LPSS_SAVE_CTX,
263 	.prv_offset = 0x800,
264 	.setup = byt_pwm_setup,
265 };
266 
267 static const struct lpss_device_desc bsw_pwm_dev_desc = {
268 	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
269 	.prv_offset = 0x800,
270 	.setup = bsw_pwm_setup,
271 	.resume_from_noirq = true,
272 };
273 
274 static const struct lpss_device_desc bsw_pwm2_dev_desc = {
275 	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
276 	.prv_offset = 0x800,
277 	.resume_from_noirq = true,
278 };
279 
280 static const struct lpss_device_desc byt_uart_dev_desc = {
281 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
282 	.clk_con_id = "baudclk",
283 	.prv_offset = 0x800,
284 	.setup = lpss_uart_setup,
285 	.properties = uart_properties,
286 };
287 
288 static const struct lpss_device_desc bsw_uart_dev_desc = {
289 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
290 			| LPSS_NO_D3_DELAY,
291 	.clk_con_id = "baudclk",
292 	.prv_offset = 0x800,
293 	.setup = lpss_uart_setup,
294 	.properties = uart_properties,
295 };
296 
297 static const struct property_entry byt_spi_properties[] = {
298 	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP),
299 	{ }
300 };
301 
302 static const struct lpss_device_desc byt_spi_dev_desc = {
303 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
304 	.prv_offset = 0x400,
305 	.properties = byt_spi_properties,
306 };
307 
308 static const struct lpss_device_desc byt_sdio_dev_desc = {
309 	.flags = LPSS_CLK,
310 };
311 
312 static const struct lpss_device_desc byt_i2c_dev_desc = {
313 	.flags = LPSS_CLK | LPSS_SAVE_CTX,
314 	.prv_offset = 0x800,
315 	.setup = byt_i2c_setup,
316 	.resume_from_noirq = true,
317 };
318 
319 static const struct lpss_device_desc bsw_i2c_dev_desc = {
320 	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
321 	.prv_offset = 0x800,
322 	.setup = byt_i2c_setup,
323 	.resume_from_noirq = true,
324 };
325 
326 static const struct property_entry bsw_spi_properties[] = {
327 	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
328 	{ }
329 };
330 
331 static const struct lpss_device_desc bsw_spi_dev_desc = {
332 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
333 			| LPSS_NO_D3_DELAY,
334 	.prv_offset = 0x400,
335 	.setup = lpss_deassert_reset,
336 	.properties = bsw_spi_properties,
337 };
338 
339 static const struct x86_cpu_id lpss_cpu_ids[] = {
340 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	NULL),
341 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	NULL),
342 	{}
343 };
344 
345 #else
346 
347 #define LPSS_ADDR(desc) (0UL)
348 
349 #endif /* CONFIG_X86_INTEL_LPSS */
350 
351 static const struct acpi_device_id acpi_lpss_device_ids[] = {
352 	/* Generic LPSS devices */
353 	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
354 
355 	/* Lynxpoint LPSS devices */
356 	{ "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) },
357 	{ "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) },
358 	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
359 	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
360 	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
361 	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
362 	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
363 	{ "INT33C7", },
364 
365 	/* BayTrail LPSS devices */
366 	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
367 	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
368 	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
369 	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
370 	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
371 	{ "INT33B2", },
372 	{ "INT33FC", },
373 
374 	/* Braswell LPSS devices */
375 	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
376 	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
377 	{ "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) },
378 	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
379 	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
380 	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
381 	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
382 
383 	/* Broadwell LPSS devices */
384 	{ "INT3430", LPSS_ADDR(lpt_spi_dev_desc) },
385 	{ "INT3431", LPSS_ADDR(lpt_spi_dev_desc) },
386 	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
387 	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
388 	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
389 	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
390 	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
391 	{ "INT3437", },
392 
393 	/* Wildcat Point LPSS devices */
394 	{ "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
395 
396 	{ }
397 };
398 
399 #ifdef CONFIG_X86_INTEL_LPSS
400 
401 /* LPSS main clock device. */
402 static struct platform_device *lpss_clk_dev;
403 
404 static inline void lpt_register_clock_device(void)
405 {
406 	lpss_clk_dev = platform_device_register_simple("clk-lpss-atom",
407 						       PLATFORM_DEVID_NONE,
408 						       NULL, 0);
409 }
410 
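/*
 * Register the per-device clock(s) on top of the shared LPSS root clock.
 * Depending on the descriptor flags this builds, in order, an optional gate
 * (bit 0 of the clock parameter register), an optional fractional divider
 * (m in bits 1-15, n in bits 16-30) and an "update" gate on bit 31, or just a
 * fixed-rate clock when pdata->fixed_clk_rate is set. The bit layout is
 * inferred from the clk_register_*() arguments below.
 */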
411 static int register_device_clock(struct acpi_device *adev,
412 				 struct lpss_private_data *pdata)
413 {
414 	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
415 	const char *devname = dev_name(&adev->dev);
416 	struct clk *clk;
417 	struct lpss_clk_data *clk_data;
418 	const char *parent, *clk_name;
419 	void __iomem *prv_base;
420 
421 	if (!lpss_clk_dev)
422 		lpt_register_clock_device();
423 
424 	if (IS_ERR(lpss_clk_dev))
425 		return PTR_ERR(lpss_clk_dev);
426 
427 	clk_data = platform_get_drvdata(lpss_clk_dev);
428 	if (!clk_data)
429 		return -ENODEV;
430 	clk = clk_data->clk;
431 
432 	if (!pdata->mmio_base
433 	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
434 		return -ENODATA;
435 
436 	parent = clk_data->name;
437 	prv_base = pdata->mmio_base + dev_desc->prv_offset;
438 
439 	if (pdata->fixed_clk_rate) {
440 		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
441 					      pdata->fixed_clk_rate);
442 		goto out;
443 	}
444 
445 	if (dev_desc->flags & LPSS_CLK_GATE) {
446 		clk = clk_register_gate(NULL, devname, parent, 0,
447 					prv_base, 0, 0, NULL);
448 		parent = devname;
449 	}
450 
451 	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
452 		/* Prevent division by zero */
453 		if (!readl(prv_base))
454 			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
455 
456 		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
457 		if (!clk_name)
458 			return -ENOMEM;
459 		clk = clk_register_fractional_divider(NULL, clk_name, parent,
460 						      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
461 						      prv_base, 1, 15, 16, 15, 0, NULL);
462 		parent = clk_name;
463 
464 		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
465 		if (!clk_name) {
466 			kfree(parent);
467 			return -ENOMEM;
468 		}
469 		clk = clk_register_gate(NULL, clk_name, parent,
470 					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
471 					prv_base, 31, 0, NULL);
472 		kfree(parent);
473 		kfree(clk_name);
474 	}
475 out:
476 	if (IS_ERR(clk))
477 		return PTR_ERR(clk);
478 
479 	pdata->clk = clk;
480 	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
481 	return 0;
482 }
483 
484 struct lpss_device_links {
485 	const char *supplier_hid;
486 	const char *supplier_uid;
487 	const char *consumer_hid;
488 	const char *consumer_uid;
489 	u32 flags;
490 	const struct dmi_system_id *dep_missing_ids;
491 };
492 
493 /* Please keep this list sorted alphabetically by vendor and model */
494 static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
495 	{
496 		.matches = {
497 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
498 			DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
499 		},
500 	},
501 	{}
502 };
503 
504 /*
505  * The _DEP method is used to identify dependencies but instead of creating
506  * device links for every handle in _DEP, only links in the following list are
507  * created. That is necessary because, in the general case, _DEP can refer to
508  * devices that might not have drivers, or that are on different buses, or where
509  * the supplier is not enumerated until after the consumer is probed.
510  */
511 static const struct lpss_device_links lpss_device_links[] = {
512 	/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
513 	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
514 	/* CHT iGPU depends on PMIC I2C controller */
515 	{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
516 	/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
517 	{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
518 	 i2c1_dep_missing_dmi_ids},
519 	/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
520 	{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
521 	/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
522 	{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
523 };
524 
525 static bool acpi_lpss_is_supplier(struct acpi_device *adev,
526 				  const struct lpss_device_links *link)
527 {
528 	return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
529 }
530 
531 static bool acpi_lpss_is_consumer(struct acpi_device *adev,
532 				  const struct lpss_device_links *link)
533 {
534 	return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
535 }
536 
537 struct hid_uid {
538 	const char *hid;
539 	const char *uid;
540 };
541 
542 static int match_hid_uid(struct device *dev, const void *data)
543 {
544 	struct acpi_device *adev = ACPI_COMPANION(dev);
545 	const struct hid_uid *id = data;
546 
547 	if (!adev)
548 		return 0;
549 
550 	return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
551 }
552 
553 static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
554 {
555 	struct device *dev;
556 
557 	struct hid_uid data = {
558 		.hid = hid,
559 		.uid = uid,
560 	};
561 
562 	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
563 	if (dev)
564 		return dev;
565 
566 	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
567 }
568 
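/*
 * Return true if @adev lists @handle among the handles returned by its _DEP
 * method, i.e. if the ACPI tables declare a dependency on that device.
 */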
569 static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
570 {
571 	struct acpi_handle_list dep_devices;
572 	acpi_status status;
573 	int i;
574 
575 	if (!acpi_has_method(adev->handle, "_DEP"))
576 		return false;
577 
578 	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
579 					 &dep_devices);
580 	if (ACPI_FAILURE(status)) {
581 		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
582 		return false;
583 	}
584 
585 	for (i = 0; i < dep_devices.count; i++) {
586 		if (dep_devices.handles[i] == handle)
587 			return true;
588 	}
589 
590 	return false;
591 }
592 
593 static void acpi_lpss_link_consumer(struct device *dev1,
594 				    const struct lpss_device_links *link)
595 {
596 	struct device *dev2;
597 
598 	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
599 	if (!dev2)
600 		return;
601 
602 	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
603 	    || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
604 		device_link_add(dev2, dev1, link->flags);
605 
606 	put_device(dev2);
607 }
608 
609 static void acpi_lpss_link_supplier(struct device *dev1,
610 				    const struct lpss_device_links *link)
611 {
612 	struct device *dev2;
613 
614 	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
615 	if (!dev2)
616 		return;
617 
618 	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
619 	    || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
620 		device_link_add(dev1, dev2, link->flags);
621 
622 	put_device(dev2);
623 }
624 
625 static void acpi_lpss_create_device_links(struct acpi_device *adev,
626 					  struct platform_device *pdev)
627 {
628 	int i;
629 
630 	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
631 		const struct lpss_device_links *link = &lpss_device_links[i];
632 
633 		if (acpi_lpss_is_supplier(adev, link))
634 			acpi_lpss_link_consumer(&pdev->dev, link);
635 
636 		if (acpi_lpss_is_consumer(adev, link))
637 			acpi_lpss_link_supplier(&pdev->dev, link);
638 	}
639 }
640 
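/*
 * Scan handler ->attach() callback: returns 1 when the device has been set up
 * (a platform device was created), 0 to skip it while letting the namespace
 * scan continue, or a negative errno on failure.
 */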
641 static int acpi_lpss_create_device(struct acpi_device *adev,
642 				   const struct acpi_device_id *id)
643 {
644 	const struct lpss_device_desc *dev_desc;
645 	struct lpss_private_data *pdata;
646 	struct resource_entry *rentry;
647 	struct list_head resource_list;
648 	struct platform_device *pdev;
649 	int ret;
650 
651 	dev_desc = (const struct lpss_device_desc *)id->driver_data;
652 	if (!dev_desc) {
653 		pdev = acpi_create_platform_device(adev, NULL);
654 		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
655 	}
656 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
657 	if (!pdata)
658 		return -ENOMEM;
659 
660 	INIT_LIST_HEAD(&resource_list);
661 	ret = acpi_dev_get_memory_resources(adev, &resource_list);
662 	if (ret < 0)
663 		goto err_out;
664 
665 	rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
666 	if (rentry) {
667 		if (dev_desc->prv_size_override)
668 			pdata->mmio_size = dev_desc->prv_size_override;
669 		else
670 			pdata->mmio_size = resource_size(rentry->res);
671 		pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
672 	}
673 
674 	acpi_dev_free_resource_list(&resource_list);
675 
676 	if (!pdata->mmio_base) {
677 		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
678 		adev->pnp.type.platform_id = 0;
679 		goto out_free;
680 	}
681 
682 	pdata->adev = adev;
683 	pdata->dev_desc = dev_desc;
684 
685 	if (dev_desc->setup)
686 		dev_desc->setup(pdata);
687 
688 	if (dev_desc->flags & LPSS_CLK) {
689 		ret = register_device_clock(adev, pdata);
690 		if (ret)
691 			goto out_free;
692 	}
693 
694 	/*
695 	 * This works around a known issue in ACPI tables where LPSS devices
696 	 * have _PS0 and _PS3 without _PSC (and no power resources), so
697 	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
698 	 */
699 	acpi_device_fix_up_power(adev);
700 
701 	adev->driver_data = pdata;
702 	pdev = acpi_create_platform_device(adev, dev_desc->properties);
703 	if (IS_ERR_OR_NULL(pdev)) {
704 		adev->driver_data = NULL;
705 		ret = PTR_ERR(pdev);
706 		goto err_out;
707 	}
708 
709 	acpi_lpss_create_device_links(adev, pdev);
710 	return 1;
711 
712 out_free:
713 	/* Skip the device, but continue the namespace scan */
714 	ret = 0;
715 err_out:
716 	kfree(pdata);
717 	return ret;
718 }
719 
720 static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
721 {
722 	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
723 }
724 
725 static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
726 			     unsigned int reg)
727 {
728 	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
729 }
730 
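/*
 * Read a private-space register on behalf of the sysfs LTR attributes. The
 * read is done under dev->power.lock and returns -EAGAIN while the device is
 * runtime suspended, presumably so that the MMIO space of a powered-down
 * device is never touched.
 */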
731 static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
732 {
733 	struct acpi_device *adev = ACPI_COMPANION(dev);
734 	struct lpss_private_data *pdata;
735 	unsigned long flags;
736 	int ret;
737 
738 	if (WARN_ON(!adev))
739 		return -ENODEV;
740 
741 	spin_lock_irqsave(&dev->power.lock, flags);
742 	if (pm_runtime_suspended(dev)) {
743 		ret = -EAGAIN;
744 		goto out;
745 	}
746 	pdata = acpi_driver_data(adev);
747 	if (WARN_ON(!pdata || !pdata->mmio_base)) {
748 		ret = -ENODEV;
749 		goto out;
750 	}
751 	*val = __lpss_reg_read(pdata, reg);
752 	ret = 0;
753 
754  out:
755 	spin_unlock_irqrestore(&dev->power.lock, flags);
756 	return ret;
757 }
758 
759 static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
760 			     char *buf)
761 {
762 	u32 ltr_value = 0;
763 	unsigned int reg;
764 	int ret;
765 
766 	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
767 	ret = lpss_reg_read(dev, reg, &ltr_value);
768 	if (ret)
769 		return ret;
770 
771 	return sysfs_emit(buf, "%08x\n", ltr_value);
772 }
773 
774 static ssize_t lpss_ltr_mode_show(struct device *dev,
775 				  struct device_attribute *attr, char *buf)
776 {
777 	u32 ltr_mode = 0;
778 	char *outstr;
779 	int ret;
780 
781 	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
782 	if (ret)
783 		return ret;
784 
785 	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
786 	return sprintf(buf, "%s\n", outstr);
787 }
788 
789 static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
790 static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
791 static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
792 
793 static struct attribute *lpss_attrs[] = {
794 	&dev_attr_auto_ltr.attr,
795 	&dev_attr_sw_ltr.attr,
796 	&dev_attr_ltr_mode.attr,
797 	NULL,
798 };
799 
800 static const struct attribute_group lpss_attr_group = {
801 	.attrs = lpss_attrs,
802 	.name = "lpss_ltr",
803 };
804 
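/*
 * Program the software LTR (latency tolerance) register. A negative @val
 * switches the device back to automatic LTR; otherwise the value is encoded
 * with either a 1 us or a 32 us latency scale plus the snoop-request bit,
 * which appears to follow the PCIe LTR message format, and software LTR mode
 * is enabled in the GENERAL register.
 */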
805 static void acpi_lpss_set_ltr(struct device *dev, s32 val)
806 {
807 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
808 	u32 ltr_mode, ltr_val;
809 
810 	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
811 	if (val < 0) {
812 		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
813 			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
814 			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
815 		}
816 		return;
817 	}
818 	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
819 	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
820 		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
821 		val = LPSS_LTR_MAX_VAL;
822 	} else if (val > LPSS_LTR_MAX_VAL) {
823 		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
824 		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
825 	} else {
826 		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
827 	}
828 	ltr_val |= val;
829 	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
830 	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
831 		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
832 		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
833 	}
834 }
835 
836 #ifdef CONFIG_PM
837 /**
838  * acpi_lpss_save_ctx() - Save the private registers of LPSS device
839  * @dev: LPSS device
840  * @pdata: pointer to the private data of the LPSS device
841  *
842  * Most LPSS devices have private registers which may lose their context when
843  * the device is powered down. acpi_lpss_save_ctx() saves those registers into
844  * the prv_reg_ctx array.
845  */
846 static void acpi_lpss_save_ctx(struct device *dev,
847 			       struct lpss_private_data *pdata)
848 {
849 	unsigned int i;
850 
851 	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
852 		unsigned long offset = i * sizeof(u32);
853 
854 		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
855 		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
856 			pdata->prv_reg_ctx[i], offset);
857 	}
858 }
859 
860 /**
861  * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
862  * @dev: LPSS device
863  * @pdata: pointer to the private data of the LPSS device
864  *
865  * Restores the registers that were previously stored with acpi_lpss_save_ctx().
866  */
867 static void acpi_lpss_restore_ctx(struct device *dev,
868 				  struct lpss_private_data *pdata)
869 {
870 	unsigned int i;
871 
872 	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
873 		unsigned long offset = i * sizeof(u32);
874 
875 		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
876 		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
877 			pdata->prv_reg_ctx[i], offset);
878 	}
879 }
880 
881 static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
882 {
883 	/*
884 	 * The following delay is needed or the subsequent write operations may
885 	 * fail. The LPSS devices are actually PCI devices and the PCI spec
886 	 * expects a 10 ms delay before the device can be accessed after a D3 to D0
887 	 * transition. However, some platforms, such as BSW, do not need this delay.
888 	 */
889 	unsigned int delay = 10;	/* default 10ms delay */
890 
891 	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
892 		delay = 0;
893 
894 	msleep(delay);
895 }
896 
897 static int acpi_lpss_activate(struct device *dev)
898 {
899 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
900 	int ret;
901 
902 	ret = acpi_dev_resume(dev);
903 	if (ret)
904 		return ret;
905 
906 	acpi_lpss_d3_to_d0_delay(pdata);
907 
908 	/*
909 	 * This is called only at the ->probe() stage, where the device is either
910 	 * in a known state defined by the BIOS or, most likely, powered off. Due
911 	 * to this we have to deassert the reset line to be sure that ->probe()
912 	 * will recognize the device.
913 	 */
914 	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
915 		lpss_deassert_reset(pdata);
916 
917 #ifdef CONFIG_PM
918 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
919 		acpi_lpss_save_ctx(dev, pdata);
920 #endif
921 
922 	return 0;
923 }
924 
925 static void acpi_lpss_dismiss(struct device *dev)
926 {
927 	acpi_dev_suspend(dev, false);
928 }
929 
930 /* IOSF SB for LPSS island */
931 #define LPSS_IOSF_UNIT_LPIOEP		0xA0
932 #define LPSS_IOSF_UNIT_LPIO1		0xAB
933 #define LPSS_IOSF_UNIT_LPIO2		0xAC
934 
935 #define LPSS_IOSF_PMCSR			0x84
936 #define LPSS_PMCSR_D0			0
937 #define LPSS_PMCSR_D3hot		3
938 #define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)
939 
940 #define LPSS_IOSF_GPIODEF0		0x154
941 #define LPSS_GPIODEF0_DMA1_D3		BIT(2)
942 #define LPSS_GPIODEF0_DMA2_D3		BIT(3)
943 #define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
944 #define LPSS_GPIODEF0_DMA_LLP		BIT(13)
945 
946 static DEFINE_MUTEX(lpss_iosf_mutex);
947 static bool lpss_iosf_d3_entered = true;
948 
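/*
 * The LPSS DMA controllers have no _PS0/_PS3 (see LPSS_QUIRK_ALWAYS_POWER_ON
 * above), so the LPSS island is put into D3hot by hand through IOSF-MBI
 * sideband writes once every other monitored LPSS device is already in D3hot,
 * and it is brought back to D0 before the first device resumes.
 */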
949 static void lpss_iosf_enter_d3_state(void)
950 {
951 	u32 value1 = 0;
952 	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
953 	u32 value2 = LPSS_PMCSR_D3hot;
954 	u32 mask2 = LPSS_PMCSR_Dx_MASK;
955 	/*
956 	 * The PMC provides information about the actual status of the LPSS devices.
957 	 * Here we read the values related to the LPSS power island, i.e. the LPSS
958 	 * devices, excluding both LPSS DMA controllers, along with the SCC domain.
959 	 */
960 	u32 func_dis, d3_sts_0, pmc_status;
961 	int ret;
962 
963 	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
964 	if (ret)
965 		return;
966 
967 	mutex_lock(&lpss_iosf_mutex);
968 
969 	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
970 	if (ret)
971 		goto exit;
972 
973 	/*
974 	 * Get the status of the entire LPSS power island on a per-device basis.
975 	 * Shut down both LPSS DMA controllers if and only if all other devices
976 	 * are already in D3hot.
977 	 */
978 	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
979 	if (pmc_status)
980 		goto exit;
981 
982 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
983 			LPSS_IOSF_PMCSR, value2, mask2);
984 
985 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
986 			LPSS_IOSF_PMCSR, value2, mask2);
987 
988 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
989 			LPSS_IOSF_GPIODEF0, value1, mask1);
990 
991 	lpss_iosf_d3_entered = true;
992 
993 exit:
994 	mutex_unlock(&lpss_iosf_mutex);
995 }
996 
997 static void lpss_iosf_exit_d3_state(void)
998 {
999 	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
1000 		     LPSS_GPIODEF0_DMA_LLP;
1001 	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
1002 	u32 value2 = LPSS_PMCSR_D0;
1003 	u32 mask2 = LPSS_PMCSR_Dx_MASK;
1004 
1005 	mutex_lock(&lpss_iosf_mutex);
1006 
1007 	if (!lpss_iosf_d3_entered)
1008 		goto exit;
1009 
1010 	lpss_iosf_d3_entered = false;
1011 
1012 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
1013 			LPSS_IOSF_GPIODEF0, value1, mask1);
1014 
1015 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
1016 			LPSS_IOSF_PMCSR, value2, mask2);
1017 
1018 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
1019 			LPSS_IOSF_PMCSR, value2, mask2);
1020 
1021 exit:
1022 	mutex_unlock(&lpss_iosf_mutex);
1023 }
1024 
1025 static int acpi_lpss_suspend(struct device *dev, bool wakeup)
1026 {
1027 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1028 	int ret;
1029 
1030 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
1031 		acpi_lpss_save_ctx(dev, pdata);
1032 
1033 	ret = acpi_dev_suspend(dev, wakeup);
1034 
1035 	/*
1036 	 * This call must be last in the sequence, otherwise the PMC will return
1037 	 * a wrong status for devices that are about to be powered off. See
1038 	 * lpss_iosf_enter_d3_state() for further information.
1039 	 */
1040 	if (acpi_target_system_state() == ACPI_STATE_S0 &&
1041 	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1042 		lpss_iosf_enter_d3_state();
1043 
1044 	return ret;
1045 }
1046 
1047 static int acpi_lpss_resume(struct device *dev)
1048 {
1049 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1050 	int ret;
1051 
1052 	/*
1053 	 * This call is kept first to remain symmetrical with the one in
1054 	 * acpi_lpss_runtime_suspend().
1055 	 */
1056 	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1057 		lpss_iosf_exit_d3_state();
1058 
1059 	ret = acpi_dev_resume(dev);
1060 	if (ret)
1061 		return ret;
1062 
1063 	acpi_lpss_d3_to_d0_delay(pdata);
1064 
1065 	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
1066 		acpi_lpss_restore_ctx(dev, pdata);
1067 
1068 	return 0;
1069 }
1070 
1071 #ifdef CONFIG_PM_SLEEP
1072 static int acpi_lpss_do_suspend_late(struct device *dev)
1073 {
1074 	int ret;
1075 
1076 	if (dev_pm_skip_suspend(dev))
1077 		return 0;
1078 
1079 	ret = pm_generic_suspend_late(dev);
1080 	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1081 }
1082 
1083 static int acpi_lpss_suspend_late(struct device *dev)
1084 {
1085 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1086 
1087 	if (pdata->dev_desc->resume_from_noirq)
1088 		return 0;
1089 
1090 	return acpi_lpss_do_suspend_late(dev);
1091 }
1092 
1093 static int acpi_lpss_suspend_noirq(struct device *dev)
1094 {
1095 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1096 	int ret;
1097 
1098 	if (pdata->dev_desc->resume_from_noirq) {
1099 		/*
1100 		 * The driver's ->suspend_late callback will be invoked by
1101 		 * acpi_lpss_do_suspend_late(), with the assumption that the
1102 		 * driver really wanted to run that code in ->suspend_noirq, but
1103 		 * it could not run after acpi_dev_suspend() and the driver
1104 		 * expected the latter to be called in the "late" phase.
1105 		 */
1106 		ret = acpi_lpss_do_suspend_late(dev);
1107 		if (ret)
1108 			return ret;
1109 	}
1110 
1111 	return acpi_subsys_suspend_noirq(dev);
1112 }
1113 
1114 static int acpi_lpss_do_resume_early(struct device *dev)
1115 {
1116 	int ret = acpi_lpss_resume(dev);
1117 
1118 	return ret ? ret : pm_generic_resume_early(dev);
1119 }
1120 
1121 static int acpi_lpss_resume_early(struct device *dev)
1122 {
1123 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1124 
1125 	if (pdata->dev_desc->resume_from_noirq)
1126 		return 0;
1127 
1128 	if (dev_pm_skip_resume(dev))
1129 		return 0;
1130 
1131 	return acpi_lpss_do_resume_early(dev);
1132 }
1133 
1134 static int acpi_lpss_resume_noirq(struct device *dev)
1135 {
1136 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1137 	int ret;
1138 
1139 	/* Follow acpi_subsys_resume_noirq(). */
1140 	if (dev_pm_skip_resume(dev))
1141 		return 0;
1142 
1143 	ret = pm_generic_resume_noirq(dev);
1144 	if (ret)
1145 		return ret;
1146 
1147 	if (!pdata->dev_desc->resume_from_noirq)
1148 		return 0;
1149 
1150 	/*
1151 	 * The driver's ->resume_early callback will be invoked by
1152 	 * acpi_lpss_do_resume_early(), with the assumption that the driver
1153 	 * really wanted to run that code in ->resume_noirq, but it could not
1154 	 * run before acpi_dev_resume() and the driver expected the latter to be
1155 	 * called in the "early" phase.
1156 	 */
1157 	return acpi_lpss_do_resume_early(dev);
1158 }
1159 
1160 static int acpi_lpss_do_restore_early(struct device *dev)
1161 {
1162 	int ret = acpi_lpss_resume(dev);
1163 
1164 	return ret ? ret : pm_generic_restore_early(dev);
1165 }
1166 
1167 static int acpi_lpss_restore_early(struct device *dev)
1168 {
1169 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1170 
1171 	if (pdata->dev_desc->resume_from_noirq)
1172 		return 0;
1173 
1174 	return acpi_lpss_do_restore_early(dev);
1175 }
1176 
1177 static int acpi_lpss_restore_noirq(struct device *dev)
1178 {
1179 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1180 	int ret;
1181 
1182 	ret = pm_generic_restore_noirq(dev);
1183 	if (ret)
1184 		return ret;
1185 
1186 	if (!pdata->dev_desc->resume_from_noirq)
1187 		return 0;
1188 
1189 	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
1190 	return acpi_lpss_do_restore_early(dev);
1191 }
1192 
1193 static int acpi_lpss_do_poweroff_late(struct device *dev)
1194 {
1195 	int ret = pm_generic_poweroff_late(dev);
1196 
1197 	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1198 }
1199 
1200 static int acpi_lpss_poweroff_late(struct device *dev)
1201 {
1202 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1203 
1204 	if (dev_pm_skip_suspend(dev))
1205 		return 0;
1206 
1207 	if (pdata->dev_desc->resume_from_noirq)
1208 		return 0;
1209 
1210 	return acpi_lpss_do_poweroff_late(dev);
1211 }
1212 
1213 static int acpi_lpss_poweroff_noirq(struct device *dev)
1214 {
1215 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1216 
1217 	if (dev_pm_skip_suspend(dev))
1218 		return 0;
1219 
1220 	if (pdata->dev_desc->resume_from_noirq) {
1221 		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
1222 		int ret = acpi_lpss_do_poweroff_late(dev);
1223 
1224 		if (ret)
1225 			return ret;
1226 	}
1227 
1228 	return pm_generic_poweroff_noirq(dev);
1229 }
1230 #endif /* CONFIG_PM_SLEEP */
1231 
1232 static int acpi_lpss_runtime_suspend(struct device *dev)
1233 {
1234 	int ret = pm_generic_runtime_suspend(dev);
1235 
1236 	return ret ? ret : acpi_lpss_suspend(dev, true);
1237 }
1238 
1239 static int acpi_lpss_runtime_resume(struct device *dev)
1240 {
1241 	int ret = acpi_lpss_resume(dev);
1242 
1243 	return ret ? ret : pm_generic_runtime_resume(dev);
1244 }
1245 #endif /* CONFIG_PM */
1246 
1247 static struct dev_pm_domain acpi_lpss_pm_domain = {
1248 #ifdef CONFIG_PM
1249 	.activate = acpi_lpss_activate,
1250 	.dismiss = acpi_lpss_dismiss,
1251 #endif
1252 	.ops = {
1253 #ifdef CONFIG_PM
1254 #ifdef CONFIG_PM_SLEEP
1255 		.prepare = acpi_subsys_prepare,
1256 		.complete = acpi_subsys_complete,
1257 		.suspend = acpi_subsys_suspend,
1258 		.suspend_late = acpi_lpss_suspend_late,
1259 		.suspend_noirq = acpi_lpss_suspend_noirq,
1260 		.resume_noirq = acpi_lpss_resume_noirq,
1261 		.resume_early = acpi_lpss_resume_early,
1262 		.freeze = acpi_subsys_freeze,
1263 		.poweroff = acpi_subsys_poweroff,
1264 		.poweroff_late = acpi_lpss_poweroff_late,
1265 		.poweroff_noirq = acpi_lpss_poweroff_noirq,
1266 		.restore_noirq = acpi_lpss_restore_noirq,
1267 		.restore_early = acpi_lpss_restore_early,
1268 #endif
1269 		.runtime_suspend = acpi_lpss_runtime_suspend,
1270 		.runtime_resume = acpi_lpss_runtime_resume,
1271 #endif
1272 	},
1273 };
1274 
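/*
 * Platform bus notifier: installs the LPSS PM domain when a driver is about to
 * bind to (or a device is added for) one of the LPSS devices, removes it again
 * on unbind/removal, and creates or removes the lpss_ltr sysfs group for
 * devices with the LPSS_LTR flag.
 */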
1275 static int acpi_lpss_platform_notify(struct notifier_block *nb,
1276 				     unsigned long action, void *data)
1277 {
1278 	struct platform_device *pdev = to_platform_device(data);
1279 	struct lpss_private_data *pdata;
1280 	struct acpi_device *adev;
1281 	const struct acpi_device_id *id;
1282 
1283 	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
1284 	if (!id || !id->driver_data)
1285 		return 0;
1286 
1287 	adev = ACPI_COMPANION(&pdev->dev);
1288 	if (!adev)
1289 		return 0;
1290 
1291 	pdata = acpi_driver_data(adev);
1292 	if (!pdata)
1293 		return 0;
1294 
1295 	if (pdata->mmio_base &&
1296 	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
1297 		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
1298 		return 0;
1299 	}
1300 
1301 	switch (action) {
1302 	case BUS_NOTIFY_BIND_DRIVER:
1303 		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
1304 		break;
1305 	case BUS_NOTIFY_DRIVER_NOT_BOUND:
1306 	case BUS_NOTIFY_UNBOUND_DRIVER:
1307 		dev_pm_domain_set(&pdev->dev, NULL);
1308 		break;
1309 	case BUS_NOTIFY_ADD_DEVICE:
1310 		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
1311 		if (pdata->dev_desc->flags & LPSS_LTR)
1312 			return sysfs_create_group(&pdev->dev.kobj,
1313 						  &lpss_attr_group);
1314 		break;
1315 	case BUS_NOTIFY_DEL_DEVICE:
1316 		if (pdata->dev_desc->flags & LPSS_LTR)
1317 			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
1318 		dev_pm_domain_set(&pdev->dev, NULL);
1319 		break;
1320 	default:
1321 		break;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static struct notifier_block acpi_lpss_nb = {
1328 	.notifier_call = acpi_lpss_platform_notify,
1329 };
1330 
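/*
 * Hook up dev->power.set_latency_tolerance so that the PM QoS latency
 * tolerance framework can program the device's software LTR registers via
 * acpi_lpss_set_ltr(), provided the MMIO window is large enough to reach them.
 */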
1331 static void acpi_lpss_bind(struct device *dev)
1332 {
1333 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1334 
1335 	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
1336 		return;
1337 
1338 	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
1339 		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
1340 	else
1341 		dev_err(dev, "MMIO size insufficient to access LTR\n");
1342 }
1343 
1344 static void acpi_lpss_unbind(struct device *dev)
1345 {
1346 	dev->power.set_latency_tolerance = NULL;
1347 }
1348 
1349 static struct acpi_scan_handler lpss_handler = {
1350 	.ids = acpi_lpss_device_ids,
1351 	.attach = acpi_lpss_create_device,
1352 	.bind = acpi_lpss_bind,
1353 	.unbind = acpi_lpss_unbind,
1354 };
1355 
1356 void __init acpi_lpss_init(void)
1357 {
1358 	const struct x86_cpu_id *id;
1359 	int ret;
1360 
1361 	ret = lpss_atom_clk_init();
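	/*
	 * Initialize the Atom LPSS clock support first (presumably registering
	 * the "clk-lpss-atom" platform driver used by
	 * lpt_register_clock_device()); without it the per-device clocks in
	 * register_device_clock() cannot be set up, so bail out on failure.
	 */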
1362 	if (ret)
1363 		return;
1364 
1365 	id = x86_match_cpu(lpss_cpu_ids);
1366 	if (id)
1367 		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;
1368 
1369 	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
1370 	acpi_scan_add_handler(&lpss_handler);
1371 }
1372 
1373 #else
1374 
1375 static struct acpi_scan_handler lpss_handler = {
1376 	.ids = acpi_lpss_device_ids,
1377 };
1378 
1379 void __init acpi_lpss_init(void)
1380 {
1381 	acpi_scan_add_handler(&lpss_handler);
1382 }
1383 
1384 #endif /* CONFIG_X86_INTEL_LPSS */
1385