1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/acpi.h>
7 #include <linux/bitfield.h>
8 #include <linux/module.h>
9 #include <net/rtnetlink.h>
10 
11 #include "iosm_ipc_imem.h"
12 #include "iosm_ipc_pcie.h"
13 #include "iosm_ipc_protocol.h"
14 
15 MODULE_DESCRIPTION("IOSM Driver");
16 MODULE_LICENSE("GPL v2");
17 
/* WWAN _DSM GUID used to query the BIOS WWAN RTD3 configuration from ACPI
 * (evaluated in ipc_pcie_read_bios_cfg()).
 */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
				       0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
21 
22 static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
23 {
24 	/* Free the MSI resources. */
25 	ipc_release_irq(ipc_pcie);
26 
27 	/* Free mapped doorbell scratchpad bus memory into CPU space. */
28 	iounmap(ipc_pcie->scratchpad);
29 
30 	/* Free mapped IPC_REGS bus memory into CPU space. */
31 	iounmap(ipc_pcie->ipc_regs);
32 
33 	/* Releases all PCI I/O and memory resources previously reserved by a
34 	 * successful call to pci_request_regions.  Call this function only
35 	 * after all use of the PCI regions has ceased.
36 	 */
37 	pci_release_regions(ipc_pcie->pci);
38 }
39 
40 static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
41 {
42 	/* Free the shared memory resources. */
43 	ipc_imem_cleanup(ipc_pcie->imem);
44 
45 	ipc_pcie_resources_release(ipc_pcie);
46 
47 	/* Signal to the system that the PCI device is not in use. */
48 	pci_disable_device(ipc_pcie->pci);
49 }
50 
51 static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
52 {
53 	kfree(ipc_pcie->imem);
54 	kfree(ipc_pcie);
55 }
56 
/* PCI core removal callback: quiesce the device, then free all state. */
static void ipc_pcie_remove(struct pci_dev *pci)
{
	struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);

	ipc_pcie_cleanup(ipc_pcie);
	ipc_pcie_deinit(ipc_pcie);
}
65 
66 static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
67 {
68 	struct pci_dev *pci = ipc_pcie->pci;
69 	u32 cap = 0;
70 	u32 ret;
71 
72 	/* Reserved PCI I/O and memory resources.
73 	 * Mark all PCI regions associated with PCI device pci as
74 	 * being reserved by owner IOSM_IPC.
75 	 */
76 	ret = pci_request_regions(pci, "IOSM_IPC");
77 	if (ret) {
78 		dev_err(ipc_pcie->dev, "failed pci request regions");
79 		goto pci_request_region_fail;
80 	}
81 
82 	/* Reserve the doorbell IPC REGS memory resources.
83 	 * Remap the memory into CPU space. Arrange for the physical address
84 	 * (BAR) to be visible from this driver.
85 	 * pci_ioremap_bar() ensures that the memory is marked uncachable.
86 	 */
87 	ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
88 
89 	if (!ipc_pcie->ipc_regs) {
90 		dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
91 		ret = -EBUSY;
92 		goto ipc_regs_remap_fail;
93 	}
94 
95 	/* Reserve the MMIO scratchpad memory resources.
96 	 * Remap the memory into CPU space. Arrange for the physical address
97 	 * (BAR) to be visible from this driver.
98 	 * pci_ioremap_bar() ensures that the memory is marked uncachable.
99 	 */
100 	ipc_pcie->scratchpad =
101 		pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
102 
103 	if (!ipc_pcie->scratchpad) {
104 		dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
105 		ret = -EBUSY;
106 		goto scratch_remap_fail;
107 	}
108 
109 	/* Install the irq handler triggered by CP. */
110 	ret = ipc_acquire_irq(ipc_pcie);
111 	if (ret) {
112 		dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
113 		goto irq_acquire_fail;
114 	}
115 
116 	/* Enable bus-mastering for the IOSM IPC device. */
117 	pci_set_master(pci);
118 
119 	/* Enable LTR if possible
120 	 * This is needed for L1.2!
121 	 */
122 	pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
123 	if (cap & PCI_EXP_DEVCAP2_LTR)
124 		pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
125 					 PCI_EXP_DEVCTL2_LTR_EN);
126 
127 	dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
128 
129 	return ret;
130 
131 irq_acquire_fail:
132 	iounmap(ipc_pcie->scratchpad);
133 scratch_remap_fail:
134 	iounmap(ipc_pcie->ipc_regs);
135 ipc_regs_remap_fail:
136 	pci_release_regions(pci);
137 pci_request_region_fail:
138 	return ret;
139 }
140 
141 bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
142 				 bool parent)
143 {
144 	struct pci_dev *pdev;
145 	u16 value = 0;
146 	u32 enabled;
147 
148 	if (parent)
149 		pdev = ipc_pcie->pci->bus->self;
150 	else
151 		pdev = ipc_pcie->pci;
152 
153 	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
154 	enabled = value & PCI_EXP_LNKCTL_ASPMC;
155 	dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);
156 
157 	return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
158 		enabled == PCI_EXP_LNKCTL_ASPMC);
159 }
160 
161 bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
162 {
163 	struct pci_dev *parent;
164 	u16 link_status = 0;
165 
166 	if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
167 		dev_err(ipc_pcie->dev, "root port not found");
168 		return false;
169 	}
170 
171 	parent = ipc_pcie->pci->bus->self;
172 
173 	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
174 	dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
175 
176 	return link_status & PCI_EXP_LNKSTA_DLLLA;
177 }
178 
179 static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
180 					  bool parent)
181 {
182 	struct pci_dev *pdev;
183 	u32 support;
184 	u32 cap = 0;
185 
186 	if (parent)
187 		pdev = ipc_pcie->pci->bus->self;
188 	else
189 		pdev = ipc_pcie->pci;
190 	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
191 	support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
192 	if (support < PCI_EXP_LNKCTL_ASPM_L1) {
193 		dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
194 			pdev->device);
195 		return false;
196 	}
197 	return true;
198 }
199 
200 void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
201 {
202 	bool parent_aspm_enabled, dev_aspm_enabled;
203 
204 	/* check if both root port and child supports ASPM L1 */
205 	if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
206 	    !ipc_pcie_check_aspm_supported(ipc_pcie, false))
207 		return;
208 
209 	parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
210 	dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
211 
212 	dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
213 		parent_aspm_enabled ? "Enabled" : "Disabled",
214 		dev_aspm_enabled ? "Enabled" : "Disabled");
215 }
216 
/* Initializes PCIe endpoint configuration: records which BARs hold the
 * doorbell registers and the scratchpad, and the doorbell register layout
 * used by the protocol layer.
 */
static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
{
	/* BAR0 is used for doorbell */
	ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;

	/* update HW configuration: scratchpad lives in BAR2, and the
	 * doorbell write/capture registers sit at fixed offsets within
	 * the doorbell BAR.
	 */
	ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
	ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
	ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
	ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
}
229 
230 /* This will read the BIOS WWAN RTD3 settings:
231  * D0L1.2/D3L2/Disabled
232  */
233 static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
234 {
235 	union acpi_object *object;
236 	acpi_handle handle_acpi;
237 
238 	handle_acpi = ACPI_HANDLE(dev);
239 	if (!handle_acpi) {
240 		pr_debug("pci device is NOT ACPI supporting device\n");
241 		goto default_ret;
242 	}
243 
244 	object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
245 
246 	if (object && object->integer.value == 3)
247 		return IPC_PCIE_D3L2;
248 
249 default_ret:
250 	return IPC_PCIE_D0L12;
251 }
252 
/* PCI core probe callback: allocate the driver context, enable and map the
 * AP PCIe device, read the BIOS RTD3 setting and bring up the shared
 * memory (imem) layer.  The error labels unwind in exact reverse order of
 * acquisition.
 *
 * Returns 0 on success, -EIO on any failure.
 */
static int ipc_pcie_probe(struct pci_dev *pci,
			  const struct pci_device_id *pci_id)
{
	struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);

	pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
		 pci_id->vendor);

	if (!ipc_pcie)
		goto ret_fail;

	/* Initialize ipc dbg component for the PCIe device */
	ipc_pcie->dev = &pci->dev;

	/* Set the driver specific data. */
	pci_set_drvdata(pci, ipc_pcie);

	/* Save the address of the PCI device configuration. */
	ipc_pcie->pci = pci;

	/* Update platform configuration */
	ipc_pcie_config_init(ipc_pcie);

	/* Initialize the device before it is used. Ask low-level code
	 * to enable I/O and memory. Wake up the device if it was suspended.
	 */
	if (pci_enable_device(pci)) {
		dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
		/* If enable of PCIe device has failed then calling
		 * ipc_pcie_cleanup will panic the system. More over
		 * ipc_pcie_cleanup() is required to be called after
		 * ipc_imem_mount()
		 */
		goto pci_enable_fail;
	}

	/* Log the ASPM L1 state (debug aid only, no configuration change). */
	ipc_pcie_config_aspm(ipc_pcie);
	dev_dbg(ipc_pcie->dev, "PCIe device enabled.");

	/* Read WWAN RTD3 BIOS Setting
	 */
	ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);

	/* Clear the s2idle suspend flag. */
	ipc_pcie->suspend = 0;

	if (ipc_pcie_resources_request(ipc_pcie))
		goto resources_req_fail;

	/* Establish the link to the imem layer. */
	ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
				       ipc_pcie->scratchpad, ipc_pcie->dev);
	if (!ipc_pcie->imem) {
		dev_err(ipc_pcie->dev, "failed to init imem");
		goto imem_init_fail;
	}

	return 0;

imem_init_fail:
	ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
	pci_disable_device(pci);
pci_enable_fail:
	kfree(ipc_pcie);
ret_fail:
	return -EIO;
}
320 
/* PCI IDs this driver binds to (Intel 7560 WWAN modem). */
static const struct pci_device_id iosm_ipc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
	{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
326 
/* Enter sleep in s2idle case: tell CP to force sleep, mark the HAL as
 * suspended and notify the imem layer.  The barrier/bit-set ordering pairs
 * with readers of ipc_pcie->suspend elsewhere in the driver.
 */
static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
{
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);

	return 0;
}
345 
/* Resume from sleep in s2idle case: tell CP to go active again, wake the
 * imem layer and clear the suspend flag with the matching barrier pairing
 * of ipc_pcie_suspend_s2idle().
 */
static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
{
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);

	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();

	clear_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after clearing bit. */
	smp_mb__after_atomic();
	return 0;
}
363 
364 int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
365 {
366 	struct pci_dev *pdev;
367 	int ret;
368 
369 	pdev = ipc_pcie->pci;
370 
371 	/* Execute D3 one time. */
372 	if (pdev->current_state != PCI_D0) {
373 		dev_dbg(ipc_pcie->dev, "done for PM=%d", pdev->current_state);
374 		return 0;
375 	}
376 
377 	/* The HAL shall ask the shared memory layer whether D3 is allowed. */
378 	ipc_imem_pm_suspend(ipc_pcie->imem);
379 
380 	/* Save the PCI configuration space of a device before suspending. */
381 	ret = pci_save_state(pdev);
382 
383 	if (ret) {
384 		dev_err(ipc_pcie->dev, "pci_save_state error=%d", ret);
385 		return ret;
386 	}
387 
388 	/* Set the power state of a PCI device.
389 	 * Transition a device to a new power state, using the device's PCI PM
390 	 * registers.
391 	 */
392 	ret = pci_set_power_state(pdev, PCI_D3cold);
393 
394 	if (ret) {
395 		dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
396 		return ret;
397 	}
398 
399 	dev_dbg(ipc_pcie->dev, "SUSPEND done");
400 	return ret;
401 }
402 
403 int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
404 {
405 	int ret;
406 
407 	/* Set the power state of a PCI device.
408 	 * Transition a device to a new power state, using the device's PCI PM
409 	 * registers.
410 	 */
411 	ret = pci_set_power_state(ipc_pcie->pci, PCI_D0);
412 
413 	if (ret) {
414 		dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
415 		return ret;
416 	}
417 
418 	pci_restore_state(ipc_pcie->pci);
419 
420 	/* The HAL shall inform the shared memory layer that the device is
421 	 * active.
422 	 */
423 	ipc_imem_pm_resume(ipc_pcie->imem);
424 
425 	dev_dbg(ipc_pcie->dev, "RESUME done");
426 	return ret;
427 }
428 
429 static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
430 {
431 	struct iosm_pcie *ipc_pcie;
432 	struct pci_dev *pdev;
433 
434 	pdev = to_pci_dev(dev);
435 
436 	ipc_pcie = pci_get_drvdata(pdev);
437 
438 	switch (ipc_pcie->d3l2_support) {
439 	case IPC_PCIE_D0L12:
440 		ipc_pcie_suspend_s2idle(ipc_pcie);
441 		break;
442 	case IPC_PCIE_D3L2:
443 		ipc_pcie_suspend(ipc_pcie);
444 		break;
445 	}
446 
447 	return 0;
448 }
449 
450 static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
451 {
452 	struct iosm_pcie *ipc_pcie;
453 	struct pci_dev *pdev;
454 
455 	pdev = to_pci_dev(dev);
456 
457 	ipc_pcie = pci_get_drvdata(pdev);
458 
459 	switch (ipc_pcie->d3l2_support) {
460 	case IPC_PCIE_D0L12:
461 		ipc_pcie_resume_s2idle(ipc_pcie);
462 		break;
463 	case IPC_PCIE_D3L2:
464 		ipc_pcie_resume(ipc_pcie);
465 		break;
466 	}
467 
468 	return 0;
469 }
470 
/* System sleep PM ops: only suspend/resume, no hibernation-specific hooks. */
static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);

/* PCI driver registration; module_pci_driver() generates init/exit. */
static struct pci_driver iosm_ipc_driver = {
	.name = KBUILD_MODNAME,
	.probe = ipc_pcie_probe,
	.remove = ipc_pcie_remove,
	.driver = {
		.pm = &iosm_ipc_pm,
	},
	.id_table = iosm_ipc_ids,
};
module_pci_driver(iosm_ipc_driver);
483 
484 int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
485 		      size_t size, dma_addr_t *mapping, int direction)
486 {
487 	if (ipc_pcie->pci) {
488 		*mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
489 					  direction);
490 		if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
491 			dev_err(ipc_pcie->dev, "dma mapping failed");
492 			return -EINVAL;
493 		}
494 	}
495 	return 0;
496 }
497 
498 void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
499 			 dma_addr_t mapping, int direction)
500 {
501 	if (!mapping)
502 		return;
503 	if (ipc_pcie->pci)
504 		dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
505 }
506 
507 struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
508 					 gfp_t flags, size_t size)
509 {
510 	struct sk_buff *skb;
511 
512 	if (!ipc_pcie || !size) {
513 		pr_err("invalid pcie object or size");
514 		return NULL;
515 	}
516 
517 	skb = __netdev_alloc_skb(NULL, size, flags);
518 	if (!skb)
519 		return NULL;
520 
521 	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
522 	IPC_CB(skb)->mapping = 0;
523 
524 	return skb;
525 }
526 
527 struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
528 				   gfp_t flags, dma_addr_t *mapping,
529 				   int direction, size_t headroom)
530 {
531 	struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
532 						       size + headroom);
533 	if (!skb)
534 		return NULL;
535 
536 	if (headroom)
537 		skb_reserve(skb, headroom);
538 
539 	if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
540 		dev_kfree_skb(skb);
541 		return NULL;
542 	}
543 
544 	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
545 
546 	/* Store the mapping address in skb scratch pad for later usage */
547 	IPC_CB(skb)->mapping = *mapping;
548 	IPC_CB(skb)->direction = direction;
549 	IPC_CB(skb)->len = size;
550 
551 	return skb;
552 }
553 
/* Free an skb allocated by ipc_pcie_alloc_skb(): undo the DMA mapping
 * recorded in its control block, then release the buffer.  NULL is a no-op.
 */
void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
{
	if (!skb)
		return;

	/* Unmap must happen before the skb memory is returned. */
	ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
			    IPC_CB(skb)->direction);
	IPC_CB(skb)->mapping = 0;
	dev_kfree_skb(skb);
}
564