xref: /openbmc/linux/drivers/net/wwan/iosm/iosm_ipc_pcie.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/acpi.h>
7 #include <linux/bitfield.h>
8 #include <linux/module.h>
9 #include <linux/suspend.h>
10 #include <net/rtnetlink.h>
11 
12 #include "iosm_ipc_imem.h"
13 #include "iosm_ipc_pcie.h"
14 #include "iosm_ipc_protocol.h"
15 
16 MODULE_DESCRIPTION("IOSM Driver");
17 MODULE_LICENSE("GPL v2");
18 
/* WWAN GUID: identifies the BIOS _DSM evaluated by ipc_pcie_read_bios_cfg()
 * to read the RTD3 sleep-state setting.
 */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
				       0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
/* True while the PCI driver is registered; toggled by module init/exit and
 * by pm_notify() across hibernation transitions.
 */
static bool pci_registered;
23 
/* Undo ipc_pcie_resources_request(): release the MSI irq, unmap both BARs
 * and release the PCI regions, in reverse order of acquisition.
 */
static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
{
	/* Free the MSI resources. */
	ipc_release_irq(ipc_pcie);

	/* Free mapped doorbell scratchpad bus memory into CPU space. */
	iounmap(ipc_pcie->scratchpad);

	/* Free mapped IPC_REGS bus memory into CPU space. */
	iounmap(ipc_pcie->ipc_regs);

	/* Releases all PCI I/O and memory resources previously reserved by a
	 * successful call to pci_request_regions.  Call this function only
	 * after all use of the PCI regions has ceased.
	 */
	pci_release_regions(ipc_pcie->pci);
}
41 
/* Tear down the running device: shared-memory layer first (it may still use
 * the PCI resources), then irq/BAR/region resources, then the device itself.
 */
static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
{
	/* Free the shared memory resources. */
	ipc_imem_cleanup(ipc_pcie->imem);

	ipc_pcie_resources_release(ipc_pcie);

	/* Signal to the system that the PCI device is not in use. */
	pci_disable_device(ipc_pcie->pci);
}
52 
/* Free the driver instance memory; called after ipc_pcie_cleanup() in
 * ipc_pcie_remove().
 */
static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
{
	kfree(ipc_pcie->imem);
	kfree(ipc_pcie);
}
58 
/* PCI remove callback: unwinds everything set up by ipc_pcie_probe(). */
static void ipc_pcie_remove(struct pci_dev *pci)
{
	struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);

	ipc_pcie_cleanup(ipc_pcie);

	ipc_pcie_deinit(ipc_pcie);
}
67 
ipc_pcie_resources_request(struct iosm_pcie * ipc_pcie)68 static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
69 {
70 	struct pci_dev *pci = ipc_pcie->pci;
71 	u32 cap = 0;
72 	u32 ret;
73 
74 	/* Reserved PCI I/O and memory resources.
75 	 * Mark all PCI regions associated with PCI device pci as
76 	 * being reserved by owner IOSM_IPC.
77 	 */
78 	ret = pci_request_regions(pci, "IOSM_IPC");
79 	if (ret) {
80 		dev_err(ipc_pcie->dev, "failed pci request regions");
81 		goto pci_request_region_fail;
82 	}
83 
84 	/* Reserve the doorbell IPC REGS memory resources.
85 	 * Remap the memory into CPU space. Arrange for the physical address
86 	 * (BAR) to be visible from this driver.
87 	 * pci_ioremap_bar() ensures that the memory is marked uncachable.
88 	 */
89 	ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
90 
91 	if (!ipc_pcie->ipc_regs) {
92 		dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
93 		ret = -EBUSY;
94 		goto ipc_regs_remap_fail;
95 	}
96 
97 	/* Reserve the MMIO scratchpad memory resources.
98 	 * Remap the memory into CPU space. Arrange for the physical address
99 	 * (BAR) to be visible from this driver.
100 	 * pci_ioremap_bar() ensures that the memory is marked uncachable.
101 	 */
102 	ipc_pcie->scratchpad =
103 		pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
104 
105 	if (!ipc_pcie->scratchpad) {
106 		dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
107 		ret = -EBUSY;
108 		goto scratch_remap_fail;
109 	}
110 
111 	/* Install the irq handler triggered by CP. */
112 	ret = ipc_acquire_irq(ipc_pcie);
113 	if (ret) {
114 		dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
115 		goto irq_acquire_fail;
116 	}
117 
118 	/* Enable bus-mastering for the IOSM IPC device. */
119 	pci_set_master(pci);
120 
121 	/* Enable LTR if possible
122 	 * This is needed for L1.2!
123 	 */
124 	pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
125 	if (cap & PCI_EXP_DEVCAP2_LTR)
126 		pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
127 					 PCI_EXP_DEVCTL2_LTR_EN);
128 
129 	dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
130 
131 	return ret;
132 
133 irq_acquire_fail:
134 	iounmap(ipc_pcie->scratchpad);
135 scratch_remap_fail:
136 	iounmap(ipc_pcie->ipc_regs);
137 ipc_regs_remap_fail:
138 	pci_release_regions(pci);
139 pci_request_region_fail:
140 	return ret;
141 }
142 
/* Report whether ASPM L1 is currently enabled in the Link Control register
 * of either the root port (parent == true) or the device itself.
 */
bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
				 bool parent)
{
	struct pci_dev *pdev = parent ? ipc_pcie->pci->bus->self :
					ipc_pcie->pci;
	u16 lnkctl = 0;
	u32 aspm_state;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
	aspm_state = lnkctl & PCI_EXP_LNKCTL_ASPMC;
	dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, lnkctl);

	/* L1 counts as enabled whether it is set alone or together with L0s. */
	return aspm_state == PCI_EXP_LNKCTL_ASPM_L1 ||
	       aspm_state == PCI_EXP_LNKCTL_ASPMC;
}
162 
ipc_pcie_check_data_link_active(struct iosm_pcie * ipc_pcie)163 bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
164 {
165 	struct pci_dev *parent;
166 	u16 link_status = 0;
167 
168 	if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
169 		dev_err(ipc_pcie->dev, "root port not found");
170 		return false;
171 	}
172 
173 	parent = ipc_pcie->pci->bus->self;
174 
175 	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
176 	dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
177 
178 	return link_status & PCI_EXP_LNKSTA_DLLLA;
179 }
180 
/* Report whether ASPM L1 is advertised in the Link Capabilities register
 * of either the root port (parent == true) or the device itself.
 */
static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
					  bool parent)
{
	struct pci_dev *pdev = parent ? ipc_pcie->pci->bus->self :
					ipc_pcie->pci;
	u32 aspm_support;
	u32 lnkcap = 0;

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);
	aspm_support = u32_get_bits(lnkcap, PCI_EXP_LNKCAP_ASPMS);
	if (aspm_support >= PCI_EXP_LNKCTL_ASPM_L1)
		return true;

	dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
		pdev->device);
	return false;
}
201 
ipc_pcie_config_aspm(struct iosm_pcie * ipc_pcie)202 void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
203 {
204 	bool parent_aspm_enabled, dev_aspm_enabled;
205 
206 	/* check if both root port and child supports ASPM L1 */
207 	if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
208 	    !ipc_pcie_check_aspm_supported(ipc_pcie, false))
209 		return;
210 
211 	parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
212 	dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
213 
214 	dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
215 		parent_aspm_enabled ? "Enabled" : "Disabled",
216 		dev_aspm_enabled ? "Enabled" : "Disabled");
217 }
218 
/* Initializes PCIe endpoint configuration */
static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
{
	/* BAR0 is used for doorbell */
	ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;

	/* update HW configuration */
	ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
	ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
	/* NOTE(review): register offsets for the doorbell write/capture
	 * pointers; exact semantics are defined in iosm_ipc_pcie.h — confirm.
	 */
	ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
	ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
}
231 
232 /* This will read the BIOS WWAN RTD3 settings:
233  * D0L1.2/D3L2/Disabled
234  */
ipc_pcie_read_bios_cfg(struct device * dev)235 static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
236 {
237 	enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
238 	union acpi_object *object;
239 	acpi_handle handle_acpi;
240 
241 	handle_acpi = ACPI_HANDLE(dev);
242 	if (!handle_acpi) {
243 		pr_debug("pci device is NOT ACPI supporting device\n");
244 		goto default_ret;
245 	}
246 
247 	object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
248 	if (!object)
249 		goto default_ret;
250 
251 	if (object->integer.value == 3)
252 		sleep_state = IPC_PCIE_D3L2;
253 
254 	ACPI_FREE(object);
255 
256 default_ret:
257 	return sleep_state;
258 }
259 
/* PCI probe callback: allocate the driver instance, enable the device,
 * configure DMA/ASPM, read the BIOS RTD3 setting, claim the PCI resources
 * and bring up the shared-memory (imem) layer.
 *
 * Returns 0 on success, -EIO on any failure (the specific errno from the
 * failing step is only logged, not propagated).
 */
static int ipc_pcie_probe(struct pci_dev *pci,
			  const struct pci_device_id *pci_id)
{
	struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
	int ret;

	pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
		 pci_id->vendor);

	if (!ipc_pcie)
		goto ret_fail;

	/* Initialize ipc dbg component for the PCIe device */
	ipc_pcie->dev = &pci->dev;

	/* Set the driver specific data. */
	pci_set_drvdata(pci, ipc_pcie);

	/* Save the address of the PCI device configuration. */
	ipc_pcie->pci = pci;

	/* Update platform configuration */
	ipc_pcie_config_init(ipc_pcie);

	/* Initialize the device before it is used. Ask low-level code
	 * to enable I/O and memory. Wake up the device if it was suspended.
	 */
	if (pci_enable_device(pci)) {
		dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
		/* If enable of PCIe device has failed then calling
		 * ipc_pcie_cleanup will panic the system. More over
		 * ipc_pcie_cleanup() is required to be called after
		 * ipc_imem_mount()
		 */
		goto pci_enable_fail;
	}

	/* The device needs 64-bit DMA addressing for shared memory. */
	ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
		goto set_mask_fail;
	}

	ipc_pcie_config_aspm(ipc_pcie);
	dev_dbg(ipc_pcie->dev, "PCIe device enabled.");

	/* Read WWAN RTD3 BIOS Setting
	 */
	ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);

	/* Clear the s2idle suspend flag (bit 0, see suspend_s2idle path). */
	ipc_pcie->suspend = 0;

	if (ipc_pcie_resources_request(ipc_pcie))
		goto resources_req_fail;

	/* Establish the link to the imem layer. */
	ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
				       ipc_pcie->scratchpad, ipc_pcie->dev);
	if (!ipc_pcie->imem) {
		dev_err(ipc_pcie->dev, "failed to init imem");
		goto imem_init_fail;
	}

	return 0;

	/* Error unwinding in reverse order of acquisition. */
imem_init_fail:
	ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
set_mask_fail:
	pci_disable_device(pci);
pci_enable_fail:
	kfree(ipc_pcie);
ret_fail:
	return -EIO;
}
335 
/* PCI device IDs claimed by this driver. */
static const struct pci_device_id iosm_ipc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
	{}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
342 
/* Enter sleep in s2idle case
 */
static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
{
	/* Ask CP to enter forced sleep before flagging the suspend state. */
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();

	/* Inform the shared-memory layer that s2idle sleep has begun. */
	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);

	return 0;
}
361 
/* Resume from sleep in s2idle case
 */
static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
{
	/* Ask CP to become active again, mirroring suspend_s2idle. */
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);

	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();

	clear_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after clearing bit. */
	smp_mb__after_atomic();
	return 0;
}
379 
/* D3/L2 suspend path: delegate to the shared-memory layer. */
int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
{
	/* The HAL shall ask the shared memory layer whether D3 is allowed. */
	ipc_imem_pm_suspend(ipc_pcie->imem);

	dev_dbg(ipc_pcie->dev, "SUSPEND done");
	return 0;
}
388 
/* D3/L2 resume path: delegate to the shared-memory layer. */
int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
{
	/* The HAL shall inform the shared memory layer that the device is
	 * active.
	 */
	ipc_imem_pm_resume(ipc_pcie->imem);

	dev_dbg(ipc_pcie->dev, "RESUME done");
	return 0;
}
399 
ipc_pcie_suspend_cb(struct device * dev)400 static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
401 {
402 	struct iosm_pcie *ipc_pcie;
403 	struct pci_dev *pdev;
404 
405 	pdev = to_pci_dev(dev);
406 
407 	ipc_pcie = pci_get_drvdata(pdev);
408 
409 	switch (ipc_pcie->d3l2_support) {
410 	case IPC_PCIE_D0L12:
411 		ipc_pcie_suspend_s2idle(ipc_pcie);
412 		break;
413 	case IPC_PCIE_D3L2:
414 		ipc_pcie_suspend(ipc_pcie);
415 		break;
416 	}
417 
418 	return 0;
419 }
420 
ipc_pcie_resume_cb(struct device * dev)421 static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
422 {
423 	struct iosm_pcie *ipc_pcie;
424 	struct pci_dev *pdev;
425 
426 	pdev = to_pci_dev(dev);
427 
428 	ipc_pcie = pci_get_drvdata(pdev);
429 
430 	switch (ipc_pcie->d3l2_support) {
431 	case IPC_PCIE_D0L12:
432 		ipc_pcie_resume_s2idle(ipc_pcie);
433 		break;
434 	case IPC_PCIE_D3L2:
435 		ipc_pcie_resume(ipc_pcie);
436 		break;
437 	}
438 
439 	return 0;
440 }
441 
/* Dev PM ops routing system suspend/resume to the callbacks above. */
static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);

/* PCI driver definition binding probe/remove and the PM ops. */
static struct pci_driver iosm_ipc_driver = {
	.name = KBUILD_MODNAME,
	.probe = ipc_pcie_probe,
	.remove = ipc_pcie_remove,
	.driver = {
		.pm = &iosm_ipc_pm,
	},
	.id_table = iosm_ipc_ids,
};
453 
/* Map a buffer for DMA on the PCI device and store the bus address in
 * *mapping. Returns 0 on success (also when no PCI device is attached,
 * in which case *mapping is left untouched), -EINVAL on mapping failure.
 */
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
		      size_t size, dma_addr_t *mapping, int direction)
{
	struct pci_dev *pci = ipc_pcie->pci;

	if (!pci)
		return 0;

	*mapping = dma_map_single(&pci->dev, data, size, direction);
	if (dma_mapping_error(&pci->dev, *mapping)) {
		dev_err(ipc_pcie->dev, "dma mapping failed");
		return -EINVAL;
	}

	return 0;
}
467 
/* Unmap a DMA mapping created by ipc_pcie_addr_map(); a zero mapping or a
 * missing PCI device makes this a no-op.
 */
void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
			 dma_addr_t mapping, int direction)
{
	if (mapping && ipc_pcie->pci)
		dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
}
476 
/* Allocate an skb for local (unmapped) use and initialize its IPC control
 * block. Returns NULL on invalid arguments or allocation failure.
 */
struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
					 gfp_t flags, size_t size)
{
	struct sk_buff *skb = NULL;

	if (ipc_pcie && size) {
		skb = __netdev_alloc_skb(NULL, size, flags);
		if (skb) {
			IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
			IPC_CB(skb)->mapping = 0;
		}
	} else {
		pr_err("invalid pcie object or size");
	}

	return skb;
}
496 
/* Allocate an skb with optional headroom, DMA-map its data area and record
 * the mapping in the skb control block. Returns NULL on allocation or
 * mapping failure.
 */
struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
				   gfp_t flags, dma_addr_t *mapping,
				   int direction, size_t headroom)
{
	struct sk_buff *skb;

	/* The IPC control block must fit into the skb scratch area. */
	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

	skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags, size + headroom);
	if (!skb)
		return NULL;

	/* Reserve the headroom first so skb->data points at the payload
	 * area before it is mapped for DMA.
	 */
	if (headroom)
		skb_reserve(skb, headroom);

	if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	/* Store the mapping address in skb scratch pad for later usage */
	IPC_CB(skb)->mapping = *mapping;
	IPC_CB(skb)->direction = direction;
	IPC_CB(skb)->len = size;

	return skb;
}
523 
/* Unmap the DMA mapping recorded in the skb control block (if any) and
 * free the skb; NULL is tolerated.
 */
void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
{
	if (!skb)
		return;

	ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
			    IPC_CB(skb)->direction);
	IPC_CB(skb)->mapping = 0;
	dev_kfree_skb(skb);
}
534 
pm_notify(struct notifier_block * nb,unsigned long mode,void * _unused)535 static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
536 {
537 	if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) {
538 		if (pci_registered) {
539 			pci_unregister_driver(&iosm_ipc_driver);
540 			pci_registered = false;
541 		}
542 	} else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) {
543 		if (!pci_registered) {
544 			int ret;
545 
546 			ret = pci_register_driver(&iosm_ipc_driver);
547 			if (ret) {
548 				pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n",
549 				       ret);
550 			} else {
551 				pci_registered = true;
552 			}
553 		}
554 	}
555 
556 	return 0;
557 }
558 
/* Hibernation notifier registered in iosm_ipc_driver_init(). */
static struct notifier_block pm_notifier = {
	.notifier_call = pm_notify,
};
562 
/* Module init: register the PCI driver and the PM notifier that handles
 * hibernation transitions.
 */
static int __init iosm_ipc_driver_init(void)
{
	int ret;

	ret = pci_register_driver(&iosm_ipc_driver);
	if (ret)
		return ret;

	pci_registered = true;

	/* Allow pm_notify() to unregister/re-register across hibernation. */
	register_pm_notifier(&pm_notifier);

	return 0;
}
module_init(iosm_ipc_driver_init);
578 
/* Module exit: drop the PM notifier and, unless pm_notify() already did,
 * unregister the PCI driver.
 */
static void __exit iosm_ipc_driver_exit(void)
{
	unregister_pm_notifier(&pm_notifier);

	if (pci_registered)
		pci_unregister_driver(&iosm_ipc_driver);
}
module_exit(iosm_ipc_driver_exit);
587