Lines Matching +full:deep +full:- +full:sleep
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, Intel Corporation. */

#include <linux/dma-mapping.h>
/* t7xx_wait_pm_config() */
        if (ret == -ETIMEDOUT)
                dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");
/* t7xx_pci_pm_init() */
        struct pci_dev *pdev = t7xx_dev->pdev;

        INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
        mutex_init(&t7xx_dev->md_pm_entity_mtx);
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
        init_completion(&t7xx_dev->init_done);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        device_init_wakeup(&pdev->dev, true);
        dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |

        pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);
/* t7xx_pci_pm_init_late() */
        /* Enable the PCIe resource lock only after MD deep sleep is done */

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
        complete_all(&t7xx_dev->init_done);
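/*
 * Illustrative sketch (not part of the driver): the runtime-PM handoff implied
 * by the two fragments above. It assumes the init path took a usage-count
 * reference with pm_runtime_get_noresume() before enabling autosuspend, and
 * that this reference is dropped only once the modem is fully up. The function
 * names and the 20000 ms delay are hypothetical.
 */
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void example_pm_init(struct pci_dev *pdev)
{
        pm_runtime_get_noresume(&pdev->dev);            /* keep device active during bring-up */
        pm_runtime_set_autosuspend_delay(&pdev->dev, 20000);
        pm_runtime_use_autosuspend(&pdev->dev);
}

static void example_pm_init_late(struct pci_dev *pdev)
{
        pm_runtime_mark_last_busy(&pdev->dev);          /* restart the autosuspend timer */
        pm_runtime_allow(&pdev->dev);                   /* permit runtime PM for this device */
        pm_runtime_put_noidle(&pdev->dev);              /* drop the bring-up reference */
}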
/* t7xx_pci_pm_reinit() */
        /* The device is kept in FSM re-init flow

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        pm_runtime_get_noresume(&t7xx_dev->pdev->dev);
/* t7xx_pci_pm_exp_detected() */
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
/* t7xx_pci_pm_entity_register() */
        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return -EEXIST;

        list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
/* t7xx_pci_pm_entity_unregister() */
        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        list_del(&pm_entity->entity);
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        return -ENXIO;
/* t7xx_pci_sleep_disable_complete() */
        struct device *dev = &t7xx_dev->pdev->dev;

        ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 *
 * Lock the deep sleep capability; note that the device can still go into deep sleep
 * state while it is in D0 state, from the host's point of view.
 *
 * If the device is in deep sleep state, wake it up and disable the deep sleep capability.
/* t7xx_pci_disable_sleep() */
        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count++;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)

        if (t7xx_dev->sleep_disable_count == 1) {

                reinit_completion(&t7xx_dev->sleep_lock_acquire);

        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);

        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        complete_all(&t7xx_dev->sleep_lock_acquire);
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 *
 * After enabling deep sleep, the device can enter deep sleep state.
/* t7xx_pci_enable_sleep() */
        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count--;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)

        if (t7xx_dev->sleep_disable_count == 0)

        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
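/*
 * Illustrative sketch (not part of the driver): a minimal example of how the
 * deep-sleep lock above is typically used around a unit of DMA work.
 * example_tx_work() and its body are hypothetical; t7xx_pci_disable_sleep(),
 * t7xx_pci_sleep_disable_complete() and t7xx_pci_enable_sleep() are the
 * functions shown in this listing, with t7xx_pci_sleep_disable_complete()
 * assumed to return nonzero once the sleep lock has actually been acquired.
 * Driver-internal declarations (struct t7xx_pci_dev, the prototypes) are
 * assumed to be in scope.
 */
static int example_tx_work(struct t7xx_pci_dev *t7xx_dev)
{
        int ret = 0;

        t7xx_pci_disable_sleep(t7xx_dev);       /* bump sleep_disable_count, request wakeup */

        if (!t7xx_pci_sleep_disable_complete(t7xx_dev)) {
                ret = -ETIMEDOUT;               /* device never acknowledged leaving deep sleep */
                goto allow_sleep;
        }

        /* ... queue DMA / ring doorbells while deep sleep is locked out ... */

allow_sleep:
        t7xx_pci_enable_sleep(t7xx_dev);        /* drop the count; device may deep-sleep again */
        return ret;
}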
/* t7xx_send_pm_request() */
        reinit_completion(&t7xx_dev->pm_sr_ack);

        wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,

                return -ETIMEDOUT;
/* __t7xx_pci_pm_suspend() */
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
                return -EFAULT;

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);

        t7xx_dev->rgu_pci_irq_en = false;

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (!entity->suspend)

                ret = entity->suspend(t7xx_dev, entity->entity_param);

                        entity_id = entity->id;
                        dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);

                dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);

                dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->suspend_late)
                        entity->suspend_late(t7xx_dev, entity->entity_param);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity_id == entity->id)

                if (entity->resume)
                        entity->resume(t7xx_dev, entity->entity_param);

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
/* t7xx_pcie_interrupt_reinit() */
         * following function will re-enable PCIe interrupts.
/* t7xx_pcie_reinit() */
        ret = pcim_enable_device(t7xx_dev->pdev);
/* t7xx_send_fsm_command() */
        struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret = -EINVAL;

                t7xx_dev->rgu_pci_irq_en = true;
/* __t7xx_pci_pm_resume() */
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
        t7xx_dev->rgu_pci_irq_en = true;

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume_early)
                        entity->resume_early(t7xx_dev, entity->entity_param);

                dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

                dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume) {
                        ret = entity->resume(t7xx_dev, entity->entity_param);

                                dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
                                        entity->id, ret);

        t7xx_dev->rgu_pci_irq_en = true;

        pm_runtime_mark_last_busy(&pdev->dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
/* t7xx_pci_pm_prepare() */
        if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
                dev_warn(dev, "Not ready for system sleep.\n");
                return -ETIMEDOUT;
/* t7xx_request_irq() */
                if (!t7xx_dev->intr_handler[i])

                irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
                                           dev_driver_string(&pdev->dev), i);

                        ret = -ENOMEM;

                ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
                                           t7xx_dev->intr_thread[i], 0, irq_descr,
                                           t7xx_dev->callback_param[i]);

                        dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);

        while (i--) {
                if (!t7xx_dev->intr_handler[i])

                free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
/* t7xx_setup_msix() */
        struct pci_dev *pdev = t7xx_dev->pdev;

        /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */

                dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
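/*
 * Illustrative sketch (not part of the driver): one way to satisfy a
 * power-of-2 MSI-X allocation constraint like the one noted in the comment
 * above. example_alloc_msix() and irqs_needed are hypothetical;
 * roundup_pow_of_two() and pci_alloc_irq_vectors() are standard kernel APIs.
 */
#include <linux/log2.h>
#include <linux/pci.h>

static int example_alloc_msix(struct pci_dev *pdev, unsigned int irqs_needed)
{
        unsigned int nr_vectors = roundup_pow_of_two(irqs_needed);     /* e.g. 6 -> 8 */
        int ret;

        ret = pci_alloc_irq_vectors(pdev, nr_vectors, nr_vectors, PCI_IRQ_MSIX);
        if (ret < 0)
                return ret;     /* allocation failed */

        return 0;               /* ret held the number of vectors actually allocated */
}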
/* t7xx_interrupt_init() */
        if (!t7xx_dev->pdev->msix_cap)
                return -EINVAL;
/* t7xx_pci_infracfg_ao_calc() */
        t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
                                               INFRACFG_AO_DEV_CHIP -
                                               t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
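        /* The statement above rebases the chip-side INFRACFG_AO_DEV_CHIP address into the
         * host-visible mapping: the offset relative to the device-side translation base
         * (pcie_dev_reg_trsl_addr) is added to the host's external register base
         * (pcie_ext_reg_base).
         */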
/* t7xx_pci_probe() */
        t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);

                return -ENOMEM;

        t7xx_dev->pdev = pdev;

                dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
                return -ENOMEM;

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));

                dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);

        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));

                dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);

        t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
/* t7xx_pci_remove() */
                if (!t7xx_dev->intr_handler[i])

                free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);

        pci_free_irq_vectors(t7xx_dev->pdev);