Lines Matching +full:pre +full:- +full:timeout
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-2023 Intel Corporation
29 struct ivpu_fw_info *fw = vdev->fw; in ivpu_pm_prepare_cold_boot()
34 fw->entry_point = fw->cold_boot_entry_point; in ivpu_pm_prepare_cold_boot()
39 struct ivpu_fw_info *fw = vdev->fw; in ivpu_pm_prepare_warm_boot()
40 struct vpu_boot_params *bp = fw->mem->kvaddr; in ivpu_pm_prepare_warm_boot()
42 if (!bp->save_restore_ret_address) { in ivpu_pm_prepare_warm_boot()
47 ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address); in ivpu_pm_prepare_warm_boot()
48 fw->entry_point = bp->save_restore_ret_address; in ivpu_pm_prepare_warm_boot()
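The two prepare-boot excerpts above select the firmware entry point: a cold boot jumps to fw->cold_boot_entry_point, while a warm boot reuses the save/restore return address that the firmware published in the boot-params page, falling back to a cold boot when that address is zero. A minimal sketch of the selection, using simplified stand-in types (the driver's real ivpu_fw_info and vpu_boot_params carry many more fields):

#include <linux/types.h>

/* Simplified stand-ins for the driver's real structs. */
struct vpu_boot_params_sketch {
	u64 save_restore_ret_address;	/* written by firmware before it powers down */
};

struct ivpu_fw_info_sketch {
	u64 entry_point;
	u64 cold_boot_entry_point;
};

static void prepare_boot(struct ivpu_fw_info_sketch *fw,
			 struct vpu_boot_params_sketch *bp, bool warm)
{
	/* A zero save/restore address means the firmware never saved its
	 * state, so a warm boot is impossible and we cold-boot instead. */
	if (warm && bp->save_restore_ret_address)
		fw->entry_point = bp->save_restore_ret_address;
	else
		fw->entry_point = fw->cold_boot_entry_point;
}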
101 struct ivpu_device *vdev = pm->vdev; in ivpu_pm_recovery_work()
106 ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev)); in ivpu_pm_recovery_work()
107 if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) { in ivpu_pm_recovery_work()
112 if (ret && ret != -EAGAIN) in ivpu_pm_recovery_work()
115 kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); in ivpu_pm_recovery_work()
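ivpu_pm_recovery_work retries the PCI function-level reset for as long as pci_try_reset_function() reports -EAGAIN (the reset lock is contended) and the DRM device is still plugged in, then reports the outcome to user space with a KOBJ_CHANGE uevent. A hedged sketch of that shape, with a hypothetical "EVENT=RECOVERY" string standing in for the driver's real uevent payload and the unplug check folded out:

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sched.h>

static void recover_function(struct pci_dev *pdev)
{
	char *envp[] = { "EVENT=RECOVERY", NULL };	/* hypothetical payload */
	int ret;

	do {
		ret = pci_try_reset_function(pdev);
		if (ret == -EAGAIN)
			cond_resched();	/* reset lock contended, yield and retry */
	} while (ret == -EAGAIN);

	if (ret)
		dev_err(&pdev->dev, "reset failed: %d\n", ret);

	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
}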
120 struct ivpu_pm_info *pm = vdev->pm; in ivpu_pm_schedule_recovery()
133 if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) { in ivpu_pm_schedule_recovery()
135 queue_work(system_long_wq, &pm->recovery_work); in ivpu_pm_schedule_recovery()
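ivpu_pm_schedule_recovery arms that worker at most once: only the caller that flips pm->in_reset from 0 to 1 with atomic_cmpxchg() queues the work, so a burst of concurrent faults cannot pile up recoveries, and the work goes on system_long_wq because a function reset can take a while. A minimal sketch of the same guard:

#include <linux/atomic.h>
#include <linux/workqueue.h>

/* Only the caller that wins the 0 -> 1 transition queues the work, so
 * many overlapping faults schedule exactly one recovery. */
static void schedule_recovery_once(atomic_t *in_reset, struct work_struct *work)
{
	if (atomic_cmpxchg(in_reset, 0, 1) == 0)
		queue_work(system_long_wq, work);	/* long-running, keep off system_wq */
}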
143 unsigned long timeout; in ivpu_pm_suspend_cb() local
147 timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr); in ivpu_pm_suspend_cb()
150 if (time_after_eq(jiffies, timeout)) { in ivpu_pm_suspend_cb()
152 return -EBUSY; in ivpu_pm_suspend_cb()
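The system-suspend callback gives the hardware vdev->timeout.tdr milliseconds to go idle and refuses to suspend (-EBUSY) otherwise; time_after_eq() keeps the deadline check correct across jiffies wraparound. The same bounded-poll pattern in isolation:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/* Poll `is_idle` until it holds or `tdr_ms` milliseconds elapse.
 * time_after_eq() is safe against jiffies wraparound. */
static int wait_for_idle(bool (*is_idle)(void *priv), void *priv,
			 unsigned int tdr_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(tdr_ms);

	while (!is_idle(priv)) {
		cond_resched();
		if (time_after_eq(jiffies, timeout))
			return -EBUSY;	/* still busy: veto the suspend */
	}
	return 0;
}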
195 if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) { in ivpu_pm_runtime_suspend_cb()
197 vdev->pm->suspend_reschedule_counter); in ivpu_pm_runtime_suspend_cb()
198 pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend); in ivpu_pm_runtime_suspend_cb()
199 vdev->pm->suspend_reschedule_counter--; in ivpu_pm_runtime_suspend_cb()
200 return -EAGAIN; in ivpu_pm_runtime_suspend_cb()
207 if (!vdev->pm->suspend_reschedule_counter) { in ivpu_pm_runtime_suspend_cb()
214 vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; in ivpu_pm_runtime_suspend_cb()
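Runtime suspend is gentler: while the hardware is busy and retries remain, the callback returns -EAGAIN and asks the PM core to try again after vdev->timeout.reschedule_suspend milliseconds. Once suspend_reschedule_counter hits zero the device is force-suspended and the next resume must cold-boot (its state is gone); an idle suspend prepares a warm boot, and the counter is refilled to PM_RESCHEDULE_LIMIT either way. A sketch of the retry half of that policy:

#include <linux/errno.h>
#include <linux/pm_runtime.h>

/* `retries_left` plays the role of pm->suspend_reschedule_counter. */
static int runtime_suspend_policy(struct device *dev, bool hw_idle,
				  int *retries_left, unsigned int retry_ms)
{
	if (!hw_idle && *retries_left) {
		pm_schedule_suspend(dev, retry_ms);	/* ask the PM core to retry later */
		(*retries_left)--;
		return -EAGAIN;				/* abort this attempt */
	}

	/* Out of retries (or idle): proceed with the suspend. A device that
	 * was still busy loses its state and must cold-boot on resume. */
	return 0;
}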
242 ret = pm_runtime_resume_and_get(vdev->drm.dev); in ivpu_rpm_get()
243 if (!drm_WARN_ON(&vdev->drm, ret < 0)) in ivpu_rpm_get()
244 vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; in ivpu_rpm_get()
251 pm_runtime_mark_last_busy(vdev->drm.dev); in ivpu_rpm_put()
252 pm_runtime_put_autosuspend(vdev->drm.dev); in ivpu_rpm_put()
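ivpu_rpm_get()/ivpu_rpm_put() are the driver's reference pair around hardware access: pm_runtime_resume_and_get() powers the device up and takes a usage-count reference (a successful get also refills the reschedule budget), while the put marks the device busy and lets it autosuspend after the configured delay instead of dropping power immediately. A sketch of a typical call site, with do_hw_work() as a hypothetical caller:

/* Hypothetical caller showing the intended get/put bracketing. */
static int do_hw_work(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_rpm_get(vdev);	/* resume the device if it was suspended */
	if (ret < 0)
		return ret;

	/* ... touch registers, submit jobs ... */

	ivpu_rpm_put(vdev);		/* mark busy + schedule autosuspend */
	return 0;
}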
259 pm_runtime_get_sync(vdev->drm.dev); in ivpu_pm_reset_prepare_cb()
261 ivpu_dbg(vdev, PM, "Pre-reset..\n"); in ivpu_pm_reset_prepare_cb()
262 atomic_inc(&vdev->pm->reset_counter); in ivpu_pm_reset_prepare_cb()
263 atomic_set(&vdev->pm->in_reset, 1); in ivpu_pm_reset_prepare_cb()
268 ivpu_dbg(vdev, PM, "Pre-reset done.\n"); in ivpu_pm_reset_prepare_cb()
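The PCI reset_prepare callback runs before a function reset: pm_runtime_get_sync() keeps the device powered for the duration, reset_counter is bumped so in-flight work can later detect that a reset happened underneath it, and in_reset tells the rest of the driver to back off. The same bookkeeping in isolation:

#include <linux/atomic.h>
#include <linux/pm_runtime.h>

/* Pre-reset bookkeeping: hold power and publish "reset in progress". */
static void reset_prepare(struct device *dev, atomic_t *reset_counter,
			  atomic_t *in_reset)
{
	pm_runtime_get_sync(dev);	/* keep powered until reset_done */
	atomic_inc(reset_counter);	/* lets jobs detect they spanned a reset */
	atomic_set(in_reset, 1);	/* other paths should back off now */
	/* ... quiesce submissions and shut the hardware down ... */
}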
276 ivpu_dbg(vdev, PM, "Post-reset..\n"); in ivpu_pm_reset_done_cb()
280 atomic_set(&vdev->pm->in_reset, 0); in ivpu_pm_reset_done_cb()
281 ivpu_dbg(vdev, PM, "Post-reset done.\n"); in ivpu_pm_reset_done_cb()
283 pm_runtime_put_autosuspend(vdev->drm.dev); in ivpu_pm_reset_done_cb()
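reset_done is the mirror image: after re-initializing the hardware (a function reset wipes firmware state, so this is a cold-boot path) it clears in_reset and drops the reference taken in reset_prepare with the autosuspend variant. Sketched:

#include <linux/atomic.h>
#include <linux/pm_runtime.h>

/* Post-reset bookkeeping: the mirror image of reset_prepare(). */
static void reset_done(struct device *dev, atomic_t *in_reset)
{
	/* ... re-enable the device and cold-boot the firmware ... */
	atomic_set(in_reset, 0);		/* reset window closed */
	pm_runtime_put_autosuspend(dev);	/* balance reset_prepare's get */
}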
288 struct device *dev = vdev->drm.dev; in ivpu_pm_init()
289 struct ivpu_pm_info *pm = vdev->pm; in ivpu_pm_init()
291 pm->vdev = vdev; in ivpu_pm_init()
292 pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; in ivpu_pm_init()
294 atomic_set(&pm->in_reset, 0); in ivpu_pm_init()
295 INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work); in ivpu_pm_init()
300 pm_runtime_set_autosuspend_delay(dev, -1); in ivpu_pm_init()
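ivpu_pm_init wires up the recovery worker and the counters, then configures autosuspend. A negative value passed to pm_runtime_set_autosuspend_delay() prevents autosuspend entirely, so the -1 shown at line 300 keeps the device awake; the excerpt elides the alternative branch that installs a positive delay. A sketch of the init-time runtime-PM setup, assuming the usual pm_runtime_use_autosuspend() prerequisite (not visible in the excerpt) for the *_autosuspend calls:

#include <linux/pm_runtime.h>

/* Init-time runtime-PM setup. A negative autosuspend delay disables
 * autosuspend, so the device stays up until something installs a
 * positive delay later. */
static void pm_setup(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, -1);	/* autosuspend off for now */
}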
311 cancel_work_sync(&vdev->pm->recovery_work); in ivpu_pm_cancel_recovery()
316 struct device *dev = vdev->drm.dev; in ivpu_pm_enable()
326 pm_runtime_get_noresume(vdev->drm.dev); in ivpu_pm_disable()
327 pm_runtime_forbid(vdev->drm.dev); in ivpu_pm_disable()
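ivpu_pm_disable pins the device active on teardown: pm_runtime_get_noresume() takes a usage-count reference without triggering a resume, and pm_runtime_forbid() resumes the device and blocks further runtime suspends; ivpu_pm_enable, whose body is mostly elided above, would presumably do the inverse to hand control back to the PM core. The disable pairing as a sketch:

#include <linux/pm_runtime.h>

/* Teardown-side pairing: keep the usage count above zero and forbid
 * runtime PM so the device can no longer suspend underneath us. */
static void pm_teardown(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* reference without a resume */
	pm_runtime_forbid(dev);		/* resumes + blocks runtime suspend */
}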