/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"

static int pm_notifier(struct notifier_block *nb,
		       unsigned long action,
		       void *data)
{
	struct drm_i915_private *i915 =
		container_of(nb, typeof(*i915), gem.pm_notifier);

	switch (action) {
	case INTEL_GT_UNPARK:
		break;

	case INTEL_GT_PARK:
		i915_vma_parked(i915);
		break;
	}

	return NOTIFY_OK;
}

static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !intel_gt_is_wedged(gt);

	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/* XXX hide warning from gem_eio */
		if (i915_modparams.reset) {
			dev_err(gt->i915->drm.dev,
				"Failed to idle engines, declaring wedged!\n");
			GEM_TRACE_DUMP();
		}

		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		result = false;
	}

	if (intel_gt_pm_wait_for_idle(gt))
		result = false;

	return result;
}

bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(&i915->gt);
}

static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	intel_gt_pm_get(gt);
	if (suspend) {
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt);
}

void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	user_forcewake(&i915->gt, true);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	intel_gt_suspend(&i915->gt);
	intel_uc_suspend(&i915->gt.uc);

	cancel_delayed_work_sync(&i915->gt.hangcheck.work);

	i915_gem_drain_freed_objects(i915);
}

static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

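/*
 * Walk the remaining shrinkable and purgeable objects and flush any
 * stray writes (by forcing each object to the GTT domain) while their
 * backing storage is still intact; see the ordering note below.
 */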
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	i915_gem_sanitize(i915);
}

void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	if (intel_gt_init_hw(&i915->gt))
		goto err_wedged;

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	if (intel_gt_resume(&i915->gt))
		goto err_wedged;

	intel_uc_resume(&i915->gt.uc);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

	user_forcewake(&i915->gt, false);

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	return;

err_wedged:
	if (!intel_gt_is_wedged(&i915->gt)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		intel_gt_set_wedged(&i915->gt);
	}
	goto out_unlock;
}

void i915_gem_init__pm(struct drm_i915_private *i915)
{
	i915->gem.pm_notifier.notifier_call = pm_notifier;
	blocking_notifier_chain_register(&i915->gt.pm_notifications,
					 &i915->gem.pm_notifier);
}