/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"

/*
 * Flush all executing contexts by waiting for the GT to idle, wedging the
 * GPU if the engines refuse to settle, then wait for GT power management
 * to release its wakerefs. Returns true only if the GT idled cleanly
 * without being wedged.
 */
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !intel_gt_is_wedged(gt);

	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/* XXX hide warning from gem_eio */
		if (i915_modparams.reset) {
			dev_err(gt->i915->drm.dev,
				"Failed to idle engines, declaring wedged!\n");
			GEM_TRACE_DUMP();
		}

		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		result = false;
	}

	if (intel_gt_pm_wait_for_idle(gt))
		result = false;

	return result;
}

/*
 * Temporarily transfer any user-held forcewake references out of the GT
 * wakeref count for suspend, and restore them on resume, so that the
 * user's debug wakerefs do not prevent the GT from idling.
 */
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	intel_gt_pm_get(gt);
	if (suspend) {
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt);
}

void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	user_forcewake(&i915->gt, true);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	intel_gt_suspend(&i915->gt);
	intel_uc_suspend(&i915->gt.uc);

	i915_gem_drain_freed_objects(i915);
}
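/*
 * Peek at the head of an object list, or return NULL if the list is
 * empty. Used below to walk the shrink/purge lists while the obj_lock
 * may be dropped and retaken between iterations.
 */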
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			/* Drop the spinlock before sleeping on the object lock. */
			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	i915_gem_sanitize(i915);
}

void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	if (intel_gt_init_hw(&i915->gt))
		goto err_wedged;

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	if (intel_gt_resume(&i915->gt))
		goto err_wedged;

	intel_uc_resume(&i915->gt.uc);

	/* Always reload a context for powersaving. */
	if (!switch_to_kernel_context_sync(&i915->gt))
		goto err_wedged;

	user_forcewake(&i915->gt, false);

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	return;

err_wedged:
	if (!intel_gt_is_wedged(&i915->gt)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		intel_gt_set_wedged(&i915->gt);
	}
	goto out_unlock;
}
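/*
 * Expected calling context (a sketch; the exact hooks live in i915_drv.c):
 * i915_gem_suspend() is typically invoked early in the system suspend
 * sequence while the device is still powered, i915_gem_suspend_late()
 * once the GPU is idle and about to lose power, and i915_gem_resume()
 * on the way back up after the hardware has been reinitialised.
 */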