/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"

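/*
 * First user of the engine: take a wakeref on the GT so the device stays
 * powered, pin the default context image (for fast resets from atomic
 * context), invoke the backend's unpark hook and restart hangcheck.
 */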
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	void *map;

	GEM_TRACE("%s\n", engine->name);

	intel_gt_pm_get(engine->gt);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_init_hangcheck(engine);
	return 0;
}

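/*
 * Take a wakeref on @engine, waking both it and the GT on first use.
 * Each intel_engine_pm_get() must be balanced by an intel_engine_pm_put()
 * once the caller has finished using the engine.
 */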
void intel_engine_pm_get(struct intel_engine_cs *engine)
{
	intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
}

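/*
 * Final sanity check before parking: the engine is expected to be idle
 * by now, so complain loudly and dump the engine state if it is not.
 */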
void intel_engine_park(struct intel_engine_cs *engine)
{
	/*
	 * We are committed now to parking this engine, make sure there
	 * will be no more interrupts arriving later and the engine
	 * is truly idle.
	 */
	if (wait_for(intel_engine_is_idle(engine), 10)) {
		struct drm_printer p = drm_debug_printer(__func__);

		dev_err(engine->i915->drm.dev,
			"%s is not idle before parking\n",
			engine->name);
		intel_engine_dump(engine, &p, NULL);
	}
}

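/*
 * To avoid leaving user context state on the hardware while it is powered
 * down, switch to the kernel context before parking. If a switch has to
 * be submitted, parking is aborted (-EBUSY) and retried after that
 * request is retired.
 */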
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot,
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 */
	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		return true;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;

	i915_request_add_barriers(rq);
	__i915_request_commit(rq);

	return false;
}

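/*
 * Last user of the engine is done: flush the engine to the kernel
 * context, quiesce the backend, release the pinned default state and
 * finally drop our wakeref on the GT.
 */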
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	/* Stale bookkeeping, only meaningful while the engine is awake. */
	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	GEM_TRACE("%s\n", engine->name);

	/* Idle now; stop listening for breadcrumb (completion) interrupts. */
	intel_engine_disarm_breadcrumbs(engine);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	/* Forget any priolist allocation failure; retry on next use. */
	engine->execlists.no_priolist = false;

	intel_gt_pm_put(engine->gt);
	return 0;
}

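/* Release a wakeref on @engine, parking it once the last user is done. */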
void intel_engine_pm_put(struct intel_engine_cs *engine)
{
	intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
}

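/* Initialise the wakeref tracking; the engine starts life parked. */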
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	intel_wakeref_init(&engine->wakeref);
}