// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_pcode.h"

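/*
 * Enable IPS for the given CRTC state: via the pcode mailbox on Broadwell,
 * or by writing IPS_CTL directly on Haswell.
 */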
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(&i915->drm,
		    !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(i915)) {
		drm_WARN_ON(&i915->drm,
			    snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
					    IPS_ENABLE | IPS_PCODE_CONTROL));
		/*
		 * Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(i915, IPS_CTL, IPS_ENABLE);
		/*
		 * The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read.
		 */
		if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&i915->drm,
				"Timed out waiting for IPS enable\n");
	}
}

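/*
 * Disable IPS if it is currently enabled for the given CRTC state. Returns
 * true if the caller must wait for a vblank before the planes can be
 * disabled.
 */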
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	bool need_vblank_wait = false;

	if (!crtc_state->ips_enabled)
		return need_vblank_wait;

	if (IS_BROADWELL(i915)) {
		drm_WARN_ON(&i915->drm,
			    snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec-specified
		 * 42ms timeout value leads to occasional timeouts, so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&i915->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(i915, IPS_CTL, 0);
		intel_de_posting_read(i915, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	need_vblank_wait = true;

	return need_vblank_wait;
}

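/*
 * Determine whether IPS has to be turned off before the plane update of
 * this commit.
 */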
static bool hsw_ips_need_disable(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->ips_enabled)
		return false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Disable IPS before we program the LUT.
	 */
	if (IS_HASWELL(i915) &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	return !new_crtc_state->ips_enabled;
}

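/*
 * Disable IPS ahead of the plane update if this commit requires it.
 * Returns true if the caller needs to wait for a vblank before touching
 * the planes.
 */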
bool hsw_ips_pre_update(struct intel_atomic_state *state,
			struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	if (!hsw_ips_need_disable(state, crtc))
		return false;

	return hsw_ips_disable(old_crtc_state);
}

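/*
 * Determine whether IPS should be (re-)enabled after the plane update of
 * this commit.
 */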
static bool hsw_ips_need_enable(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->ips_enabled)
		return false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (IS_HASWELL(i915) &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	/*
	 * We can't read out IPS on Broadwell, so assume the worst and
	 * forcibly enable IPS on the first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) && old_crtc_state->inherited)
		return true;

	return !old_crtc_state->ips_enabled;
}

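/*
 * Re-enable IPS after the plane update if the new state asks for it. This
 * runs from the post plane update path, after the vblank wait.
 */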
void hsw_ips_post_update(struct intel_atomic_state *state,
			 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!hsw_ips_need_enable(state, crtc))
		return;

	hsw_ips_enable(new_crtc_state);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

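/*
 * Check whether the given CRTC state is allowed to use IPS at all: pipe A
 * on an IPS-capable part, IPS not disabled via the enable_ips module
 * parameter, at most 8 bpc, and on Broadwell sufficient cdclk headroom.
 */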
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!i915->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max, which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 * would be preferable.
	 */
	if (IS_BROADWELL(i915) &&
	    crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100)
		return false;

	return true;
}

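/*
 * Compute crtc_state->ips_enabled for this commit. IPS stays disabled when
 * the pipe is not IPS capable, when CRC capture is enabled, when no
 * non-cursor plane is active, or when the Broadwell pixel rate vs. cdclk
 * limit would be exceeded.
 */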
int hsw_ips_compute_config(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so just keep
	 * IPS disabled whenever CRC capture is enabled.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}

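/* Read out the current IPS hardware state into the CRTC state. */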
void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!hsw_crtc_supports_ips(crtc))
		return;

	if (IS_HASWELL(i915)) {
		crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE;
	} else {
		/*
		 * We cannot read out the IPS state on Broadwell, so set it to
		 * true here, which lets us force it to a defined state on the
		 * first commit.
		 */
		crtc_state->ips_enabled = true;
	}
}

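/*
 * debugfs: report whether IPS is enabled by the enable_ips kernel parameter
 * and, where the hardware state can be read, whether it is currently active.
 */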
static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	intel_wakeref_t wakeref;

	if (!HAS_IPS(i915))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   str_yes_no(i915->params.enable_ips));

	if (DISPLAY_VER(i915) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status);

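/* Register the i915_ips_status debugfs file for this device. */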
void hsw_ips_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root,
			    i915, &hsw_ips_debugfs_status_fops);
}