/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
 * @connector: Connector to get the property for.
 * @state: Connector state to retrieve the property from.
 * @property: Property to retrieve.
 * @val: Return value for the property.
 *
 * Returns the atomic property value for a digital connector.
 */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
						const struct drm_connector_state *state,
						struct drm_property *property,
						u64 *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(state);

	if (property == dev_priv->force_audio_property)
		*val = intel_conn_state->force_audio;
	else if (property == dev_priv->broadcast_rgb_property)
		*val = intel_conn_state->broadcast_rgb;
	else {
		drm_dbg_atomic(&dev_priv->drm,
			       "Unknown property [PROP:%d:%s]\n",
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
 * @connector: Connector to set the property for.
 * @state: Connector state to set the property on.
 * @property: Property to set.
 * @val: New value for the property.
 *
 * Sets the atomic property value for a digital connector.
 */
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
						struct drm_connector_state *state,
						struct drm_property *property,
						u64 val)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(state);

	if (property == dev_priv->force_audio_property) {
		intel_conn_state->force_audio = val;
		return 0;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		intel_conn_state->broadcast_rgb = val;
		return 0;
	}

	drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
		       property->base.id, property->name);
	return -EINVAL;
}
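
/*
 * The two property hooks above (together with
 * intel_digital_connector_duplicate_state() further down) are meant to be
 * plugged into a connector's &drm_connector_funcs table by the individual
 * connector code. A minimal sketch, with an illustrative table name only
 * (the real tables live in the respective connector/encoder files):
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		.atomic_get_property = intel_digital_connector_atomic_get_property,
 *		.atomic_set_property = intel_digital_connector_atomic_set_property,
 *		.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 *	};
 */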

static bool blob_equal(const struct drm_property_blob *a,
		       const struct drm_property_blob *b)
{
	if (a && b)
		return a->length == b->length &&
			!memcmp(a->data, b->data, a->length);

	return !a == !b;
}

int intel_digital_connector_atomic_check(struct drm_connector *conn,
					 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct intel_digital_connector_state *new_conn_state =
		to_intel_digital_connector_state(new_state);
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct intel_digital_connector_state *old_conn_state =
		to_intel_digital_connector_state(old_state);
	struct drm_crtc_state *crtc_state;

	intel_hdcp_atomic_check(conn, old_state, new_state);
	intel_psr_atomic_check(conn, old_state, new_state);

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	/*
	 * These properties are handled by fastset, and might not end
	 * up in a modeset.
	 */
	if (new_conn_state->force_audio != old_conn_state->force_audio ||
	    new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
	    new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
	    new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
	    new_conn_state->base.content_type != old_conn_state->base.content_type ||
	    new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
	    !blob_equal(new_conn_state->base.hdr_output_metadata,
			old_conn_state->base.hdr_output_metadata))
		crtc_state->mode_changed = true;

	return 0;
}

/**
 * intel_digital_connector_duplicate_state - duplicate connector state
 * @connector: digital connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * digital connector specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector)
{
	struct intel_digital_connector_state *state;

	state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &state->base);
	return &state->base;
}

/**
 * intel_connector_needs_modeset - check if connector needs a modeset
 * @state: the atomic state corresponding to this modeset
 * @connector: the connector
 *
 * Returns: %true if @connector needs a full modeset as part of @state,
 * %false otherwise.
 */
bool
intel_connector_needs_modeset(struct intel_atomic_state *state,
			      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state, *new_conn_state;

	old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
	new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);

	return old_conn_state->crtc != new_conn_state->crtc ||
	       (new_conn_state->crtc &&
		drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
									    new_conn_state->crtc)));
}

struct intel_digital_connector_state *
intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
					 struct intel_connector *connector)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(&state->base,
						    &connector->base);
	if (IS_ERR(conn_state))
		return ERR_CAST(conn_state);

	return to_intel_digital_connector_state(conn_state);
}
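
/*
 * A hypothetical caller sketch: encoder ->compute_config() style code can use
 * intel_atomic_get_digital_connector_state() to pull the Intel-specific
 * connector state into the atomic transaction (taking the required locks) and
 * then inspect the extra digital connector properties:
 *
 *	struct intel_digital_connector_state *conn_state;
 *
 *	conn_state = intel_atomic_get_digital_connector_state(state, connector);
 *	if (IS_ERR(conn_state))
 *		return PTR_ERR(conn_state);
 *
 *	if (conn_state->force_audio == HDMI_AUDIO_ON)
 *		...enable audio regardless of what the EDID reports...
 */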

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *crtc_state;

	crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);

	/* copy color blobs */
	if (crtc_state->hw.degamma_lut)
		drm_property_blob_get(crtc_state->hw.degamma_lut);
	if (crtc_state->hw.ctm)
		drm_property_blob_get(crtc_state->hw.ctm);
	if (crtc_state->hw.gamma_lut)
		drm_property_blob_get(crtc_state->hw.gamma_lut);

	crtc_state->update_pipe = false;
	crtc_state->disable_lp_wm = false;
	crtc_state->disable_cxsr = false;
	crtc_state->update_wm_pre = false;
	crtc_state->update_wm_post = false;
	crtc_state->fifo_changed = false;
	crtc_state->preload_luts = false;
	crtc_state->wm.need_postvbl_update = false;
	crtc_state->fb_bits = 0;
	crtc_state->update_planes = 0;

	return &crtc_state->uapi;
}

static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
{
	drm_property_blob_put(crtc_state->hw.degamma_lut);
	drm_property_blob_put(crtc_state->hw.gamma_lut);
	drm_property_blob_put(crtc_state->hw.ctm);
}

void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
{
	intel_crtc_put_color_blobs(crtc_state);
}

void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state)
{
	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
	intel_crtc_free_hw_state(crtc_state);
	kfree(crtc_state);
}
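
/*
 * intel_crtc_duplicate_state() and intel_crtc_destroy_state() are intended to
 * back the crtc state hooks of &drm_crtc_funcs. A minimal sketch, with an
 * illustrative table name only (the real table lives in the display core):
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		.atomic_duplicate_state = intel_crtc_duplicate_state,
 *		.atomic_destroy_state = intel_crtc_destroy_state,
 *	};
 */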

static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
				      int num_scalers_need, struct intel_crtc *intel_crtc,
				      const char *name, int idx,
				      struct intel_plane_state *plane_state,
				      int *scaler_id)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int j;
	u32 mode;

	if (*scaler_id < 0) {
		/* find a free scaler */
		for (j = 0; j < intel_crtc->num_scalers; j++) {
			if (scaler_state->scalers[j].in_use)
				continue;

			*scaler_id = j;
			scaler_state->scalers[*scaler_id].in_use = 1;
			break;
		}
	}

	if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
		     "Cannot find scaler for %s:%d\n", name, idx))
		return;

	/* set scaler mode */
	if (plane_state && plane_state->hw.fb &&
	    plane_state->hw.fb->format->is_yuv &&
	    plane_state->hw.fb->format->num_planes > 1) {
		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

		if (IS_GEN(dev_priv, 9) &&
		    !IS_GEMINILAKE(dev_priv)) {
			mode = SKL_PS_SCALER_MODE_NV12;
		} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/*
			 * On gen11+'s HDR planes we only use the scaler for
			 * scaling. They have a dedicated chroma upsampler, so
			 * we don't need the scaler to upsample the UV plane.
			 */
			mode = PS_SCALER_MODE_NORMAL;
		} else {
			struct intel_plane *linked =
				plane_state->planar_linked_plane;

			mode = PS_SCALER_MODE_PLANAR;

			if (linked)
				mode |= PS_PLANE_Y_SEL(linked->id);
		}
	} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
		mode = PS_SCALER_MODE_NORMAL;
	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
		/*
		 * When only 1 scaler is in use on a pipe with 2 scalers,
		 * scaler 0 operates in high quality (HQ) mode.
		 * In this case use scaler 0 to take advantage of HQ mode.
		 */
		scaler_state->scalers[*scaler_id].in_use = 0;
		*scaler_id = 0;
		scaler_state->scalers[0].in_use = 1;
		mode = SKL_PS_SCALER_MODE_HQ;
	} else {
		mode = SKL_PS_SCALER_MODE_DYN;
	}

	drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
		    intel_crtc->pipe, *scaler_id, name, idx);
	scaler_state->scalers[*scaler_id].mode = mode;
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev_priv: i915 device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * @crtc and its planes. It is called from the crtc-level check path. If the
 * request is supportable, it attaches scalers to the requested planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * not being part of this atomic state.
 *
 * Returns:
 *   0 - scalers were set up successfully
 *   error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->uapi.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
	int num_scalers_need;
	int i;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		drm_dbg_kms(&dev_priv->drm,
			    "Too many scaling requests %d > %d\n",
			    num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walk through scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i].ptr;

			/*
			 * to enable/disable hq mode, add planes that are using a
			 * scaler into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;

				/*
				 * GLK+ scalers don't have a HQ mode so it
				 * isn't necessary to change between HQ and dyn mode
				 * on those platforms.
				 */
				if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					continue;

				plane = drm_plane_from_index(&dev_priv->drm, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					drm_dbg_kms(&dev_priv->drm,
						    "Failed to add [PLANE:%d] to drm_state\n",
						    plane->base.id);
					return PTR_ERR(state);
				}
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (drm_WARN_ON(&dev_priv->drm,
					intel_plane->pipe != intel_crtc->pipe))
				continue;

			plane_state = intel_atomic_get_new_plane_state(intel_state,
								       intel_plane);
			scaler_id = &plane_state->scaler_id;
		}

		intel_atomic_setup_scaler(scaler_state, num_scalers_need,
					  intel_crtc, name, idx,
					  plane_state, scaler_id);
	}

	return 0;
}
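
/*
 * A sketch of the expected caller, assuming the crtc-level atomic check has
 * already let the crtc and its planes stage their scaling requests in
 * crtc_state->scaler_state.scaler_users (the function name below is
 * illustrative only):
 *
 *	static int example_crtc_atomic_check(struct intel_crtc *crtc,
 *					     struct intel_crtc_state *crtc_state)
 *	{
 *		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 *
 *		if (INTEL_GEN(dev_priv) >= 9)
 *			return intel_atomic_setup_scalers(dev_priv, crtc,
 *							  crtc_state);
 *
 *		return 0;
 *	}
 */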

struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

void intel_atomic_state_free(struct drm_atomic_state *_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);

	drm_atomic_state_default_release(&state->base);
	kfree(state->global_objs);

	i915_sw_fence_fini(&state->commit_ready);

	kfree(state);
}

void intel_atomic_state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_atomic_state_default_clear(&state->base);
	intel_atomic_clear_global_state(state);

	state->dpll_set = state->modeset = false;
	state->global_state_changed = false;
	state->active_pipes = 0;
}
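
/*
 * The three helpers above give i915 its own subclass of &drm_atomic_state and
 * are meant to be wired into &drm_mode_config_funcs. A minimal sketch, with an
 * illustrative table name only (the real table lives in the display core):
 *
 *	static const struct drm_mode_config_funcs example_mode_config_funcs = {
 *		.atomic_state_alloc = intel_atomic_state_alloc,
 *		.atomic_state_clear = intel_atomic_state_clear,
 *		.atomic_state_free = intel_atomic_state_free,
 *	};
 */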

struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
	if (IS_ERR(crtc_state))
		return ERR_CAST(crtc_state);

	return to_intel_crtc_state(crtc_state);
}

int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	state->global_state_changed = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		int ret;

		ret = drm_modeset_lock(&crtc->base.mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;
	}

	return 0;
}

int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	state->global_state_changed = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}