1 /*
2  * Copyright (C) 2014 Red Hat
3  * Copyright (C) 2014 Intel Corp.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  * Rob Clark <robdclark@gmail.com>
25  * Daniel Vetter <daniel.vetter@ffwll.ch>
26  */
27 
28 #include <linux/dma-fence.h>
29 #include <linux/ktime.h>
30 
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_atomic_uapi.h>
34 #include <drm/drm_bridge.h>
35 #include <drm/drm_damage_helper.h>
36 #include <drm/drm_device.h>
37 #include <drm/drm_drv.h>
38 #include <drm/drm_plane_helper.h>
39 #include <drm/drm_print.h>
40 #include <drm/drm_self_refresh_helper.h>
41 #include <drm/drm_vblank.h>
42 #include <drm/drm_writeback.h>
43 
44 #include "drm_crtc_helper_internal.h"
45 #include "drm_crtc_internal.h"
46 
47 /**
48  * DOC: overview
49  *
50  * This helper library provides implementations of check and commit functions on
51  * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
52  * also provides convenience implementations for the atomic state handling
53  * callbacks for drivers which don't need to subclass the drm core structures to
54  * add their own additional internal state.
55  *
56  * This library also provides default implementations for the check callback in
57  * drm_atomic_helper_check() and for the commit callback with
58  * drm_atomic_helper_commit(). But the individual stages and callbacks are
59  * exposed to allow drivers to mix and match and e.g. use the plane helpers only
60  * together with a driver private modeset implementation.
61  *
62  * This library also provides implementations for all the legacy driver
63  * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
64  * drm_atomic_helper_disable_plane(), and the various functions to implement
65  * set_property callbacks. New drivers must not implement these functions
66  * themselves but must use the provided helpers.
67  *
68  * The atomic helper uses the same function table structures as all other
69  * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
70  * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
71  * also shares the &struct drm_plane_helper_funcs function table with the plane
72  * helpers.
73  */
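
/*
 * Example (sketch): a minimal wiring of these helpers into a driver. The
 * foo_* names are placeholders and not part of this file; the helper and core
 * functions referenced are the real ones documented above.
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 *	// The per-object helper function tables are attached at init time:
 *	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
 *	drm_plane_helper_add(plane, &foo_plane_helper_funcs);
 *	drm_connector_helper_add(connector, &foo_connector_helper_funcs);
 */
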
74 static void
75 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
76 				struct drm_plane_state *old_plane_state,
77 				struct drm_plane_state *plane_state,
78 				struct drm_plane *plane)
79 {
80 	struct drm_crtc_state *crtc_state;
81 
82 	if (old_plane_state->crtc) {
83 		crtc_state = drm_atomic_get_new_crtc_state(state,
84 							   old_plane_state->crtc);
85 
86 		if (WARN_ON(!crtc_state))
87 			return;
88 
89 		crtc_state->planes_changed = true;
90 	}
91 
92 	if (plane_state->crtc) {
93 		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
94 
95 		if (WARN_ON(!crtc_state))
96 			return;
97 
98 		crtc_state->planes_changed = true;
99 	}
100 }
101 
102 static int handle_conflicting_encoders(struct drm_atomic_state *state,
103 				       bool disable_conflicting_encoders)
104 {
105 	struct drm_connector_state *new_conn_state;
106 	struct drm_connector *connector;
107 	struct drm_connector_list_iter conn_iter;
108 	struct drm_encoder *encoder;
109 	unsigned encoder_mask = 0;
110 	int i, ret = 0;
111 
112 	/*
113 	 * First loop, find all newly assigned encoders from the connectors
114 	 * part of the state. If the same encoder is assigned to multiple
115 	 * connectors, bail out.
116 	 */
117 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
118 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
119 		struct drm_encoder *new_encoder;
120 
121 		if (!new_conn_state->crtc)
122 			continue;
123 
124 		if (funcs->atomic_best_encoder)
125 			new_encoder = funcs->atomic_best_encoder(connector,
126 								 state);
127 		else if (funcs->best_encoder)
128 			new_encoder = funcs->best_encoder(connector);
129 		else
130 			new_encoder = drm_connector_get_single_encoder(connector);
131 
132 		if (new_encoder) {
133 			if (encoder_mask & drm_encoder_mask(new_encoder)) {
134 				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
135 					new_encoder->base.id, new_encoder->name,
136 					connector->base.id, connector->name);
137 
138 				return -EINVAL;
139 			}
140 
141 			encoder_mask |= drm_encoder_mask(new_encoder);
142 		}
143 	}
144 
145 	if (!encoder_mask)
146 		return 0;
147 
148 	/*
149 	 * Second loop, iterate over all connectors not part of the state.
150 	 *
151 	 * If a conflicting encoder is found and disable_conflicting_encoders
152 	 * is not set, an error is returned. Userspace can provide a solution
153 	 * through the atomic ioctl.
154 	 *
155 	 * If the flag is set, conflicting connectors are removed from the CRTC
156 	 * and the CRTC is disabled if no encoder is left. This preserves
157 	 * compatibility with the legacy set_config behavior.
158 	 */
159 	drm_connector_list_iter_begin(state->dev, &conn_iter);
160 	drm_for_each_connector_iter(connector, &conn_iter) {
161 		struct drm_crtc_state *crtc_state;
162 
163 		if (drm_atomic_get_new_connector_state(state, connector))
164 			continue;
165 
166 		encoder = connector->state->best_encoder;
167 		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
168 			continue;
169 
170 		if (!disable_conflicting_encoders) {
171 			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
172 					 encoder->base.id, encoder->name,
173 					 connector->state->crtc->base.id,
174 					 connector->state->crtc->name,
175 					 connector->base.id, connector->name);
176 			ret = -EINVAL;
177 			goto out;
178 		}
179 
180 		new_conn_state = drm_atomic_get_connector_state(state, connector);
181 		if (IS_ERR(new_conn_state)) {
182 			ret = PTR_ERR(new_conn_state);
183 			goto out;
184 		}
185 
186 		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
187 				 encoder->base.id, encoder->name,
188 				 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
189 				 connector->base.id, connector->name);
190 
191 		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
192 
193 		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
194 		if (ret)
195 			goto out;
196 
197 		if (!crtc_state->connector_mask) {
198 			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
199 								NULL);
200 			if (ret < 0)
201 				goto out;
202 
203 			crtc_state->active = false;
204 		}
205 	}
206 out:
207 	drm_connector_list_iter_end(&conn_iter);
208 
209 	return ret;
210 }
211 
212 static void
213 set_best_encoder(struct drm_atomic_state *state,
214 		 struct drm_connector_state *conn_state,
215 		 struct drm_encoder *encoder)
216 {
217 	struct drm_crtc_state *crtc_state;
218 	struct drm_crtc *crtc;
219 
220 	if (conn_state->best_encoder) {
221 		/* Unset the encoder_mask in the old crtc state. */
222 		crtc = conn_state->connector->state->crtc;
223 
224 		/* A NULL crtc is an error here because we should have
225 		 * duplicated a NULL best_encoder when crtc was NULL.
226 		 * As an exception, restoring duplicated atomic state
227 		 * during resume is allowed, so don't warn when
228 		 * best_encoder is equal to the encoder we intend to set.
229 		 */
230 		WARN_ON(!crtc && encoder != conn_state->best_encoder);
231 		if (crtc) {
232 			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
233 
234 			crtc_state->encoder_mask &=
235 				~drm_encoder_mask(conn_state->best_encoder);
236 		}
237 	}
238 
239 	if (encoder) {
240 		crtc = conn_state->crtc;
241 		WARN_ON(!crtc);
242 		if (crtc) {
243 			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
244 
245 			crtc_state->encoder_mask |=
246 				drm_encoder_mask(encoder);
247 		}
248 	}
249 
250 	conn_state->best_encoder = encoder;
251 }
252 
253 static void
254 steal_encoder(struct drm_atomic_state *state,
255 	      struct drm_encoder *encoder)
256 {
257 	struct drm_crtc_state *crtc_state;
258 	struct drm_connector *connector;
259 	struct drm_connector_state *old_connector_state, *new_connector_state;
260 	int i;
261 
262 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
263 		struct drm_crtc *encoder_crtc;
264 
265 		if (new_connector_state->best_encoder != encoder)
266 			continue;
267 
268 		encoder_crtc = old_connector_state->crtc;
269 
270 		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
271 				 encoder->base.id, encoder->name,
272 				 encoder_crtc->base.id, encoder_crtc->name);
273 
274 		set_best_encoder(state, new_connector_state, NULL);
275 
276 		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
277 		crtc_state->connectors_changed = true;
278 
279 		return;
280 	}
281 }
282 
283 static int
284 update_connector_routing(struct drm_atomic_state *state,
285 			 struct drm_connector *connector,
286 			 struct drm_connector_state *old_connector_state,
287 			 struct drm_connector_state *new_connector_state)
288 {
289 	const struct drm_connector_helper_funcs *funcs;
290 	struct drm_encoder *new_encoder;
291 	struct drm_crtc_state *crtc_state;
292 
293 	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
294 			 connector->base.id,
295 			 connector->name);
296 
297 	if (old_connector_state->crtc != new_connector_state->crtc) {
298 		if (old_connector_state->crtc) {
299 			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
300 			crtc_state->connectors_changed = true;
301 		}
302 
303 		if (new_connector_state->crtc) {
304 			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
305 			crtc_state->connectors_changed = true;
306 		}
307 	}
308 
309 	if (!new_connector_state->crtc) {
310 		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
311 				connector->base.id,
312 				connector->name);
313 
314 		set_best_encoder(state, new_connector_state, NULL);
315 
316 		return 0;
317 	}
318 
319 	crtc_state = drm_atomic_get_new_crtc_state(state,
320 						   new_connector_state->crtc);
321 	/*
322 	 * For compatibility with legacy users, we want to make sure that
323 	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
324 	 * which would result in anything else must be considered invalid, to
325 	 * avoid turning on new displays on dead connectors.
326 	 *
327 	 * Since the connector can be unregistered at any point during an
328 	 * atomic check or commit, this is racy. But that's OK: all we care
329 	 * about is ensuring that userspace can't do anything but shut off the
330 	 * display on a connector that was destroyed after it's been notified,
331 	 * not before.
332 	 *
333 	 * Additionally, we want to ignore connector registration when we're
334 	 * trying to restore an atomic state during system resume, since
335 	 * there's a chance the connector may have been destroyed during the
336 	 * process, but it's better to ignore that than cause
337 	 * drm_atomic_helper_resume() to fail.
338 	 */
339 	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
340 	    crtc_state->active) {
341 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
342 				 connector->base.id, connector->name);
343 		return -EINVAL;
344 	}
345 
346 	funcs = connector->helper_private;
347 
348 	if (funcs->atomic_best_encoder)
349 		new_encoder = funcs->atomic_best_encoder(connector, state);
350 	else if (funcs->best_encoder)
351 		new_encoder = funcs->best_encoder(connector);
352 	else
353 		new_encoder = drm_connector_get_single_encoder(connector);
354 
355 	if (!new_encoder) {
356 		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
357 				 connector->base.id,
358 				 connector->name);
359 		return -EINVAL;
360 	}
361 
362 	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
363 		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
364 				 new_encoder->base.id,
365 				 new_encoder->name,
366 				 new_connector_state->crtc->base.id,
367 				 new_connector_state->crtc->name);
368 		return -EINVAL;
369 	}
370 
371 	if (new_encoder == new_connector_state->best_encoder) {
372 		set_best_encoder(state, new_connector_state, new_encoder);
373 
374 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
375 				 connector->base.id,
376 				 connector->name,
377 				 new_encoder->base.id,
378 				 new_encoder->name,
379 				 new_connector_state->crtc->base.id,
380 				 new_connector_state->crtc->name);
381 
382 		return 0;
383 	}
384 
385 	steal_encoder(state, new_encoder);
386 
387 	set_best_encoder(state, new_connector_state, new_encoder);
388 
389 	crtc_state->connectors_changed = true;
390 
391 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
392 			 connector->base.id,
393 			 connector->name,
394 			 new_encoder->base.id,
395 			 new_encoder->name,
396 			 new_connector_state->crtc->base.id,
397 			 new_connector_state->crtc->name);
398 
399 	return 0;
400 }
401 
402 static int
403 mode_fixup(struct drm_atomic_state *state)
404 {
405 	struct drm_crtc *crtc;
406 	struct drm_crtc_state *new_crtc_state;
407 	struct drm_connector *connector;
408 	struct drm_connector_state *new_conn_state;
409 	int i;
410 	int ret;
411 
412 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
413 		if (!new_crtc_state->mode_changed &&
414 		    !new_crtc_state->connectors_changed)
415 			continue;
416 
417 		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
418 	}
419 
420 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
421 		const struct drm_encoder_helper_funcs *funcs;
422 		struct drm_encoder *encoder;
423 		struct drm_bridge *bridge;
424 
425 		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
426 
427 		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
428 			continue;
429 
430 		new_crtc_state =
431 			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
432 
433 		/*
434 		 * Each encoder has at most one connector (since we always steal
435 		 * it away), so we won't call ->mode_fixup twice.
436 		 */
437 		encoder = new_conn_state->best_encoder;
438 		funcs = encoder->helper_private;
439 
440 		bridge = drm_bridge_chain_get_first_bridge(encoder);
441 		ret = drm_atomic_bridge_chain_check(bridge,
442 						    new_crtc_state,
443 						    new_conn_state);
444 		if (ret) {
445 			DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
446 			return ret;
447 		}
448 
449 		if (funcs && funcs->atomic_check) {
450 			ret = funcs->atomic_check(encoder, new_crtc_state,
451 						  new_conn_state);
452 			if (ret) {
453 				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
454 						 encoder->base.id, encoder->name);
455 				return ret;
456 			}
457 		} else if (funcs && funcs->mode_fixup) {
458 			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
459 						&new_crtc_state->adjusted_mode);
460 			if (!ret) {
461 				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
462 						 encoder->base.id, encoder->name);
463 				return -EINVAL;
464 			}
465 		}
466 	}
467 
468 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
469 		const struct drm_crtc_helper_funcs *funcs;
470 
471 		if (!new_crtc_state->enable)
472 			continue;
473 
474 		if (!new_crtc_state->mode_changed &&
475 		    !new_crtc_state->connectors_changed)
476 			continue;
477 
478 		funcs = crtc->helper_private;
479 		if (!funcs || !funcs->mode_fixup)
480 			continue;
481 
482 		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
483 					&new_crtc_state->adjusted_mode);
484 		if (!ret) {
485 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
486 					 crtc->base.id, crtc->name);
487 			return -EINVAL;
488 		}
489 	}
490 
491 	return 0;
492 }
493 
494 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
495 					    struct drm_encoder *encoder,
496 					    struct drm_crtc *crtc,
497 					    const struct drm_display_mode *mode)
498 {
499 	struct drm_bridge *bridge;
500 	enum drm_mode_status ret;
501 
502 	ret = drm_encoder_mode_valid(encoder, mode);
503 	if (ret != MODE_OK) {
504 		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
505 				encoder->base.id, encoder->name);
506 		return ret;
507 	}
508 
509 	bridge = drm_bridge_chain_get_first_bridge(encoder);
510 	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
511 					  mode);
512 	if (ret != MODE_OK) {
513 		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
514 		return ret;
515 	}
516 
517 	ret = drm_crtc_mode_valid(crtc, mode);
518 	if (ret != MODE_OK) {
519 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
520 				crtc->base.id, crtc->name);
521 		return ret;
522 	}
523 
524 	return ret;
525 }
526 
527 static int
528 mode_valid(struct drm_atomic_state *state)
529 {
530 	struct drm_connector_state *conn_state;
531 	struct drm_connector *connector;
532 	int i;
533 
534 	for_each_new_connector_in_state(state, connector, conn_state, i) {
535 		struct drm_encoder *encoder = conn_state->best_encoder;
536 		struct drm_crtc *crtc = conn_state->crtc;
537 		struct drm_crtc_state *crtc_state;
538 		enum drm_mode_status mode_status;
539 		const struct drm_display_mode *mode;
540 
541 		if (!crtc || !encoder)
542 			continue;
543 
544 		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
545 		if (!crtc_state)
546 			continue;
547 		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
548 			continue;
549 
550 		mode = &crtc_state->mode;
551 
552 		mode_status = mode_valid_path(connector, encoder, crtc, mode);
553 		if (mode_status != MODE_OK)
554 			return -EINVAL;
555 	}
556 
557 	return 0;
558 }
559 
560 /**
561  * drm_atomic_helper_check_modeset - validate state object for modeset changes
562  * @dev: DRM device
563  * @state: the driver state object
564  *
565  * Check the state object to see if the requested state is physically possible.
566  * This does all the CRTC and connector related computations for an atomic
567  * update and adds any additional connectors needed for full modesets. It calls
568  * the various per-object callbacks in the follow order:
569  *
570  * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
571  * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
572  * 3. If it's determined a modeset is needed then all connectors on the affected
573  *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
574  * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
575  *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
576  * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
577  * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
578  *    This function is only called when the encoder will be part of a configured CRTC;
579  *    it must not be used for implementing connector property validation.
580  *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
581  *    instead.
582  * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
583  *
584  * &drm_crtc_state.mode_changed is set when the input mode is changed.
585  * &drm_crtc_state.connectors_changed is set when a connector is added or
586  * removed from the CRTC.  &drm_crtc_state.active_changed is set when
587  * &drm_crtc_state.active changes, which is used for DPMS.
588  * &drm_crtc_state.no_vblank is set to true when the device lacks vblank
589  * support (see drm_dev_has_vblank()). See also: drm_atomic_crtc_needs_modeset()
590  *
591  * IMPORTANT:
592  *
593  * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
594  * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
595  * without a full modeset) _must_ call this function after that change. It is
596  * permitted to call this function multiple times for the same update, e.g.
597  * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
598  * adjusted dotclock for fifo space allocation and watermark computation.
599  *
600  * RETURNS:
601  * Zero for success or -errno
602  */
603 int
604 drm_atomic_helper_check_modeset(struct drm_device *dev,
605 				struct drm_atomic_state *state)
606 {
607 	struct drm_crtc *crtc;
608 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
609 	struct drm_connector *connector;
610 	struct drm_connector_state *old_connector_state, *new_connector_state;
611 	int i, ret;
612 	unsigned connectors_mask = 0;
613 
614 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
615 		bool has_connectors =
616 			!!new_crtc_state->connector_mask;
617 
618 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
619 
620 		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
621 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
622 					 crtc->base.id, crtc->name);
623 			new_crtc_state->mode_changed = true;
624 		}
625 
626 		if (old_crtc_state->enable != new_crtc_state->enable) {
627 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
628 					 crtc->base.id, crtc->name);
629 
630 			/*
631 			 * For clarity this assignment is done here, but
632 			 * enable == 0 is only true when there are no
633 			 * connectors and a NULL mode.
634 			 *
635 			 * The other way around is true as well. enable != 0
636 			 * iff connectors are attached and a mode is set.
637 			 */
638 			new_crtc_state->mode_changed = true;
639 			new_crtc_state->connectors_changed = true;
640 		}
641 
642 		if (old_crtc_state->active != new_crtc_state->active) {
643 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
644 					 crtc->base.id, crtc->name);
645 			new_crtc_state->active_changed = true;
646 		}
647 
648 		if (new_crtc_state->enable != has_connectors) {
649 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
650 					 crtc->base.id, crtc->name);
651 
652 			return -EINVAL;
653 		}
654 
655 		if (drm_dev_has_vblank(dev))
656 			new_crtc_state->no_vblank = false;
657 		else
658 			new_crtc_state->no_vblank = true;
659 	}
660 
661 	ret = handle_conflicting_encoders(state, false);
662 	if (ret)
663 		return ret;
664 
665 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
666 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
667 
668 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
669 
670 		/*
671 		 * This only sets crtc->connectors_changed for routing changes,
672 		 * drivers must set crtc->connectors_changed themselves when
673 		 * connector properties need to be updated.
674 		 */
675 		ret = update_connector_routing(state, connector,
676 					       old_connector_state,
677 					       new_connector_state);
678 		if (ret)
679 			return ret;
680 		if (old_connector_state->crtc) {
681 			new_crtc_state = drm_atomic_get_new_crtc_state(state,
682 								       old_connector_state->crtc);
683 			if (old_connector_state->link_status !=
684 			    new_connector_state->link_status)
685 				new_crtc_state->connectors_changed = true;
686 
687 			if (old_connector_state->max_requested_bpc !=
688 			    new_connector_state->max_requested_bpc)
689 				new_crtc_state->connectors_changed = true;
690 		}
691 
692 		if (funcs->atomic_check)
693 			ret = funcs->atomic_check(connector, state);
694 		if (ret)
695 			return ret;
696 
697 		connectors_mask |= BIT(i);
698 	}
699 
700 	/*
701 	 * After all the routing has been prepared we need to add in any
702 	 * connector which is itself unchanged, but whose CRTC changes its
703 	 * configuration. This must be done before calling mode_fixup in case a
704 	 * crtc only changed its mode but has the same set of connectors.
705 	 */
706 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
707 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
708 			continue;
709 
710 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
711 				 crtc->base.id, crtc->name,
712 				 new_crtc_state->enable ? 'y' : 'n',
713 				 new_crtc_state->active ? 'y' : 'n');
714 
715 		ret = drm_atomic_add_affected_connectors(state, crtc);
716 		if (ret != 0)
717 			return ret;
718 
719 		ret = drm_atomic_add_affected_planes(state, crtc);
720 		if (ret != 0)
721 			return ret;
722 	}
723 
724 	/*
725 	 * Iterate over all connectors again, to make sure atomic_check()
726 	 * has been called on them when a modeset is forced.
727 	 */
728 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
729 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
730 
731 		if (connectors_mask & BIT(i))
732 			continue;
733 
734 		if (funcs->atomic_check)
735 			ret = funcs->atomic_check(connector, state);
736 		if (ret)
737 			return ret;
738 	}
739 
740 	/*
741 	 * Iterate over all connectors again, and add all affected bridges to
742 	 * the state.
743 	 */
744 	for_each_oldnew_connector_in_state(state, connector,
745 					   old_connector_state,
746 					   new_connector_state, i) {
747 		struct drm_encoder *encoder;
748 
749 		encoder = old_connector_state->best_encoder;
750 		ret = drm_atomic_add_encoder_bridges(state, encoder);
751 		if (ret)
752 			return ret;
753 
754 		encoder = new_connector_state->best_encoder;
755 		ret = drm_atomic_add_encoder_bridges(state, encoder);
756 		if (ret)
757 			return ret;
758 	}
759 
760 	ret = mode_valid(state);
761 	if (ret)
762 		return ret;
763 
764 	return mode_fixup(state);
765 }
766 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
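
/*
 * Example (sketch): the re-check pattern from the IMPORTANT note above, for a
 * driver whose plane checks can set &drm_crtc_state.mode_changed. The foo_*
 * name is a placeholder.
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		// Plane hooks called here may flag a full modeset by setting
 *		// crtc_state->mode_changed.
 *		ret = drm_atomic_helper_check_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		// Run the modeset checks again so newly flagged CRTCs pick up
 *		// their connectors and go through mode_valid()/mode_fixup().
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}
 */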
767 
768 /**
769  * drm_atomic_helper_check_plane_state() - Check plane state for validity
770  * @plane_state: plane state to check
771  * @crtc_state: CRTC state to check
772  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
773  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
774  * @can_position: is it legal to position the plane such that it
775  *                doesn't cover the entire CRTC?  This will generally
776  *                only be false for primary planes.
777  * @can_update_disabled: can the plane be updated while the CRTC
778  *                       is disabled?
779  *
780  * Checks that a desired plane update is valid, and updates various
781  * bits of derived state (clipped coordinates etc.). Drivers that provide
782  * their own plane handling rather than helper-provided implementations may
783  * still wish to call this function to avoid duplication of error checking
784  * code.
785  *
786  * RETURNS:
787  * Zero if update appears valid, error code on failure
788  */
789 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
790 					const struct drm_crtc_state *crtc_state,
791 					int min_scale,
792 					int max_scale,
793 					bool can_position,
794 					bool can_update_disabled)
795 {
796 	struct drm_framebuffer *fb = plane_state->fb;
797 	struct drm_rect *src = &plane_state->src;
798 	struct drm_rect *dst = &plane_state->dst;
799 	unsigned int rotation = plane_state->rotation;
800 	struct drm_rect clip = {};
801 	int hscale, vscale;
802 
803 	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
804 
805 	*src = drm_plane_state_src(plane_state);
806 	*dst = drm_plane_state_dest(plane_state);
807 
808 	if (!fb) {
809 		plane_state->visible = false;
810 		return 0;
811 	}
812 
813 	/* crtc should only be NULL when disabling (i.e., !fb) */
814 	if (WARN_ON(!plane_state->crtc)) {
815 		plane_state->visible = false;
816 		return 0;
817 	}
818 
819 	if (!crtc_state->enable && !can_update_disabled) {
820 		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
821 		return -EINVAL;
822 	}
823 
824 	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
825 
826 	/* Check scaling */
827 	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
828 	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
829 	if (hscale < 0 || vscale < 0) {
830 		DRM_DEBUG_KMS("Invalid scaling of plane\n");
831 		drm_rect_debug_print("src: ", &plane_state->src, true);
832 		drm_rect_debug_print("dst: ", &plane_state->dst, false);
833 		return -ERANGE;
834 	}
835 
836 	if (crtc_state->enable)
837 		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
838 
839 	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
840 
841 	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
842 
843 	if (!plane_state->visible)
844 		/*
845 		 * Plane isn't visible; some drivers can handle this
846 		 * so we just return success here.  Drivers that can't
847 		 * (including those that use the primary plane helper's
848 		 * update function) will return an error from their
849 		 * update_plane handler.
850 		 */
851 		return 0;
852 
853 	if (!can_position && !drm_rect_equals(dst, &clip)) {
854 		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
855 		drm_rect_debug_print("dst: ", dst, false);
856 		drm_rect_debug_print("clip: ", &clip, false);
857 		return -EINVAL;
858 	}
859 
860 	return 0;
861 }
862 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
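
/*
 * Example (sketch): a &drm_plane_helper_funcs.atomic_check hook built around
 * this helper. The foo_ prefix is a placeholder; the scale limits shown
 * (1 << 16 in 16.16 fixed point) permit 1:1 scaling only, and the plane is
 * allowed to be positioned freely and updated on a disabled CRTC.
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state,
 *							   new_state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   1 << 16, 1 << 16,
 *							   true, true);
 *	}
 */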
863 
864 /**
865  * drm_atomic_helper_check_planes - validate state object for planes changes
866  * @dev: DRM device
867  * @state: the driver state object
868  *
869  * Check the state object to see if the requested state is physically possible.
870  * This does all the plane update related checks by calling into the
871  * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
872  * hooks provided by the driver.
873  *
874  * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
875  * updated planes.
876  *
877  * RETURNS:
878  * Zero for success or -errno
879  */
880 int
881 drm_atomic_helper_check_planes(struct drm_device *dev,
882 			       struct drm_atomic_state *state)
883 {
884 	struct drm_crtc *crtc;
885 	struct drm_crtc_state *new_crtc_state;
886 	struct drm_plane *plane;
887 	struct drm_plane_state *new_plane_state, *old_plane_state;
888 	int i, ret = 0;
889 
890 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
891 		const struct drm_plane_helper_funcs *funcs;
892 
893 		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
894 
895 		funcs = plane->helper_private;
896 
897 		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
898 
899 		drm_atomic_helper_check_plane_damage(state, new_plane_state);
900 
901 		if (!funcs || !funcs->atomic_check)
902 			continue;
903 
904 		ret = funcs->atomic_check(plane, state);
905 		if (ret) {
906 			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
907 					 plane->base.id, plane->name);
908 			return ret;
909 		}
910 	}
911 
912 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
913 		const struct drm_crtc_helper_funcs *funcs;
914 
915 		funcs = crtc->helper_private;
916 
917 		if (!funcs || !funcs->atomic_check)
918 			continue;
919 
920 		ret = funcs->atomic_check(crtc, state);
921 		if (ret) {
922 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
923 					 crtc->base.id, crtc->name);
924 			return ret;
925 		}
926 	}
927 
928 	return ret;
929 }
930 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
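
/*
 * Example (sketch): a &drm_crtc_helper_funcs.atomic_check hook as invoked by
 * the loop above. The foo_ prefix and the 256-entry gamma LUT limit are
 * placeholders for driver-specific constraints.
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		if (crtc_state->gamma_lut &&
 *		    drm_color_lut_size(crtc_state->gamma_lut) != 256)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */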
931 
932 /**
933  * drm_atomic_helper_check - validate state object
934  * @dev: DRM device
935  * @state: the driver state object
936  *
937  * Check the state object to see if the requested state is physically possible.
938  * Only CRTCs and planes have check callbacks, so for any additional (global)
939  * checking that a driver needs, it can simply wrap that around this function.
940  * Drivers without such needs can directly use this as their
941  * &drm_mode_config_funcs.atomic_check callback.
942  *
943  * This just wraps the two parts of the state checking for planes and modeset
944  * state in the default order: First it calls drm_atomic_helper_check_modeset()
945  * and then drm_atomic_helper_check_planes(). The assumption is that the
946  * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
947  * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
948  * watermarks.
949  *
950  * Note that zpos normalization will add all enabled planes to the state,
951  * which might not be desired for some drivers.
952  * For example, enabling/disabling a cursor plane with a fixed zpos value
953  * would force all other enabled planes to be added to the state change.
954  *
955  * RETURNS:
956  * Zero for success or -errno
957  */
958 int drm_atomic_helper_check(struct drm_device *dev,
959 			    struct drm_atomic_state *state)
960 {
961 	int ret;
962 
963 	ret = drm_atomic_helper_check_modeset(dev, state);
964 	if (ret)
965 		return ret;
966 
967 	if (dev->mode_config.normalize_zpos) {
968 		ret = drm_atomic_normalize_zpos(dev, state);
969 		if (ret)
970 			return ret;
971 	}
972 
973 	ret = drm_atomic_helper_check_planes(dev, state);
974 	if (ret)
975 		return ret;
976 
977 	if (state->legacy_cursor_update)
978 		state->async_update = !drm_atomic_helper_async_check(dev, state);
979 
980 	drm_self_refresh_helper_alter_state(state);
981 
982 	return ret;
983 }
984 EXPORT_SYMBOL(drm_atomic_helper_check);
985 
986 static bool
987 crtc_needs_disable(struct drm_crtc_state *old_state,
988 		   struct drm_crtc_state *new_state)
989 {
990 	/*
991 	 * No new_state means the CRTC is off, so the only criteria is whether
992 	 * it's currently active or in self refresh mode.
993 	 */
994 	if (!new_state)
995 		return drm_atomic_crtc_effectively_active(old_state);
996 
997 	/*
998 	 * We need to run through the crtc_funcs->disable() function if the CRTC
999 	 * is currently on, if it's transitioning to self refresh mode, or if
1000 	 * it's in self refresh mode and needs to be fully disabled.
1001 	 */
1002 	return old_state->active ||
1003 	       (old_state->self_refresh_active && !new_state->enable) ||
1004 	       new_state->self_refresh_active;
1005 }
1006 
1007 static void
1008 disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
1009 {
1010 	struct drm_connector *connector;
1011 	struct drm_connector_state *old_conn_state, *new_conn_state;
1012 	struct drm_crtc *crtc;
1013 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1014 	int i;
1015 
1016 	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1017 		const struct drm_encoder_helper_funcs *funcs;
1018 		struct drm_encoder *encoder;
1019 		struct drm_bridge *bridge;
1020 
1021 		/* Shut down everything that's in the changeset and currently
1022 		 * still on. So we need to check the old, saved state. */
1023 		if (!old_conn_state->crtc)
1024 			continue;
1025 
1026 		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
1027 
1028 		if (new_conn_state->crtc)
1029 			new_crtc_state = drm_atomic_get_new_crtc_state(
1030 						old_state,
1031 						new_conn_state->crtc);
1032 		else
1033 			new_crtc_state = NULL;
1034 
1035 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1036 		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1037 			continue;
1038 
1039 		encoder = old_conn_state->best_encoder;
1040 
1041 		/* We shouldn't get this far if we didn't previously have
1042 		 * an encoder, but WARN_ON() rather than explode.
1043 		 */
1044 		if (WARN_ON(!encoder))
1045 			continue;
1046 
1047 		funcs = encoder->helper_private;
1048 
1049 		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
1050 				 encoder->base.id, encoder->name);
1051 
1052 		/*
1053 		 * Each encoder has at most one connector (since we always steal
1054 		 * it away), so we won't call disable hooks twice.
1055 		 */
1056 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1057 		drm_atomic_bridge_chain_disable(bridge, old_state);
1058 
1059 		/* Right function depends upon target state. */
1060 		if (funcs) {
1061 			if (funcs->atomic_disable)
1062 				funcs->atomic_disable(encoder, old_state);
1063 			else if (new_conn_state->crtc && funcs->prepare)
1064 				funcs->prepare(encoder);
1065 			else if (funcs->disable)
1066 				funcs->disable(encoder);
1067 			else if (funcs->dpms)
1068 				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1069 		}
1070 
1071 		drm_atomic_bridge_chain_post_disable(bridge, old_state);
1072 	}
1073 
1074 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1075 		const struct drm_crtc_helper_funcs *funcs;
1076 		int ret;
1077 
1078 		/* Shut down everything that needs a full modeset. */
1079 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1080 			continue;
1081 
1082 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1083 			continue;
1084 
1085 		funcs = crtc->helper_private;
1086 
1087 		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
1088 				 crtc->base.id, crtc->name);
1089
1091 		/* Right function depends upon target state. */
1092 		if (new_crtc_state->enable && funcs->prepare)
1093 			funcs->prepare(crtc);
1094 		else if (funcs->atomic_disable)
1095 			funcs->atomic_disable(crtc, old_state);
1096 		else if (funcs->disable)
1097 			funcs->disable(crtc);
1098 		else if (funcs->dpms)
1099 			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1100 
1101 		if (!drm_dev_has_vblank(dev))
1102 			continue;
1103 
1104 		ret = drm_crtc_vblank_get(crtc);
1105 		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1106 		if (ret == 0)
1107 			drm_crtc_vblank_put(crtc);
1108 	}
1109 }
1110 
1111 /**
1112  * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1113  * @dev: DRM device
1114  * @old_state: atomic state object with old state structures
1115  *
1116  * This function updates all the various legacy modeset state pointers in
1117  * connectors, encoders and CRTCs.
1118  *
1119  * Drivers can use this for building their own atomic commit if they don't have
1120  * a pure helper-based modeset implementation.
1121  *
1122  * Since these updates are not synchronized by any locking, only code paths
1123  * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1124  * legacy state filled out by this helper. De facto this means this helper and
1125  * the legacy state pointers are only really useful for transitioning an
1126  * existing driver to the atomic world.
1127  */
1128 void
1129 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1130 					      struct drm_atomic_state *old_state)
1131 {
1132 	struct drm_connector *connector;
1133 	struct drm_connector_state *old_conn_state, *new_conn_state;
1134 	struct drm_crtc *crtc;
1135 	struct drm_crtc_state *new_crtc_state;
1136 	int i;
1137 
1138 	/* clear out existing links and update dpms */
1139 	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1140 		if (connector->encoder) {
1141 			WARN_ON(!connector->encoder->crtc);
1142 
1143 			connector->encoder->crtc = NULL;
1144 			connector->encoder = NULL;
1145 		}
1146 
1147 		crtc = new_conn_state->crtc;
1148 		if ((!crtc && old_conn_state->crtc) ||
1149 		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1150 			int mode = DRM_MODE_DPMS_OFF;
1151 
1152 			if (crtc && crtc->state->active)
1153 				mode = DRM_MODE_DPMS_ON;
1154 
1155 			connector->dpms = mode;
1156 		}
1157 	}
1158 
1159 	/* set new links */
1160 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1161 		if (!new_conn_state->crtc)
1162 			continue;
1163 
1164 		if (WARN_ON(!new_conn_state->best_encoder))
1165 			continue;
1166 
1167 		connector->encoder = new_conn_state->best_encoder;
1168 		connector->encoder->crtc = new_conn_state->crtc;
1169 	}
1170 
1171 	/* set legacy state in the crtc structure */
1172 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1173 		struct drm_plane *primary = crtc->primary;
1174 		struct drm_plane_state *new_plane_state;
1175 
1176 		crtc->mode = new_crtc_state->mode;
1177 		crtc->enabled = new_crtc_state->enable;
1178 
1179 		new_plane_state =
1180 			drm_atomic_get_new_plane_state(old_state, primary);
1181 
1182 		if (new_plane_state && new_plane_state->crtc == crtc) {
1183 			crtc->x = new_plane_state->src_x >> 16;
1184 			crtc->y = new_plane_state->src_y >> 16;
1185 		}
1186 	}
1187 }
1188 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1189 
1190 /**
1191  * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1192  * @state: atomic state object
1193  *
1194  * Updates the timestamping constants used for precise vblank timestamps
1195  * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1196  */
1197 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1198 {
1199 	struct drm_crtc_state *new_crtc_state;
1200 	struct drm_crtc *crtc;
1201 	int i;
1202 
1203 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1204 		if (new_crtc_state->enable)
1205 			drm_calc_timestamping_constants(crtc,
1206 							&new_crtc_state->adjusted_mode);
1207 	}
1208 }
1209 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1210 
1211 static void
1212 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1213 {
1214 	struct drm_crtc *crtc;
1215 	struct drm_crtc_state *new_crtc_state;
1216 	struct drm_connector *connector;
1217 	struct drm_connector_state *new_conn_state;
1218 	int i;
1219 
1220 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1221 		const struct drm_crtc_helper_funcs *funcs;
1222 
1223 		if (!new_crtc_state->mode_changed)
1224 			continue;
1225 
1226 		funcs = crtc->helper_private;
1227 
1228 		if (new_crtc_state->enable && funcs->mode_set_nofb) {
1229 			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
1230 					 crtc->base.id, crtc->name);
1231 
1232 			funcs->mode_set_nofb(crtc);
1233 		}
1234 	}
1235 
1236 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1237 		const struct drm_encoder_helper_funcs *funcs;
1238 		struct drm_encoder *encoder;
1239 		struct drm_display_mode *mode, *adjusted_mode;
1240 		struct drm_bridge *bridge;
1241 
1242 		if (!new_conn_state->best_encoder)
1243 			continue;
1244 
1245 		encoder = new_conn_state->best_encoder;
1246 		funcs = encoder->helper_private;
1247 		new_crtc_state = new_conn_state->crtc->state;
1248 		mode = &new_crtc_state->mode;
1249 		adjusted_mode = &new_crtc_state->adjusted_mode;
1250 
1251 		if (!new_crtc_state->mode_changed)
1252 			continue;
1253 
1254 		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
1255 				 encoder->base.id, encoder->name);
1256 
1257 		/*
1258 		 * Each encoder has at most one connector (since we always steal
1259 		 * it away), so we won't call mode_set hooks twice.
1260 		 */
1261 		if (funcs && funcs->atomic_mode_set) {
1262 			funcs->atomic_mode_set(encoder, new_crtc_state,
1263 					       new_conn_state);
1264 		} else if (funcs && funcs->mode_set) {
1265 			funcs->mode_set(encoder, mode, adjusted_mode);
1266 		}
1267 
1268 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1269 		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1270 	}
1271 }
1272 
1273 /**
1274  * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1275  * @dev: DRM device
1276  * @old_state: atomic state object with old state structures
1277  *
1278  * This function shuts down all the outputs that need to be shut down and
1279  * prepares them (if required) with the new mode.
1280  *
1281  * For compatibility with legacy CRTC helpers this should be called before
1282  * drm_atomic_helper_commit_planes(), which is what the default commit function
1283  * does. But drivers with different needs can group the modeset commits together
1284  * and do the plane commits at the end. This is useful for drivers doing runtime
1285  * PM since plane updates then only happen when the CRTC is actually enabled.
1286  */
1287 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1288 					       struct drm_atomic_state *old_state)
1289 {
1290 	disable_outputs(dev, old_state);
1291 
1292 	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1293 	drm_atomic_helper_calc_timestamping_constants(old_state);
1294 
1295 	crtc_set_mode(dev, old_state);
1296 }
1297 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1298 
1299 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1300 						struct drm_atomic_state *old_state)
1301 {
1302 	struct drm_connector *connector;
1303 	struct drm_connector_state *new_conn_state;
1304 	int i;
1305 
1306 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1307 		const struct drm_connector_helper_funcs *funcs;
1308 
1309 		funcs = connector->helper_private;
1310 		if (!funcs->atomic_commit)
1311 			continue;
1312 
1313 		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1314 			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1315 			funcs->atomic_commit(connector, old_state);
1316 		}
1317 	}
1318 }
1319 
1320 /**
1321  * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1322  * @dev: DRM device
1323  * @old_state: atomic state object with old state structures
1324  *
1325  * This function enables all the outputs with the new configuration which had to
1326  * be turned off for the update.
1327  *
1328  * For compatibility with legacy CRTC helpers this should be called after
1329  * drm_atomic_helper_commit_planes(), which is what the default commit function
1330  * does. But drivers with different needs can group the modeset commits together
1331  * and do the plane commits at the end. This is useful for drivers doing runtime
1332  * PM since plane updates then only happen when the CRTC is actually enabled.
1333  */
1334 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1335 					      struct drm_atomic_state *old_state)
1336 {
1337 	struct drm_crtc *crtc;
1338 	struct drm_crtc_state *old_crtc_state;
1339 	struct drm_crtc_state *new_crtc_state;
1340 	struct drm_connector *connector;
1341 	struct drm_connector_state *new_conn_state;
1342 	int i;
1343 
1344 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1345 		const struct drm_crtc_helper_funcs *funcs;
1346 
1347 		/* Need to filter out CRTCs where only planes change. */
1348 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1349 			continue;
1350 
1351 		if (!new_crtc_state->active)
1352 			continue;
1353 
1354 		funcs = crtc->helper_private;
1355 
1356 		if (new_crtc_state->enable) {
1357 			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
1358 					 crtc->base.id, crtc->name);
1359 			if (funcs->atomic_enable)
1360 				funcs->atomic_enable(crtc, old_state);
1361 			else if (funcs->commit)
1362 				funcs->commit(crtc);
1363 		}
1364 	}
1365 
1366 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1367 		const struct drm_encoder_helper_funcs *funcs;
1368 		struct drm_encoder *encoder;
1369 		struct drm_bridge *bridge;
1370 
1371 		if (!new_conn_state->best_encoder)
1372 			continue;
1373 
1374 		if (!new_conn_state->crtc->state->active ||
1375 		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1376 			continue;
1377 
1378 		encoder = new_conn_state->best_encoder;
1379 		funcs = encoder->helper_private;
1380 
1381 		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
1382 				 encoder->base.id, encoder->name);
1383 
1384 		/*
1385 		 * Each encoder has at most one connector (since we always steal
1386 		 * it away), so we won't call enable hooks twice.
1387 		 */
1388 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1389 		drm_atomic_bridge_chain_pre_enable(bridge, old_state);
1390 
1391 		if (funcs) {
1392 			if (funcs->atomic_enable)
1393 				funcs->atomic_enable(encoder, old_state);
1394 			else if (funcs->enable)
1395 				funcs->enable(encoder);
1396 			else if (funcs->commit)
1397 				funcs->commit(encoder);
1398 		}
1399 
1400 		drm_atomic_bridge_chain_enable(bridge, old_state);
1401 	}
1402 
1403 	drm_atomic_helper_commit_writebacks(dev, old_state);
1404 }
1405 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1406 
1407 /**
1408  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1409  * @dev: DRM device
1410  * @state: atomic state object with old state structures
1411  * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1412  * 	Otherwise @state is the old state.
1413  *
1414  * For implicit sync, drivers should fish the exclusive fence out from the
1415  * incoming fbs and stash it in the drm_plane_state.  This is called after
1416  * drm_atomic_helper_swap_state() so it uses the current plane state (and
1417  * just uses the atomic state to find the changed planes).
1418  *
1419  * Note that @pre_swap is needed since the point where we block for fences moves
1420  * around depending upon whether an atomic commit is blocking or
1421  * non-blocking. For non-blocking commit all waiting needs to happen after
1422  * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1423  * to wait **before** we do anything that can't be easily rolled back. That is
1424  * before we call drm_atomic_helper_swap_state().
1425  *
1426  * Returns zero if success or < 0 if dma_fence_wait() fails.
1427  */
1428 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1429 				      struct drm_atomic_state *state,
1430 				      bool pre_swap)
1431 {
1432 	struct drm_plane *plane;
1433 	struct drm_plane_state *new_plane_state;
1434 	int i, ret;
1435 
1436 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1437 		if (!new_plane_state->fence)
1438 			continue;
1439 
1440 		WARN_ON(!new_plane_state->fb);
1441 
1442 		/*
1443 		 * If waiting for fences pre-swap (i.e. in a blocking commit), userspace can
1444 		 * still interrupt the operation. Instead of blocking until the
1445 		 * timer expires, make the wait interruptible.
1446 		 */
1447 		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1448 		if (ret)
1449 			return ret;
1450 
1451 		dma_fence_put(new_plane_state->fence);
1452 		new_plane_state->fence = NULL;
1453 	}
1454 
1455 	return 0;
1456 }
1457 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
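
/*
 * Example (sketch): the "fish the fence out and stash it" step mentioned
 * above is normally done from &drm_plane_helper_funcs.prepare_fb. GEM-based
 * drivers can usually point that hook at a GEM helper which fills in
 * &drm_plane_state.fence from the framebuffer's backing object (in recent
 * kernels drm_gem_plane_helper_prepare_fb()); the foo_* names are placeholders.
 *
 *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
 *		.atomic_check = foo_plane_atomic_check,
 *		.atomic_update = foo_plane_atomic_update,
 *	};
 */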
1458 
1459 /**
1460  * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1461  * @dev: DRM device
1462  * @old_state: atomic state object with old state structures
1463  *
1464  * Helper to, after atomic commit, wait for vblanks on all affected
1465  * CRTCs (ie. before cleaning up old framebuffers using
1466  * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1467  * framebuffers have actually changed to optimize for the legacy cursor and
1468  * plane update use-case.
1469  *
1470  * Drivers using the nonblocking commit tracking support initialized by calling
1471  * drm_atomic_helper_setup_commit() should look at
1472  * drm_atomic_helper_wait_for_flip_done() as an alternative.
1473  */
1474 void
1475 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1476 		struct drm_atomic_state *old_state)
1477 {
1478 	struct drm_crtc *crtc;
1479 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1480 	int i, ret;
1481 	unsigned crtc_mask = 0;
1482 
1483 	 /*
1484 	  * Legacy cursor ioctls are completely unsynced, and userspace
1485 	  * relies on that (by doing tons of cursor updates).
1486 	  */
1487 	if (old_state->legacy_cursor_update)
1488 		return;
1489 
1490 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1491 		if (!new_crtc_state->active)
1492 			continue;
1493 
1494 		ret = drm_crtc_vblank_get(crtc);
1495 		if (ret != 0)
1496 			continue;
1497 
1498 		crtc_mask |= drm_crtc_mask(crtc);
1499 		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1500 	}
1501 
1502 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1503 		if (!(crtc_mask & drm_crtc_mask(crtc)))
1504 			continue;
1505 
1506 		ret = wait_event_timeout(dev->vblank[i].queue,
1507 				old_state->crtcs[i].last_vblank_count !=
1508 					drm_crtc_vblank_count(crtc),
1509 				msecs_to_jiffies(100));
1510 
1511 		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1512 		     crtc->base.id, crtc->name);
1513 
1514 		drm_crtc_vblank_put(crtc);
1515 	}
1516 }
1517 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1518 
1519 /**
1520  * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1521  * @dev: DRM device
1522  * @old_state: atomic state object with old state structures
1523  *
1524  * Helper to, after atomic commit, wait for page flips on all affected
1525  * crtcs (ie. before cleaning up old framebuffers using
1526  * drm_atomic_helper_cleanup_planes()). Compared to
1527  * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1528  * CRTCs, assuming that cursor-only updates are signalling their completion
1529  * immediately (or using a different path).
1530  *
1531  * This requires that drivers use the nonblocking commit tracking support
1532  * initialized using drm_atomic_helper_setup_commit().
1533  */
1534 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1535 					  struct drm_atomic_state *old_state)
1536 {
1537 	struct drm_crtc *crtc;
1538 	int i;
1539 
1540 	for (i = 0; i < dev->mode_config.num_crtc; i++) {
1541 		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1542 		int ret;
1543 
1544 		crtc = old_state->crtcs[i].ptr;
1545 
1546 		if (!crtc || !commit)
1547 			continue;
1548 
1549 		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1550 		if (ret == 0)
1551 			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1552 				  crtc->base.id, crtc->name);
1553 	}
1554 
1555 	if (old_state->fake_commit)
1556 		complete_all(&old_state->fake_commit->flip_done);
1557 }
1558 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1559 
1560 /**
1561  * drm_atomic_helper_commit_tail - commit atomic update to hardware
1562  * @old_state: atomic state object with old state structures
1563  *
1564  * This is the default implementation for the
1565  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1566  * that do not support runtime_pm or do not need the CRTC to be
1567  * enabled to perform a commit. Otherwise, see
1568  * drm_atomic_helper_commit_tail_rpm().
1569  *
1570  * Note that the default ordering of how the various stages are called is
1571  * chosen to match the legacy modeset helper library as closely as possible.
1572  */
1573 void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1574 {
1575 	struct drm_device *dev = old_state->dev;
1576 
1577 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
1578 
1579 	drm_atomic_helper_commit_planes(dev, old_state, 0);
1580 
1581 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
1582 
1583 	drm_atomic_helper_fake_vblank(old_state);
1584 
1585 	drm_atomic_helper_commit_hw_done(old_state);
1586 
1587 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
1588 
1589 	drm_atomic_helper_cleanup_planes(dev, old_state);
1590 }
1591 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
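
/*
 * Example (sketch): a driver overriding the commit tail, here to wait for
 * flip completion instead of vblanks. The foo_* names are placeholders; the
 * helpers called keep the ordering described above.
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 *		drm_atomic_helper_fake_vblank(old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *
 *		// Requires the nonblocking commit tracking set up by
 *		// drm_atomic_helper_setup_commit().
 *		drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *	}
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = foo_atomic_commit_tail,
 *	};
 */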
1592 
1593 /**
1594  * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1595  * @old_state: atomic state object with old state structures
1596  *
1597  * This is an alternative implementation for the
1598  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1599  * that support runtime_pm or need the CRTC to be enabled to perform a
1600  * commit. Otherwise, one should use the default implementation
1601  * drm_atomic_helper_commit_tail().
1602  */
1603 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1604 {
1605 	struct drm_device *dev = old_state->dev;
1606 
1607 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
1608 
1609 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
1610 
1611 	drm_atomic_helper_commit_planes(dev, old_state,
1612 					DRM_PLANE_COMMIT_ACTIVE_ONLY);
1613 
1614 	drm_atomic_helper_fake_vblank(old_state);
1615 
1616 	drm_atomic_helper_commit_hw_done(old_state);
1617 
1618 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
1619 
1620 	drm_atomic_helper_cleanup_planes(dev, old_state);
1621 }
1622 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
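
/*
 * Example (illustrative sketch only, foo_ names are made up): drivers select
 * the commit tail implementation through &drm_mode_config_helper_funcs, e.g.
 * during driver initialization:
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	...
 *	dev->mode_config.helper_private = &foo_mode_config_helpers;
 *
 * Leaving &drm_mode_config.helper_private unset (or the hook NULL) makes
 * commit_tail() fall back to drm_atomic_helper_commit_tail().
 */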
1623 
1624 static void commit_tail(struct drm_atomic_state *old_state)
1625 {
1626 	struct drm_device *dev = old_state->dev;
1627 	const struct drm_mode_config_helper_funcs *funcs;
1628 	struct drm_crtc_state *new_crtc_state;
1629 	struct drm_crtc *crtc;
1630 	ktime_t start;
1631 	s64 commit_time_ms;
1632 	unsigned int i, new_self_refresh_mask = 0;
1633 
1634 	funcs = dev->mode_config.helper_private;
1635 
1636 	/*
1637 	 * We're measuring the _entire_ commit, so the time will vary depending
1638 	 * on how many fences and objects are involved. For the purposes of self
1639 	 * refresh, this is desirable since it'll give us an idea of how
1640 	 * congested things are. This will inform our decision on how often we
1641 	 * should enter self refresh after idle.
1642 	 *
1643 	 * These times will be averaged out in the self refresh helpers to avoid
	 * overreacting to one outlier frame.
1645 	 */
1646 	start = ktime_get();
1647 
1648 	drm_atomic_helper_wait_for_fences(dev, old_state, false);
1649 
1650 	drm_atomic_helper_wait_for_dependencies(old_state);
1651 
1652 	/*
1653 	 * We cannot safely access new_crtc_state after
	 * drm_atomic_helper_commit_hw_done(), so figure out which CRTCs have
1655 	 * self-refresh active beforehand:
1656 	 */
1657 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1658 		if (new_crtc_state->self_refresh_active)
1659 			new_self_refresh_mask |= BIT(i);
1660 
1661 	if (funcs && funcs->atomic_commit_tail)
1662 		funcs->atomic_commit_tail(old_state);
1663 	else
1664 		drm_atomic_helper_commit_tail(old_state);
1665 
1666 	commit_time_ms = ktime_ms_delta(ktime_get(), start);
1667 	if (commit_time_ms > 0)
1668 		drm_self_refresh_helper_update_avg_times(old_state,
1669 						 (unsigned long)commit_time_ms,
1670 						 new_self_refresh_mask);
1671 
1672 	drm_atomic_helper_commit_cleanup_done(old_state);
1673 
1674 	drm_atomic_state_put(old_state);
1675 }
1676 
1677 static void commit_work(struct work_struct *work)
1678 {
1679 	struct drm_atomic_state *state = container_of(work,
1680 						      struct drm_atomic_state,
1681 						      commit_work);
1682 	commit_tail(state);
1683 }
1684 
1685 /**
 * drm_atomic_helper_async_check - check if state can be committed asynchronously
1687  * @dev: DRM device
1688  * @state: the driver state object
1689  *
1690  * This helper will check if it is possible to commit the state asynchronously.
1691  * Async commits are not supposed to swap the states like normal sync commits
1692  * but just do in-place changes on the current state.
1693  *
 * It will return 0 if the commit can happen in an asynchronous fashion, or a
 * negative error code if not. Note that an error just means the state can't be
 * committed asynchronously; if this check fails, the commit should be treated
 * like a normal synchronous commit.
1697  */
1698 int drm_atomic_helper_async_check(struct drm_device *dev,
1699 				   struct drm_atomic_state *state)
1700 {
1701 	struct drm_crtc *crtc;
1702 	struct drm_crtc_state *crtc_state;
1703 	struct drm_plane *plane = NULL;
1704 	struct drm_plane_state *old_plane_state = NULL;
1705 	struct drm_plane_state *new_plane_state = NULL;
1706 	const struct drm_plane_helper_funcs *funcs;
1707 	int i, n_planes = 0;
1708 
1709 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1710 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1711 			return -EINVAL;
1712 	}
1713 
1714 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1715 		n_planes++;
1716 
1717 	/* FIXME: we support only single plane updates for now */
1718 	if (n_planes != 1)
1719 		return -EINVAL;
1720 
1721 	if (!new_plane_state->crtc ||
1722 	    old_plane_state->crtc != new_plane_state->crtc)
1723 		return -EINVAL;
1724 
1725 	funcs = plane->helper_private;
1726 	if (!funcs->atomic_async_update)
1727 		return -EINVAL;
1728 
1729 	if (new_plane_state->fence)
1730 		return -EINVAL;
1731 
1732 	/*
1733 	 * Don't do an async update if there is an outstanding commit modifying
1734 	 * the plane.  This prevents our async update's changes from getting
1735 	 * overridden by a previous synchronous update's state.
1736 	 */
1737 	if (old_plane_state->commit &&
1738 	    !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
1739 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n",
1740 			plane->base.id, plane->name);
1741 		return -EBUSY;
1742 	}
1743 
1744 	return funcs->atomic_async_check(plane, state);
1745 }
1746 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1747 
1748 /**
1749  * drm_atomic_helper_async_commit - commit state asynchronously
1750  * @dev: DRM device
1751  * @state: the driver state object
1752  *
1753  * This function commits a state asynchronously, i.e., not vblank
1754  * synchronized. It should be used on a state only when
 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1756  * the states like normal sync commits, but just do in-place changes on the
1757  * current state.
1758  *
1759  * TODO: Implement full swap instead of doing in-place changes.
1760  */
1761 void drm_atomic_helper_async_commit(struct drm_device *dev,
1762 				    struct drm_atomic_state *state)
1763 {
1764 	struct drm_plane *plane;
1765 	struct drm_plane_state *plane_state;
1766 	const struct drm_plane_helper_funcs *funcs;
1767 	int i;
1768 
1769 	for_each_new_plane_in_state(state, plane, plane_state, i) {
1770 		struct drm_framebuffer *new_fb = plane_state->fb;
1771 		struct drm_framebuffer *old_fb = plane->state->fb;
1772 
1773 		funcs = plane->helper_private;
1774 		funcs->atomic_async_update(plane, state);
1775 
1776 		/*
1777 		 * ->atomic_async_update() is supposed to update the
		 * plane->state in-place; make sure at least the common
		 * properties have been properly updated.
1780 		 */
1781 		WARN_ON_ONCE(plane->state->fb != new_fb);
1782 		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1783 		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1784 		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1785 		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1786 
1787 		/*
1788 		 * Make sure the FBs have been swapped so that cleanups in the
		 * new_state perform a cleanup of the old FB.
1790 		 */
1791 		WARN_ON_ONCE(plane_state->fb != old_fb);
1792 	}
1793 }
1794 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
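
/*
 * Example (illustrative sketch only, the foo_ hooks are hypothetical and
 * driver specific): async updates are only attempted for planes whose
 * &drm_plane_helper_funcs provide both the async check and update hooks,
 * typically cursor planes:
 *
 *	static const struct drm_plane_helper_funcs foo_cursor_helper_funcs = {
 *		.atomic_check = foo_cursor_atomic_check,
 *		.atomic_update = foo_cursor_atomic_update,
 *		.atomic_async_check = foo_cursor_atomic_async_check,
 *		.atomic_async_update = foo_cursor_atomic_async_update,
 *	};
 */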
1795 
1796 /**
1797  * drm_atomic_helper_commit - commit validated state object
1798  * @dev: DRM device
1799  * @state: the driver state object
1800  * @nonblock: whether nonblocking behavior is requested.
1801  *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. This
1804  * function implements nonblocking commits, using
1805  * drm_atomic_helper_setup_commit() and related functions.
1806  *
1807  * Committing the actual hardware state is done through the
1808  * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
1809  * implementation drm_atomic_helper_commit_tail().
1810  *
1811  * RETURNS:
1812  * Zero for success or -errno.
1813  */
1814 int drm_atomic_helper_commit(struct drm_device *dev,
1815 			     struct drm_atomic_state *state,
1816 			     bool nonblock)
1817 {
1818 	int ret;
1819 
1820 	if (state->async_update) {
1821 		ret = drm_atomic_helper_prepare_planes(dev, state);
1822 		if (ret)
1823 			return ret;
1824 
1825 		drm_atomic_helper_async_commit(dev, state);
1826 		drm_atomic_helper_cleanup_planes(dev, state);
1827 
1828 		return 0;
1829 	}
1830 
1831 	ret = drm_atomic_helper_setup_commit(state, nonblock);
1832 	if (ret)
1833 		return ret;
1834 
1835 	INIT_WORK(&state->commit_work, commit_work);
1836 
1837 	ret = drm_atomic_helper_prepare_planes(dev, state);
1838 	if (ret)
1839 		return ret;
1840 
1841 	if (!nonblock) {
1842 		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1843 		if (ret)
1844 			goto err;
1845 	}
1846 
1847 	/*
1848 	 * This is the point of no return - everything below never fails except
1849 	 * when the hw goes bonghits. Which means we can commit the new state on
1850 	 * the software side now.
1851 	 */
1852 
1853 	ret = drm_atomic_helper_swap_state(state, true);
1854 	if (ret)
1855 		goto err;
1856 
1857 	/*
1858 	 * Everything below can be run asynchronously without the need to grab
1859 	 * any modeset locks at all under one condition: It must be guaranteed
1860 	 * that the asynchronous work has either been cancelled (if the driver
1861 	 * supports it, which at least requires that the framebuffers get
1862 	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
1863 	 * before the new state gets committed on the software side with
1864 	 * drm_atomic_helper_swap_state().
1865 	 *
1866 	 * This scheme allows new atomic state updates to be prepared and
1867 	 * checked in parallel to the asynchronous completion of the previous
1868 	 * update. Which is important since compositors need to figure out the
1869 	 * composition of the next frame right after having submitted the
1870 	 * current layout.
1871 	 *
1872 	 * NOTE: Commit work has multiple phases, first hardware commit, then
1873 	 * cleanup. We want them to overlap, hence need system_unbound_wq to
	 * make sure work items don't artificially stall on each other.
1875 	 */
1876 
1877 	drm_atomic_state_get(state);
1878 	if (nonblock)
1879 		queue_work(system_unbound_wq, &state->commit_work);
1880 	else
1881 		commit_tail(state);
1882 
1883 	return 0;
1884 
1885 err:
1886 	drm_atomic_helper_cleanup_planes(dev, state);
1887 	return ret;
1888 }
1889 EXPORT_SYMBOL(drm_atomic_helper_commit);
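
/*
 * Example (illustrative sketch only): a driver relying entirely on the default
 * check and commit implementations wires them up in &drm_mode_config_funcs.
 * The .fb_create choice (here the GEM framebuffer helper) is an assumption and
 * depends on the driver:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */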
1890 
1891 /**
1892  * DOC: implementing nonblocking commit
1893  *
1894  * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
 * different operations against each other. Locks, especially struct
1896  * &drm_modeset_lock, should not be held in worker threads or any other
1897  * asynchronous context used to commit the hardware state.
1898  *
1899  * drm_atomic_helper_commit() implements the recommended sequence for
1900  * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
1901  *
1902  * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
1903  * need to propagate out of memory/VRAM errors to userspace, it must be called
1904  * synchronously.
1905  *
1906  * 2. Synchronize with any outstanding nonblocking commit worker threads which
1907  * might be affected by the new state update. This is handled by
1908  * drm_atomic_helper_setup_commit().
1909  *
1910  * Asynchronous workers need to have sufficient parallelism to be able to run
1911  * different atomic commits on different CRTCs in parallel. The simplest way to
1912  * achieve this is by running them on the &system_unbound_wq work queue. Note
 * that drivers are not required to split up atomic commits and run an
 * individual commit in parallel - userspace is supposed to do that if it cares.
 * It might nevertheless be beneficial to do that for modesets, since those
 * necessarily must be done as one global operation and enabling or disabling a
 * CRTC can take a long time. But even that is not required.
1918  *
1919  * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
1920  * against all CRTCs therein. Therefore for atomic state updates which only flip
1921  * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
1922  * in its atomic check code: This would prevent committing of atomic updates to
1923  * multiple CRTCs in parallel. In general, adding additional state structures
1924  * should be avoided as much as possible, because this reduces parallelism in
1925  * (nonblocking) commits, both due to locking and due to commit sequencing
1926  * requirements.
1927  *
1928  * 3. The software state is updated synchronously with
1929  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1930  * locks means concurrent callers never see inconsistent state. Note that commit
1931  * workers do not hold any locks; their access is only coordinated through
1932  * ordering. If workers would access state only through the pointers in the
1933  * free-standing state objects (currently not the case for any driver) then even
1934  * multiple pending commits could be in-flight at the same time.
1935  *
1936  * 4. Schedule a work item to do all subsequent steps, using the split-out
1937  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1938  * then cleaning up the framebuffers after the old framebuffer is no longer
1939  * being displayed. The scheduled work should synchronize against other workers
1940  * using the &drm_crtc_commit infrastructure as needed. See
1941  * drm_atomic_helper_setup_commit() for more details.
1942  */
1943 
1944 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1945 {
1946 	struct drm_crtc_commit *commit, *stall_commit = NULL;
1947 	bool completed = true;
1948 	int i;
1949 	long ret = 0;
1950 
1951 	spin_lock(&crtc->commit_lock);
1952 	i = 0;
1953 	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1954 		if (i == 0) {
1955 			completed = try_wait_for_completion(&commit->flip_done);
1956 			/* Userspace is not allowed to get ahead of the previous
1957 			 * commit with nonblocking ones. */
1958 			if (!completed && nonblock) {
1959 				spin_unlock(&crtc->commit_lock);
1960 				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
1961 					crtc->base.id, crtc->name);
1962 
1963 				return -EBUSY;
1964 			}
1965 		} else if (i == 1) {
1966 			stall_commit = drm_crtc_commit_get(commit);
1967 			break;
1968 		}
1969 
1970 		i++;
1971 	}
1972 	spin_unlock(&crtc->commit_lock);
1973 
1974 	if (!stall_commit)
1975 		return 0;
1976 
1977 	/* We don't want to let commits get ahead of cleanup work too much,
	 * stalling on the 2nd previous commit means triple-buffering never stalls.
1979 	 */
1980 	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1981 							10*HZ);
1982 	if (ret == 0)
1983 		DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1984 			  crtc->base.id, crtc->name);
1985 
1986 	drm_crtc_commit_put(stall_commit);
1987 
1988 	return ret < 0 ? ret : 0;
1989 }
1990 
1991 static void release_crtc_commit(struct completion *completion)
1992 {
1993 	struct drm_crtc_commit *commit = container_of(completion,
1994 						      typeof(*commit),
1995 						      flip_done);
1996 
1997 	drm_crtc_commit_put(commit);
1998 }
1999 
2000 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2001 {
2002 	init_completion(&commit->flip_done);
2003 	init_completion(&commit->hw_done);
2004 	init_completion(&commit->cleanup_done);
2005 	INIT_LIST_HEAD(&commit->commit_entry);
2006 	kref_init(&commit->ref);
2007 	commit->crtc = crtc;
2008 }
2009 
2010 static struct drm_crtc_commit *
2011 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2012 {
2013 	if (crtc) {
2014 		struct drm_crtc_state *new_crtc_state;
2015 
2016 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2017 
2018 		return new_crtc_state->commit;
2019 	}
2020 
2021 	if (!state->fake_commit) {
2022 		state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2023 		if (!state->fake_commit)
2024 			return NULL;
2025 
2026 		init_commit(state->fake_commit, NULL);
2027 	}
2028 
2029 	return state->fake_commit;
2030 }
2031 
2032 /**
2033  * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2034  * @state: new modeset state to be committed
2035  * @nonblock: whether nonblocking behavior is requested.
2036  *
2037  * This function prepares @state to be used by the atomic helper's support for
2038  * nonblocking commits. Drivers using the nonblocking commit infrastructure
2039  * should always call this function from their
2040  * &drm_mode_config_funcs.atomic_commit hook.
2041  *
2042  * Drivers that need to extend the commit setup to private objects can use the
2043  * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2044  *
2045  * To be able to use this support drivers need to use a few more helper
2046  * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2047  * actually committing the hardware state, and for nonblocking commits this call
2048  * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2049  * and its stall parameter, for when a driver's commit hooks look at the
2050  * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2051  *
2052  * Completion of the hardware commit step must be signalled using
2053  * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2054  * to read or change any permanent software or hardware modeset state. The only
2055  * exception is state protected by other means than &drm_modeset_lock locks.
2056  * Only the free standing @state with pointers to the old state structures can
2057  * be inspected, e.g. to clean up old buffers using
2058  * drm_atomic_helper_cleanup_planes().
2059  *
2060  * At the very end, before cleaning up @state drivers must call
2061  * drm_atomic_helper_commit_cleanup_done().
2062  *
 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
2064  * complete and easy-to-use default implementation of the atomic_commit() hook.
2065  *
2066  * The tracking of asynchronously executed and still pending commits is done
2067  * using the core structure &drm_crtc_commit.
2068  *
2069  * By default there's no need to clean up resources allocated by this function
2070  * explicitly: drm_atomic_state_default_clear() will take care of that
2071  * automatically.
2072  *
2073  * Returns:
2074  *
2075  * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2076  * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2077  */
2078 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2079 				   bool nonblock)
2080 {
2081 	struct drm_crtc *crtc;
2082 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2083 	struct drm_connector *conn;
2084 	struct drm_connector_state *old_conn_state, *new_conn_state;
2085 	struct drm_plane *plane;
2086 	struct drm_plane_state *old_plane_state, *new_plane_state;
2087 	struct drm_crtc_commit *commit;
2088 	const struct drm_mode_config_helper_funcs *funcs;
2089 	int i, ret;
2090 
2091 	funcs = state->dev->mode_config.helper_private;
2092 
2093 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2094 		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2095 		if (!commit)
2096 			return -ENOMEM;
2097 
2098 		init_commit(commit, crtc);
2099 
2100 		new_crtc_state->commit = commit;
2101 
2102 		ret = stall_checks(crtc, nonblock);
2103 		if (ret)
2104 			return ret;
2105 
2106 		/* Drivers only send out events when at least either current or
2107 		 * new CRTC state is active. Complete right away if everything
2108 		 * stays off. */
2109 		if (!old_crtc_state->active && !new_crtc_state->active) {
2110 			complete_all(&commit->flip_done);
2111 			continue;
2112 		}
2113 
2114 		/* Legacy cursor updates are fully unsynced. */
2115 		if (state->legacy_cursor_update) {
2116 			complete_all(&commit->flip_done);
2117 			continue;
2118 		}
2119 
2120 		if (!new_crtc_state->event) {
2121 			commit->event = kzalloc(sizeof(*commit->event),
2122 						GFP_KERNEL);
2123 			if (!commit->event)
2124 				return -ENOMEM;
2125 
2126 			new_crtc_state->event = commit->event;
2127 		}
2128 
2129 		new_crtc_state->event->base.completion = &commit->flip_done;
2130 		new_crtc_state->event->base.completion_release = release_crtc_commit;
2131 		drm_crtc_commit_get(commit);
2132 
2133 		commit->abort_completion = true;
2134 
2135 		state->crtcs[i].commit = commit;
2136 		drm_crtc_commit_get(commit);
2137 	}
2138 
2139 	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2140 		/* Userspace is not allowed to get ahead of the previous
2141 		 * commit with nonblocking ones. */
2142 		if (nonblock && old_conn_state->commit &&
2143 		    !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2144 			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
2145 				conn->base.id, conn->name);
2146 
2147 			return -EBUSY;
2148 		}
2149 
2150 		/* Always track connectors explicitly for e.g. link retraining. */
2151 		commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2152 		if (!commit)
2153 			return -ENOMEM;
2154 
2155 		new_conn_state->commit = drm_crtc_commit_get(commit);
2156 	}
2157 
2158 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2159 		/* Userspace is not allowed to get ahead of the previous
2160 		 * commit with nonblocking ones. */
2161 		if (nonblock && old_plane_state->commit &&
2162 		    !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2163 			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
2164 				plane->base.id, plane->name);
2165 
2166 			return -EBUSY;
2167 		}
2168 
2169 		/* Always track planes explicitly for async pageflip support. */
2170 		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2171 		if (!commit)
2172 			return -ENOMEM;
2173 
2174 		new_plane_state->commit = drm_crtc_commit_get(commit);
2175 	}
2176 
2177 	if (funcs && funcs->atomic_commit_setup)
2178 		return funcs->atomic_commit_setup(state);
2179 
2180 	return 0;
2181 }
2182 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
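
/*
 * Example (illustrative sketch only, modelled on drm_atomic_helper_commit()
 * above; foo_commit_work and foo_commit_tail are hypothetical driver
 * functions): a driver-private &drm_mode_config_funcs.atomic_commit hook built
 * on top of this function follows the same skeleton. The driver's commit tail
 * must end with drm_atomic_helper_commit_cleanup_done() and must drop the
 * extra state reference with drm_atomic_state_put().
 *
 *	static int foo_atomic_commit(struct drm_device *dev,
 *				     struct drm_atomic_state *state,
 *				     bool nonblock)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_setup_commit(state, nonblock);
 *		if (ret)
 *			return ret;
 *
 *		INIT_WORK(&state->commit_work, foo_commit_work);
 *
 *		ret = drm_atomic_helper_prepare_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		if (!nonblock) {
 *			ret = drm_atomic_helper_wait_for_fences(dev, state, true);
 *			if (ret)
 *				goto err;
 *		}
 *
 *		ret = drm_atomic_helper_swap_state(state, true);
 *		if (ret)
 *			goto err;
 *
 *		drm_atomic_state_get(state);
 *		if (nonblock)
 *			queue_work(system_unbound_wq, &state->commit_work);
 *		else
 *			foo_commit_tail(state);
 *
 *		return 0;
 *
 *	err:
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *		return ret;
 *	}
 */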
2183 
2184 /**
 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2186  * @old_state: atomic state object with old state structures
2187  *
 * This function waits for all preceding commits that touch the same CRTC as
2189  * @old_state to both be committed to the hardware (as signalled by
2190  * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2191  * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2192  *
2193  * This is part of the atomic helper support for nonblocking commits, see
2194  * drm_atomic_helper_setup_commit() for an overview.
2195  */
2196 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2197 {
2198 	struct drm_crtc *crtc;
2199 	struct drm_crtc_state *old_crtc_state;
2200 	struct drm_plane *plane;
2201 	struct drm_plane_state *old_plane_state;
2202 	struct drm_connector *conn;
2203 	struct drm_connector_state *old_conn_state;
2204 	int i;
2205 	long ret;
2206 
2207 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2208 		ret = drm_crtc_commit_wait(old_crtc_state->commit);
2209 		if (ret)
2210 			DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n",
2211 				  crtc->base.id, crtc->name);
2212 	}
2213 
2214 	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2215 		ret = drm_crtc_commit_wait(old_conn_state->commit);
2216 		if (ret)
2217 			DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n",
2218 				  conn->base.id, conn->name);
2219 	}
2220 
2221 	for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2222 		ret = drm_crtc_commit_wait(old_plane_state->commit);
2223 		if (ret)
2224 			DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n",
2225 				  plane->base.id, plane->name);
2226 	}
2227 }
2228 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2229 
2230 /**
2231  * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2232  * @old_state: atomic state object with old state structures
2233  *
2234  * This function walks all CRTCs and fakes VBLANK events on those with
2235  * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2236  * The primary use of this function is writeback connectors working in oneshot
2237  * mode and faking VBLANK events. In this case they only fake the VBLANK event
2238  * when a job is queued, and any change to the pipeline that does not touch the
 * connector would otherwise lead to timeouts when calling
2240  * drm_atomic_helper_wait_for_vblanks() or
2241  * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2242  * connectors, this function can also fake VBLANK events for CRTCs without
2243  * VBLANK interrupt.
2244  *
2245  * This is part of the atomic helper support for nonblocking commits, see
2246  * drm_atomic_helper_setup_commit() for an overview.
2247  */
2248 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2249 {
2250 	struct drm_crtc_state *new_crtc_state;
2251 	struct drm_crtc *crtc;
2252 	int i;
2253 
2254 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2255 		unsigned long flags;
2256 
2257 		if (!new_crtc_state->no_vblank)
2258 			continue;
2259 
2260 		spin_lock_irqsave(&old_state->dev->event_lock, flags);
2261 		if (new_crtc_state->event) {
2262 			drm_crtc_send_vblank_event(crtc,
2263 						   new_crtc_state->event);
2264 			new_crtc_state->event = NULL;
2265 		}
2266 		spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2267 	}
2268 }
2269 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2270 
2271 /**
 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2273  * @old_state: atomic state object with old state structures
2274  *
2275  * This function is used to signal completion of the hardware commit step. After
2276  * this step the driver is not allowed to read or change any permanent software
2277  * or hardware modeset state. The only exception is state protected by other
2278  * means than &drm_modeset_lock locks.
2279  *
 * Drivers should try to postpone any expensive or delayed cleanup work until
 * after this function has been called.
2282  *
2283  * This is part of the atomic helper support for nonblocking commits, see
2284  * drm_atomic_helper_setup_commit() for an overview.
2285  */
2286 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2287 {
2288 	struct drm_crtc *crtc;
2289 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2290 	struct drm_crtc_commit *commit;
2291 	int i;
2292 
2293 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2294 		commit = new_crtc_state->commit;
2295 		if (!commit)
2296 			continue;
2297 
2298 		/*
		 * Copy new_crtc_state->commit to old_crtc_state->commit;
		 * it's unsafe to touch new_crtc_state after hw_done, but we
		 * still need the commit in cleanup_done().
2302 		 */
2303 		if (old_crtc_state->commit)
2304 			drm_crtc_commit_put(old_crtc_state->commit);
2305 
2306 		old_crtc_state->commit = drm_crtc_commit_get(commit);
2307 
2308 		/* backend must have consumed any event by now */
2309 		WARN_ON(new_crtc_state->event);
2310 		complete_all(&commit->hw_done);
2311 	}
2312 
2313 	if (old_state->fake_commit) {
2314 		complete_all(&old_state->fake_commit->hw_done);
2315 		complete_all(&old_state->fake_commit->flip_done);
2316 	}
2317 }
2318 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2319 
2320 /**
2321  * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2322  * @old_state: atomic state object with old state structures
2323  *
2324  * This signals completion of the atomic update @old_state, including any
2325  * cleanup work. If used, it must be called right before calling
2326  * drm_atomic_state_put().
2327  *
2328  * This is part of the atomic helper support for nonblocking commits, see
2329  * drm_atomic_helper_setup_commit() for an overview.
2330  */
2331 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2332 {
2333 	struct drm_crtc *crtc;
2334 	struct drm_crtc_state *old_crtc_state;
2335 	struct drm_crtc_commit *commit;
2336 	int i;
2337 
2338 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2339 		commit = old_crtc_state->commit;
2340 		if (WARN_ON(!commit))
2341 			continue;
2342 
2343 		complete_all(&commit->cleanup_done);
2344 		WARN_ON(!try_wait_for_completion(&commit->hw_done));
2345 
2346 		spin_lock(&crtc->commit_lock);
2347 		list_del(&commit->commit_entry);
2348 		spin_unlock(&crtc->commit_lock);
2349 	}
2350 
2351 	if (old_state->fake_commit) {
2352 		complete_all(&old_state->fake_commit->cleanup_done);
2353 		WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
2354 	}
2355 }
2356 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2357 
2358 /**
2359  * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2360  * @dev: DRM device
2361  * @state: atomic state object with new state structures
2362  *
2363  * This function prepares plane state, specifically framebuffers, for the new
2364  * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2365  * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2366  * any already successfully prepared framebuffer.
2367  *
2368  * Returns:
2369  * 0 on success, negative error code on failure.
2370  */
2371 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2372 				     struct drm_atomic_state *state)
2373 {
2374 	struct drm_connector *connector;
2375 	struct drm_connector_state *new_conn_state;
2376 	struct drm_plane *plane;
2377 	struct drm_plane_state *new_plane_state;
2378 	int ret, i, j;
2379 
2380 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2381 		if (!new_conn_state->writeback_job)
2382 			continue;
2383 
2384 		ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2385 		if (ret < 0)
2386 			return ret;
2387 	}
2388 
2389 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2390 		const struct drm_plane_helper_funcs *funcs;
2391 
2392 		funcs = plane->helper_private;
2393 
2394 		if (funcs->prepare_fb) {
2395 			ret = funcs->prepare_fb(plane, new_plane_state);
2396 			if (ret)
2397 				goto fail;
2398 		}
2399 	}
2400 
2401 	return 0;
2402 
2403 fail:
2404 	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2405 		const struct drm_plane_helper_funcs *funcs;
2406 
2407 		if (j >= i)
2408 			continue;
2409 
2410 		funcs = plane->helper_private;
2411 
2412 		if (funcs->cleanup_fb)
2413 			funcs->cleanup_fb(plane, new_plane_state);
2414 	}
2415 
2416 	return ret;
2417 }
2418 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
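
/*
 * Example (illustrative sketch only, assuming the GEM atomic helper
 * drm_gem_plane_helper_prepare_fb() is available in this tree and that the
 * foo_ hooks exist): GEM-backed drivers often point .prepare_fb at the GEM
 * helper, which sets up the plane state's fence from the framebuffer's buffer
 * objects so that the commit machinery can wait for it:
 *
 *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
 *		.atomic_check = foo_plane_atomic_check,
 *		.atomic_update = foo_plane_atomic_update,
 *	};
 */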
2419 
2420 static bool plane_crtc_active(const struct drm_plane_state *state)
2421 {
2422 	return state->crtc && state->crtc->state->active;
2423 }
2424 
2425 /**
2426  * drm_atomic_helper_commit_planes - commit plane state
2427  * @dev: DRM device
2428  * @old_state: atomic state object with old state structures
2429  * @flags: flags for committing plane state
2430  *
2431  * This function commits the new plane state using the plane and atomic helper
2432  * functions for planes and CRTCs. It assumes that the atomic state has already
2433  * been pushed into the relevant object state pointers, since this step can no
2434  * longer fail.
2435  *
2436  * It still requires the global state object @old_state to know which planes and
2437  * crtcs need to be updated though.
2438  *
2439  * Note that this function does all plane updates across all CRTCs in one step.
2440  * If the hardware can't support this approach look at
2441  * drm_atomic_helper_commit_planes_on_crtc() instead.
2442  *
2443  * Plane parameters can be updated by applications while the associated CRTC is
2444  * disabled. The DRM/KMS core will store the parameters in the plane state,
2445  * which will be available to the driver when the CRTC is turned on. As a result
2446  * most drivers don't need to be immediately notified of plane updates for a
2447  * disabled CRTC.
2448  *
2449  * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2450  * @flags in order not to receive plane update notifications related to a
2451  * disabled CRTC. This avoids the need to manually ignore plane updates in
2452  * driver code when the driver and/or hardware can't or just don't need to deal
2453  * with updates on disabled CRTCs, for example when supporting runtime PM.
2454  *
 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
 * display controllers require the CRTC's planes to be disabled together with
 * the CRTC itself. With this flag set, the function skips the
 * &drm_plane_helper_funcs.atomic_disable call for a plane if the CRTC of the
 * old plane state needs a modeset operation. Of course, the drivers then need
 * to disable the planes in their CRTC disable callbacks, since no one else
 * would do that.
2461  *
2462  * The drm_atomic_helper_commit() default implementation doesn't set the
2463  * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2464  * This should not be copied blindly by drivers.
2465  */
2466 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2467 				     struct drm_atomic_state *old_state,
2468 				     uint32_t flags)
2469 {
2470 	struct drm_crtc *crtc;
2471 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2472 	struct drm_plane *plane;
2473 	struct drm_plane_state *old_plane_state, *new_plane_state;
2474 	int i;
2475 	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2476 	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2477 
2478 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2479 		const struct drm_crtc_helper_funcs *funcs;
2480 
2481 		funcs = crtc->helper_private;
2482 
2483 		if (!funcs || !funcs->atomic_begin)
2484 			continue;
2485 
2486 		if (active_only && !new_crtc_state->active)
2487 			continue;
2488 
2489 		funcs->atomic_begin(crtc, old_state);
2490 	}
2491 
2492 	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2493 		const struct drm_plane_helper_funcs *funcs;
2494 		bool disabling;
2495 
2496 		funcs = plane->helper_private;
2497 
2498 		if (!funcs)
2499 			continue;
2500 
2501 		disabling = drm_atomic_plane_disabling(old_plane_state,
2502 						       new_plane_state);
2503 
2504 		if (active_only) {
2505 			/*
2506 			 * Skip planes related to inactive CRTCs. If the plane
2507 			 * is enabled use the state of the current CRTC. If the
2508 			 * plane is being disabled use the state of the old
2509 			 * CRTC to avoid skipping planes being disabled on an
2510 			 * active CRTC.
2511 			 */
2512 			if (!disabling && !plane_crtc_active(new_plane_state))
2513 				continue;
2514 			if (disabling && !plane_crtc_active(old_plane_state))
2515 				continue;
2516 		}
2517 
2518 		/*
2519 		 * Special-case disabling the plane if drivers support it.
2520 		 */
2521 		if (disabling && funcs->atomic_disable) {
2522 			struct drm_crtc_state *crtc_state;
2523 
2524 			crtc_state = old_plane_state->crtc->state;
2525 
2526 			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2527 			    no_disable)
2528 				continue;
2529 
2530 			funcs->atomic_disable(plane, old_state);
2531 		} else if (new_plane_state->crtc || disabling) {
2532 			funcs->atomic_update(plane, old_state);
2533 		}
2534 	}
2535 
2536 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2537 		const struct drm_crtc_helper_funcs *funcs;
2538 
2539 		funcs = crtc->helper_private;
2540 
2541 		if (!funcs || !funcs->atomic_flush)
2542 			continue;
2543 
2544 		if (active_only && !new_crtc_state->active)
2545 			continue;
2546 
2547 		funcs->atomic_flush(crtc, old_state);
2548 	}
2549 }
2550 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
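
/*
 * Example (illustrative fragment only): a driver that both supports runtime PM
 * and lets its CRTC disable path take down the CRTC's planes would combine the
 * two flags in its &drm_mode_config_helper_funcs.atomic_commit_tail:
 *
 *	drm_atomic_helper_commit_planes(dev, old_state,
 *					DRM_PLANE_COMMIT_ACTIVE_ONLY |
 *					DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
 */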
2551 
2552 /**
2553  * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2554  * @old_crtc_state: atomic state object with the old CRTC state
2555  *
2556  * This function commits the new plane state using the plane and atomic helper
2557  * functions for planes on the specific CRTC. It assumes that the atomic state
2558  * has already been pushed into the relevant object state pointers, since this
2559  * step can no longer fail.
2560  *
2561  * This function is useful when plane updates should be done CRTC-by-CRTC
2562  * instead of one global step like drm_atomic_helper_commit_planes() does.
2563  *
 * This function can only be safely used when planes are not allowed to move
 * between different CRTCs, because it doesn't handle inter-CRTC dependencies.
 * Callers need to ensure that either no such dependencies exist, or resolve
 * them through ordering of commit calls or through some other means.
2568  */
2569 void
2570 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2571 {
2572 	const struct drm_crtc_helper_funcs *crtc_funcs;
2573 	struct drm_crtc *crtc = old_crtc_state->crtc;
2574 	struct drm_atomic_state *old_state = old_crtc_state->state;
2575 	struct drm_crtc_state *new_crtc_state =
2576 		drm_atomic_get_new_crtc_state(old_state, crtc);
2577 	struct drm_plane *plane;
2578 	unsigned plane_mask;
2579 
2580 	plane_mask = old_crtc_state->plane_mask;
2581 	plane_mask |= new_crtc_state->plane_mask;
2582 
2583 	crtc_funcs = crtc->helper_private;
2584 	if (crtc_funcs && crtc_funcs->atomic_begin)
2585 		crtc_funcs->atomic_begin(crtc, old_state);
2586 
2587 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2588 		struct drm_plane_state *old_plane_state =
2589 			drm_atomic_get_old_plane_state(old_state, plane);
2590 		struct drm_plane_state *new_plane_state =
2591 			drm_atomic_get_new_plane_state(old_state, plane);
2592 		const struct drm_plane_helper_funcs *plane_funcs;
2593 
2594 		plane_funcs = plane->helper_private;
2595 
2596 		if (!old_plane_state || !plane_funcs)
2597 			continue;
2598 
2599 		WARN_ON(new_plane_state->crtc &&
2600 			new_plane_state->crtc != crtc);
2601 
2602 		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2603 		    plane_funcs->atomic_disable)
2604 			plane_funcs->atomic_disable(plane, old_state);
2605 		else if (new_plane_state->crtc ||
2606 			 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2607 			plane_funcs->atomic_update(plane, old_state);
2608 	}
2609 
2610 	if (crtc_funcs && crtc_funcs->atomic_flush)
2611 		crtc_funcs->atomic_flush(crtc, old_state);
2612 }
2613 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
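
/*
 * Example (illustrative fragment only): a custom commit tail doing CRTC-by-CRTC
 * updates would replace the global drm_atomic_helper_commit_planes() call with
 * a loop over the old CRTC states, with crtc, old_crtc_state and i declared as
 * usual:
 *
 *	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 */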
2614 
2615 /**
2616  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2617  * @old_crtc_state: atomic state object with the old CRTC state
2618  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2619  *
2620  * Disables all planes associated with the given CRTC. This can be
2621  * used for instance in the CRTC helper atomic_disable callback to disable
2622  * all planes.
2623  *
 * If the @atomic parameter is set, the function calls the CRTC's
2625  * atomic_begin hook before and atomic_flush hook after disabling the
2626  * planes.
2627  *
2628  * It is a bug to call this function without having implemented the
2629  * &drm_plane_helper_funcs.atomic_disable plane hook.
2630  */
2631 void
2632 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2633 					 bool atomic)
2634 {
2635 	struct drm_crtc *crtc = old_crtc_state->crtc;
2636 	const struct drm_crtc_helper_funcs *crtc_funcs =
2637 		crtc->helper_private;
2638 	struct drm_plane *plane;
2639 
2640 	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2641 		crtc_funcs->atomic_begin(crtc, NULL);
2642 
2643 	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2644 		const struct drm_plane_helper_funcs *plane_funcs =
2645 			plane->helper_private;
2646 
2647 		if (!plane_funcs)
2648 			continue;
2649 
2650 		WARN_ON(!plane_funcs->atomic_disable);
2651 		if (plane_funcs->atomic_disable)
2652 			plane_funcs->atomic_disable(plane, NULL);
2653 	}
2654 
2655 	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2656 		crtc_funcs->atomic_flush(crtc, NULL);
2657 }
2658 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
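
/*
 * Example (illustrative sketch only, foo_crtc_power_down() is a hypothetical
 * driver function): a typical use is from the CRTC's atomic_disable hook,
 * before powering down the CRTC itself:
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *old_crtc_state =
 *			drm_atomic_get_old_crtc_state(state, crtc);
 *
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *
 *		foo_crtc_power_down(crtc);
 *	}
 */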
2659 
2660 /**
2661  * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2662  * @dev: DRM device
2663  * @old_state: atomic state object with old state structures
2664  *
2665  * This function cleans up plane state, specifically framebuffers, from the old
 * configuration. Hence the old configuration must be preserved in @old_state to
2667  * be able to call this function.
2668  *
2669  * This function must also be called on the new state when the atomic update
2670  * fails at any point after calling drm_atomic_helper_prepare_planes().
2671  */
2672 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2673 				      struct drm_atomic_state *old_state)
2674 {
2675 	struct drm_plane *plane;
2676 	struct drm_plane_state *old_plane_state, *new_plane_state;
2677 	int i;
2678 
2679 	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2680 		const struct drm_plane_helper_funcs *funcs;
2681 		struct drm_plane_state *plane_state;
2682 
2683 		/*
2684 		 * This might be called before swapping when commit is aborted,
2685 		 * in which case we have to cleanup the new state.
2686 		 */
2687 		if (old_plane_state == plane->state)
2688 			plane_state = new_plane_state;
2689 		else
2690 			plane_state = old_plane_state;
2691 
2692 		funcs = plane->helper_private;
2693 
2694 		if (funcs->cleanup_fb)
2695 			funcs->cleanup_fb(plane, plane_state);
2696 	}
2697 }
2698 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2699 
2700 /**
2701  * drm_atomic_helper_swap_state - store atomic state into current sw state
2702  * @state: atomic state
 * @stall: stall for preceding commits
2704  *
2705  * This function stores the atomic state into the current state pointers in all
 * driver objects. It should be called after all steps that can fail have been
 * completed successfully, but before the actual hardware state is committed.
2708  *
2709  * For cleanup and error recovery the current state for all changed objects will
2710  * be swapped into @state.
2711  *
2712  * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2713  *
2714  * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2715  *
2716  * 2. Do any other steps that might fail.
2717  *
2718  * 3. Put the staged state into the current state pointers with this function.
2719  *
2720  * 4. Actually commit the hardware state.
2721  *
2722  * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2723  * contains the old state. Also do any other cleanup required with that state.
2724  *
2725  * @stall must be set when nonblocking commits for this driver directly access
2726  * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2727  * the current atomic helpers this is almost always the case, since the helpers
2728  * don't pass the right state structures to the callbacks.
2729  *
2730  * Returns:
2731  *
2732  * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2733  * waiting for the previous commits has been interrupted.
2734  */
2735 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2736 				  bool stall)
2737 {
2738 	int i, ret;
2739 	struct drm_connector *connector;
2740 	struct drm_connector_state *old_conn_state, *new_conn_state;
2741 	struct drm_crtc *crtc;
2742 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2743 	struct drm_plane *plane;
2744 	struct drm_plane_state *old_plane_state, *new_plane_state;
2745 	struct drm_crtc_commit *commit;
2746 	struct drm_private_obj *obj;
2747 	struct drm_private_state *old_obj_state, *new_obj_state;
2748 
2749 	if (stall) {
2750 		/*
2751 		 * We have to stall for hw_done here before
2752 		 * drm_atomic_helper_wait_for_dependencies() because flip
2753 		 * depth > 1 is not yet supported by all drivers. As long as
2754 		 * obj->state is directly dereferenced anywhere in the drivers
2755 		 * atomic_commit_tail function, then it's unsafe to swap state
2756 		 * before drm_atomic_helper_commit_hw_done() is called.
2757 		 */
2758 
2759 		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2760 			commit = old_crtc_state->commit;
2761 
2762 			if (!commit)
2763 				continue;
2764 
2765 			ret = wait_for_completion_interruptible(&commit->hw_done);
2766 			if (ret)
2767 				return ret;
2768 		}
2769 
2770 		for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2771 			commit = old_conn_state->commit;
2772 
2773 			if (!commit)
2774 				continue;
2775 
2776 			ret = wait_for_completion_interruptible(&commit->hw_done);
2777 			if (ret)
2778 				return ret;
2779 		}
2780 
2781 		for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2782 			commit = old_plane_state->commit;
2783 
2784 			if (!commit)
2785 				continue;
2786 
2787 			ret = wait_for_completion_interruptible(&commit->hw_done);
2788 			if (ret)
2789 				return ret;
2790 		}
2791 	}
2792 
2793 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2794 		WARN_ON(connector->state != old_conn_state);
2795 
2796 		old_conn_state->state = state;
2797 		new_conn_state->state = NULL;
2798 
2799 		state->connectors[i].state = old_conn_state;
2800 		connector->state = new_conn_state;
2801 	}
2802 
2803 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2804 		WARN_ON(crtc->state != old_crtc_state);
2805 
2806 		old_crtc_state->state = state;
2807 		new_crtc_state->state = NULL;
2808 
2809 		state->crtcs[i].state = old_crtc_state;
2810 		crtc->state = new_crtc_state;
2811 
2812 		if (new_crtc_state->commit) {
2813 			spin_lock(&crtc->commit_lock);
2814 			list_add(&new_crtc_state->commit->commit_entry,
2815 				 &crtc->commit_list);
2816 			spin_unlock(&crtc->commit_lock);
2817 
2818 			new_crtc_state->commit->event = NULL;
2819 		}
2820 	}
2821 
2822 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2823 		WARN_ON(plane->state != old_plane_state);
2824 
2825 		old_plane_state->state = state;
2826 		new_plane_state->state = NULL;
2827 
2828 		state->planes[i].state = old_plane_state;
2829 		plane->state = new_plane_state;
2830 	}
2831 
2832 	for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
2833 		WARN_ON(obj->state != old_obj_state);
2834 
2835 		old_obj_state->state = state;
2836 		new_obj_state->state = NULL;
2837 
2838 		state->private_objs[i].state = old_obj_state;
2839 		obj->state = new_obj_state;
2840 	}
2841 
2842 	return 0;
2843 }
2844 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
2845 
2846 /**
2847  * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2848  * @plane: plane object to update
2849  * @crtc: owning CRTC of owning plane
2850  * @fb: framebuffer to flip onto plane
2851  * @crtc_x: x offset of primary plane on @crtc
2852  * @crtc_y: y offset of primary plane on @crtc
2853  * @crtc_w: width of primary plane rectangle on @crtc
2854  * @crtc_h: height of primary plane rectangle on @crtc
2855  * @src_x: x offset of @fb for panning
2856  * @src_y: y offset of @fb for panning
2857  * @src_w: width of source rectangle in @fb
2858  * @src_h: height of source rectangle in @fb
2859  * @ctx: lock acquire context
2860  *
2861  * Provides a default plane update handler using the atomic driver interface.
2862  *
2863  * RETURNS:
2864  * Zero on success, error code on failure
2865  */
2866 int drm_atomic_helper_update_plane(struct drm_plane *plane,
2867 				   struct drm_crtc *crtc,
2868 				   struct drm_framebuffer *fb,
2869 				   int crtc_x, int crtc_y,
2870 				   unsigned int crtc_w, unsigned int crtc_h,
2871 				   uint32_t src_x, uint32_t src_y,
2872 				   uint32_t src_w, uint32_t src_h,
2873 				   struct drm_modeset_acquire_ctx *ctx)
2874 {
2875 	struct drm_atomic_state *state;
2876 	struct drm_plane_state *plane_state;
2877 	int ret = 0;
2878 
2879 	state = drm_atomic_state_alloc(plane->dev);
2880 	if (!state)
2881 		return -ENOMEM;
2882 
2883 	state->acquire_ctx = ctx;
2884 	plane_state = drm_atomic_get_plane_state(state, plane);
2885 	if (IS_ERR(plane_state)) {
2886 		ret = PTR_ERR(plane_state);
2887 		goto fail;
2888 	}
2889 
2890 	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2891 	if (ret != 0)
2892 		goto fail;
2893 	drm_atomic_set_fb_for_plane(plane_state, fb);
2894 	plane_state->crtc_x = crtc_x;
2895 	plane_state->crtc_y = crtc_y;
2896 	plane_state->crtc_w = crtc_w;
2897 	plane_state->crtc_h = crtc_h;
2898 	plane_state->src_x = src_x;
2899 	plane_state->src_y = src_y;
2900 	plane_state->src_w = src_w;
2901 	plane_state->src_h = src_h;
2902 
2903 	if (plane == crtc->cursor)
2904 		state->legacy_cursor_update = true;
2905 
2906 	ret = drm_atomic_commit(state);
2907 fail:
2908 	drm_atomic_state_put(state);
2909 	return ret;
2910 }
2911 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2912 
2913 /**
 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
2915  * @plane: plane to disable
2916  * @ctx: lock acquire context
2917  *
2918  * Provides a default plane disable handler using the atomic driver interface.
2919  *
2920  * RETURNS:
2921  * Zero on success, error code on failure
2922  */
2923 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
2924 				    struct drm_modeset_acquire_ctx *ctx)
2925 {
2926 	struct drm_atomic_state *state;
2927 	struct drm_plane_state *plane_state;
2928 	int ret = 0;
2929 
2930 	state = drm_atomic_state_alloc(plane->dev);
2931 	if (!state)
2932 		return -ENOMEM;
2933 
2934 	state->acquire_ctx = ctx;
2935 	plane_state = drm_atomic_get_plane_state(state, plane);
2936 	if (IS_ERR(plane_state)) {
2937 		ret = PTR_ERR(plane_state);
2938 		goto fail;
2939 	}
2940 
2941 	if (plane_state->crtc && plane_state->crtc->cursor == plane)
2942 		plane_state->state->legacy_cursor_update = true;
2943 
2944 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2945 	if (ret != 0)
2946 		goto fail;
2947 
2948 	ret = drm_atomic_commit(state);
2949 fail:
2950 	drm_atomic_state_put(state);
2951 	return ret;
2952 }
2953 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
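
/*
 * Example (illustrative sketch only): both legacy plane entry points above are
 * usually wired straight into &drm_plane_funcs, together with the atomic state
 * helpers:
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane = drm_atomic_helper_update_plane,
 *		.disable_plane = drm_atomic_helper_disable_plane,
 *		.destroy = drm_plane_cleanup,
 *		.reset = drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *	};
 */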
2954 
2955 /**
2956  * drm_atomic_helper_set_config - set a new config from userspace
2957  * @set: mode set configuration
2958  * @ctx: lock acquisition context
2959  *
2960  * Provides a default CRTC set_config handler using the atomic driver interface.
2961  *
2962  * NOTE: For backwards compatibility with old userspace this automatically
2963  * resets the "link-status" property to GOOD, to force any link
 * re-training. The SETCRTC ioctl does not define whether an update needs
 * a full modeset or just a plane update, hence we're allowed to do
2966  * that. See also drm_connector_set_link_status_property().
2967  *
2968  * Returns:
2969  * Returns 0 on success, negative errno numbers on failure.
2970  */
2971 int drm_atomic_helper_set_config(struct drm_mode_set *set,
2972 				 struct drm_modeset_acquire_ctx *ctx)
2973 {
2974 	struct drm_atomic_state *state;
2975 	struct drm_crtc *crtc = set->crtc;
2976 	int ret = 0;
2977 
2978 	state = drm_atomic_state_alloc(crtc->dev);
2979 	if (!state)
2980 		return -ENOMEM;
2981 
2982 	state->acquire_ctx = ctx;
2983 	ret = __drm_atomic_helper_set_config(set, state);
2984 	if (ret != 0)
2985 		goto fail;
2986 
2987 	ret = handle_conflicting_encoders(state, true);
2988 	if (ret)
2989 		goto fail;
2990 
2991 	ret = drm_atomic_commit(state);
2992 
2993 fail:
2994 	drm_atomic_state_put(state);
2995 	return ret;
2996 }
2997 EXPORT_SYMBOL(drm_atomic_helper_set_config);
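
/*
 * Example (illustrative sketch only): the legacy CRTC entry points map onto the
 * atomic helpers in &drm_crtc_funcs in the same way:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.destroy = drm_crtc_cleanup,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};
 */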
2998 
2999 /**
3000  * drm_atomic_helper_disable_all - disable all currently active outputs
3001  * @dev: DRM device
3002  * @ctx: lock acquisition context
3003  *
3004  * Loops through all connectors, finding those that aren't turned off and then
3005  * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3006  * that they are connected to.
3007  *
3008  * This is used for example in suspend/resume to disable all currently active
3009  * functions when suspending. If you just want to shut down everything at e.g.
3010  * driver unload, look at drm_atomic_helper_shutdown().
3011  *
3012  * Note that if callers haven't already acquired all modeset locks this might
3013  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3014  *
3015  * Returns:
3016  * 0 on success or a negative error code on failure.
3017  *
3018  * See also:
3019  * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3020  * drm_atomic_helper_shutdown().
3021  */
3022 int drm_atomic_helper_disable_all(struct drm_device *dev,
3023 				  struct drm_modeset_acquire_ctx *ctx)
3024 {
3025 	struct drm_atomic_state *state;
3026 	struct drm_connector_state *conn_state;
3027 	struct drm_connector *conn;
3028 	struct drm_plane_state *plane_state;
3029 	struct drm_plane *plane;
3030 	struct drm_crtc_state *crtc_state;
3031 	struct drm_crtc *crtc;
3032 	int ret, i;
3033 
3034 	state = drm_atomic_state_alloc(dev);
3035 	if (!state)
3036 		return -ENOMEM;
3037 
3038 	state->acquire_ctx = ctx;
3039 
3040 	drm_for_each_crtc(crtc, dev) {
3041 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3042 		if (IS_ERR(crtc_state)) {
3043 			ret = PTR_ERR(crtc_state);
3044 			goto free;
3045 		}
3046 
3047 		crtc_state->active = false;
3048 
3049 		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3050 		if (ret < 0)
3051 			goto free;
3052 
3053 		ret = drm_atomic_add_affected_planes(state, crtc);
3054 		if (ret < 0)
3055 			goto free;
3056 
3057 		ret = drm_atomic_add_affected_connectors(state, crtc);
3058 		if (ret < 0)
3059 			goto free;
3060 	}
3061 
3062 	for_each_new_connector_in_state(state, conn, conn_state, i) {
3063 		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3064 		if (ret < 0)
3065 			goto free;
3066 	}
3067 
3068 	for_each_new_plane_in_state(state, plane, plane_state, i) {
3069 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3070 		if (ret < 0)
3071 			goto free;
3072 
3073 		drm_atomic_set_fb_for_plane(plane_state, NULL);
3074 	}
3075 
3076 	ret = drm_atomic_commit(state);
3077 free:
3078 	drm_atomic_state_put(state);
3079 	return ret;
3080 }
3081 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3082 
3083 /**
 * drm_atomic_helper_shutdown - shut down all CRTCs
3085  * @dev: DRM device
3086  *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
3088  * suspend should instead be handled with drm_atomic_helper_suspend(), since
3089  * that also takes a snapshot of the modeset state to be restored on resume.
3090  *
3091  * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3092  * and it is the atomic version of drm_crtc_force_disable_all().
3093  */
3094 void drm_atomic_helper_shutdown(struct drm_device *dev)
3095 {
3096 	struct drm_modeset_acquire_ctx ctx;
3097 	int ret;
3098 
3099 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3100 
3101 	ret = drm_atomic_helper_disable_all(dev, &ctx);
3102 	if (ret)
3103 		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
3104 
3105 	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3106 }
3107 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
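
/*
 * Example (illustrative fragment only): in the driver's unload path this is
 * typically called right after unregistering the device and before the final
 * reference is dropped:
 *
 *	drm_dev_unregister(drm);
 *	drm_atomic_helper_shutdown(drm);
 *	drm_dev_put(drm);
 */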
3108 
3109 /**
3110  * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3111  * @dev: DRM device
3112  * @ctx: lock acquisition context
3113  *
3114  * Makes a copy of the current atomic state by looping over all objects and
3115  * duplicating their respective states. This is used for example by suspend/
3116  * resume support code to save the state prior to suspend such that it can
3117  * be restored upon resume.
3118  *
3119  * Note that this treats atomic state as persistent between save and restore.
3120  * Drivers must make sure that this is possible and won't result in confusion
3121  * or erroneous behaviour.
3122  *
3123  * Note that if callers haven't already acquired all modeset locks this might
3124  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3125  *
3126  * Returns:
3127  * A pointer to the copy of the atomic state object on success or an
3128  * ERR_PTR()-encoded error code on failure.
3129  *
3130  * See also:
3131  * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3132  */
3133 struct drm_atomic_state *
3134 drm_atomic_helper_duplicate_state(struct drm_device *dev,
3135 				  struct drm_modeset_acquire_ctx *ctx)
3136 {
3137 	struct drm_atomic_state *state;
3138 	struct drm_connector *conn;
3139 	struct drm_connector_list_iter conn_iter;
3140 	struct drm_plane *plane;
3141 	struct drm_crtc *crtc;
3142 	int err = 0;
3143 
3144 	state = drm_atomic_state_alloc(dev);
3145 	if (!state)
3146 		return ERR_PTR(-ENOMEM);
3147 
3148 	state->acquire_ctx = ctx;
3149 	state->duplicated = true;
3150 
3151 	drm_for_each_crtc(crtc, dev) {
3152 		struct drm_crtc_state *crtc_state;
3153 
3154 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3155 		if (IS_ERR(crtc_state)) {
3156 			err = PTR_ERR(crtc_state);
3157 			goto free;
3158 		}
3159 	}
3160 
3161 	drm_for_each_plane(plane, dev) {
3162 		struct drm_plane_state *plane_state;
3163 
3164 		plane_state = drm_atomic_get_plane_state(state, plane);
3165 		if (IS_ERR(plane_state)) {
3166 			err = PTR_ERR(plane_state);
3167 			goto free;
3168 		}
3169 	}
3170 
3171 	drm_connector_list_iter_begin(dev, &conn_iter);
3172 	drm_for_each_connector_iter(conn, &conn_iter) {
3173 		struct drm_connector_state *conn_state;
3174 
3175 		conn_state = drm_atomic_get_connector_state(state, conn);
3176 		if (IS_ERR(conn_state)) {
3177 			err = PTR_ERR(conn_state);
3178 			drm_connector_list_iter_end(&conn_iter);
3179 			goto free;
3180 		}
3181 	}
3182 	drm_connector_list_iter_end(&conn_iter);
3183 
3184 	/* clear the acquire context so that it isn't accidentally reused */
3185 	state->acquire_ctx = NULL;
3186 
3187 free:
3188 	if (err < 0) {
3189 		drm_atomic_state_put(state);
3190 		state = ERR_PTR(err);
3191 	}
3192 
3193 	return state;
3194 }
3195 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
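
/*
 * Usage sketch (hedged): duplicating the current state with explicit
 * -EDEADLK handling, for callers that do not use the
 * DRM_MODESET_LOCK_ALL_BEGIN()/END() convenience macros:
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	state = drm_atomic_helper_duplicate_state(dev, &ctx);
 *	if (IS_ERR(state) && PTR_ERR(state) == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */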
3196 
3197 /**
3198  * drm_atomic_helper_suspend - subsystem-level suspend helper
3199  * @dev: DRM device
3200  *
3201  * Duplicates the current atomic state, disables all active outputs and then
3202  * returns a pointer to the original atomic state to the caller. Drivers can
3203  * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3204  * restore the output configuration that was active at the time the system
3205  * entered suspend.
3206  *
3207  * Note that it is potentially unsafe to use this. The atomic state object
3208  * returned by this function is assumed to be persistent. Drivers must ensure
3209  * that this holds true. Before calling this function, drivers must make sure
3210  * to suspend fbdev emulation so that nothing can be using the device.
3211  *
3212  * Returns:
3213  * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3214  * encoded error code on failure. Drivers should store the returned atomic
3215  * state object and pass it to the drm_atomic_helper_resume() helper upon
3216  * resume.
3217  *
3218  * See also:
3219  * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3220  * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3221  */
3222 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3223 {
3224 	struct drm_modeset_acquire_ctx ctx;
3225 	struct drm_atomic_state *state;
3226 	int err;
3227 
3228 	/* This can never be returned, but it makes the compiler happy */
3229 	state = ERR_PTR(-EINVAL);
3230 
3231 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3232 
3233 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
3234 	if (IS_ERR(state))
3235 		goto unlock;
3236 
3237 	err = drm_atomic_helper_disable_all(dev, &ctx);
3238 	if (err < 0) {
3239 		drm_atomic_state_put(state);
3240 		state = ERR_PTR(err);
3241 		goto unlock;
3242 	}
3243 
3244 unlock:
3245 	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3246 	if (err)
3247 		return ERR_PTR(err);
3248 
3249 	return state;
3250 }
3251 EXPORT_SYMBOL(drm_atomic_helper_suspend);
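
/*
 * Usage sketch (hedged; struct foo_device, its embedded drm member and its
 * suspend_state member are hypothetical, and fbdev emulation / output
 * polling are assumed to have been suspended already, as required above):
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		foo->suspend_state = drm_atomic_helper_suspend(&foo->drm);
 *		if (IS_ERR(foo->suspend_state))
 *			return PTR_ERR(foo->suspend_state);
 *
 *		return 0;
 *	}
 */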
3252 
3253 /**
3254  * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3255  * @state: duplicated atomic state to commit
 * @ctx: lock acquisition context to use for the commit
3257  *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid and needs to be fixed up
 * before it can be committed: in particular, its old state pointers are
 * stale and are re-pointed at the objects' current states here.
3261  *
3262  * Returns:
3263  * 0 on success or a negative error code on failure.
3264  *
3265  * See also:
3266  * drm_atomic_helper_suspend()
3267  */
3268 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3269 					      struct drm_modeset_acquire_ctx *ctx)
3270 {
3271 	int i, ret;
3272 	struct drm_plane *plane;
3273 	struct drm_plane_state *new_plane_state;
3274 	struct drm_connector *connector;
3275 	struct drm_connector_state *new_conn_state;
3276 	struct drm_crtc *crtc;
3277 	struct drm_crtc_state *new_crtc_state;
3278 
3279 	state->acquire_ctx = ctx;
3280 
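	/*
	 * The duplicated state still has old state pointers referring to the
	 * object states that were current when it was created; re-point them
	 * at the objects' current states so the commit sees the right delta.
	 */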
3281 	for_each_new_plane_in_state(state, plane, new_plane_state, i)
3282 		state->planes[i].old_state = plane->state;
3283 
3284 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3285 		state->crtcs[i].old_state = crtc->state;
3286 
3287 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
3288 		state->connectors[i].old_state = connector->state;
3289 
3290 	ret = drm_atomic_commit(state);
3291 
3292 	state->acquire_ctx = NULL;
3293 
3294 	return ret;
3295 }
3296 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
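
/*
 * Usage sketch (hedged): restoring a configuration previously saved with
 * drm_atomic_helper_duplicate_state(), e.g. after a temporary test commit,
 * with a &struct drm_modeset_acquire_ctx ctx and int ret on the stack as in
 * drm_atomic_helper_shutdown() above. This mirrors what
 * drm_atomic_helper_resume() does:
 *
 *	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 *	ret = drm_atomic_helper_commit_duplicated_state(saved_state, &ctx);
 *	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *	drm_atomic_state_put(saved_state);
 */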
3297 
3298 /**
3299  * drm_atomic_helper_resume - subsystem-level resume helper
3300  * @dev: DRM device
3301  * @state: atomic state to resume to
3302  *
3303  * Calls drm_mode_config_reset() to synchronize hardware and software states,
3304  * grabs all modeset locks and commits the atomic state object. This can be
3305  * used in conjunction with the drm_atomic_helper_suspend() helper to
3306  * implement suspend/resume for drivers that support atomic mode-setting.
3307  *
3308  * Returns:
3309  * 0 on success or a negative error code on failure.
3310  *
3311  * See also:
3312  * drm_atomic_helper_suspend()
3313  */
3314 int drm_atomic_helper_resume(struct drm_device *dev,
3315 			     struct drm_atomic_state *state)
3316 {
3317 	struct drm_modeset_acquire_ctx ctx;
3318 	int err;
3319 
3320 	drm_mode_config_reset(dev);
3321 
3322 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3323 
3324 	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3325 
3326 	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3327 	drm_atomic_state_put(state);
3328 
3329 	return err;
3330 }
3331 EXPORT_SYMBOL(drm_atomic_helper_resume);
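
/*
 * Usage sketch (hedged; counterpart to the suspend sketch above, using the
 * same hypothetical struct foo_device). Note that the helper drops the
 * state reference itself, so the caller must not reuse it afterwards:
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(&foo->drm, foo->suspend_state);
 *	}
 */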
3332 
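/*
 * Shared state setup for the legacy page_flip and page_flip_target entry
 * points below: queue the flip event on the CRTC, point the primary plane
 * at the new framebuffer, and reject flips on disabled CRTCs while making
 * sure the commit cannot turn into a full modeset.
 */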
3333 static int page_flip_common(struct drm_atomic_state *state,
3334 			    struct drm_crtc *crtc,
3335 			    struct drm_framebuffer *fb,
3336 			    struct drm_pending_vblank_event *event,
3337 			    uint32_t flags)
3338 {
3339 	struct drm_plane *plane = crtc->primary;
3340 	struct drm_plane_state *plane_state;
3341 	struct drm_crtc_state *crtc_state;
3342 	int ret = 0;
3343 
3344 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
3345 	if (IS_ERR(crtc_state))
3346 		return PTR_ERR(crtc_state);
3347 
3348 	crtc_state->event = event;
3349 	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
3350 
3351 	plane_state = drm_atomic_get_plane_state(state, plane);
3352 	if (IS_ERR(plane_state))
3353 		return PTR_ERR(plane_state);
3354 
3355 	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3356 	if (ret != 0)
3357 		return ret;
3358 	drm_atomic_set_fb_for_plane(plane_state, fb);
3359 
3360 	/* Make sure we don't accidentally do a full modeset. */
3361 	state->allow_modeset = false;
3362 	if (!crtc_state->active) {
3363 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3364 				 crtc->base.id, crtc->name);
3365 		return -EINVAL;
3366 	}
3367 
3368 	return ret;
3369 }
3370 
3371 /**
3372  * drm_atomic_helper_page_flip - execute a legacy page flip
3373  * @crtc: DRM CRTC
3374  * @fb: DRM framebuffer
3375  * @event: optional DRM event to signal upon completion
3376  * @flags: flip flags for non-vblank sync'ed updates
3377  * @ctx: lock acquisition context
3378  *
3379  * Provides a default &drm_crtc_funcs.page_flip implementation
3380  * using the atomic driver interface.
3381  *
3382  * Returns:
3383  * Returns 0 on success, negative errno numbers on failure.
3384  *
3385  * See also:
3386  * drm_atomic_helper_page_flip_target()
3387  */
3388 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3389 				struct drm_framebuffer *fb,
3390 				struct drm_pending_vblank_event *event,
3391 				uint32_t flags,
3392 				struct drm_modeset_acquire_ctx *ctx)
3393 {
3394 	struct drm_plane *plane = crtc->primary;
3395 	struct drm_atomic_state *state;
3396 	int ret = 0;
3397 
3398 	state = drm_atomic_state_alloc(plane->dev);
3399 	if (!state)
3400 		return -ENOMEM;
3401 
3402 	state->acquire_ctx = ctx;
3403 
3404 	ret = page_flip_common(state, crtc, fb, event, flags);
3405 	if (ret != 0)
3406 		goto fail;
3407 
3408 	ret = drm_atomic_nonblocking_commit(state);
3409 fail:
3410 	drm_atomic_state_put(state);
3411 	return ret;
3412 }
3413 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
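
/*
 * Usage sketch (hedged; foo_crtc_funcs is hypothetical): atomic drivers
 * normally just plug this helper into their CRTC function table alongside
 * the other atomic helpers:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.destroy = drm_crtc_cleanup,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};
 */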
3414 
3415 /**
 * drm_atomic_helper_page_flip_target - do a page flip on a target vblank period
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: the target vblank period when the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
 * specifying the target vblank period in which the flip should take effect.
3427  *
3428  * Returns:
3429  * Returns 0 on success, negative errno numbers on failure.
3430  */
3431 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
3432 				       struct drm_framebuffer *fb,
3433 				       struct drm_pending_vblank_event *event,
3434 				       uint32_t flags,
3435 				       uint32_t target,
3436 				       struct drm_modeset_acquire_ctx *ctx)
3437 {
3438 	struct drm_plane *plane = crtc->primary;
3439 	struct drm_atomic_state *state;
3440 	struct drm_crtc_state *crtc_state;
3441 	int ret = 0;
3442 
3443 	state = drm_atomic_state_alloc(plane->dev);
3444 	if (!state)
3445 		return -ENOMEM;
3446 
3447 	state->acquire_ctx = ctx;
3448 
3449 	ret = page_flip_common(state, crtc, fb, event, flags);
3450 	if (ret != 0)
3451 		goto fail;
3452 
3453 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3454 	if (WARN_ON(!crtc_state)) {
3455 		ret = -EINVAL;
3456 		goto fail;
3457 	}
3458 	crtc_state->target_vblank = target;
3459 
3460 	ret = drm_atomic_nonblocking_commit(state);
3461 fail:
3462 	drm_atomic_state_put(state);
3463 	return ret;
3464 }
3465 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
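
/*
 * Usage sketch (hedged): this helper is likewise meant to be plugged into
 * &drm_crtc_funcs.page_flip_target. The DRM core then invokes it with an
 * absolute target vblank count, conceptually along the lines of:
 *
 *	u32 target = (u32)drm_crtc_vblank_count(crtc) + 1;
 *
 *	ret = drm_atomic_helper_page_flip_target(crtc, fb, event,
 *						 DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE,
 *						 target, ctx);
 */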
3466 
3467 /**
3468  * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
3469  *						  the input end of a bridge
3470  * @bridge: bridge control structure
3471  * @bridge_state: new bridge state
3472  * @crtc_state: new CRTC state
3473  * @conn_state: new connector state
3474  * @output_fmt: tested output bus format
3475  * @num_input_fmts: will contain the size of the returned array
3476  *
3477  * This helper is a pluggable implementation of the
3478  * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
3479  * modify the bus configuration between their input and their output. It
3480  * returns an array of input formats with a single element set to @output_fmt.
3481  *
 * Returns:
 * A valid format array of size @num_input_fmts, or NULL if the allocation
 * failed.
3485  */
3486 u32 *
3487 drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
3488 					struct drm_bridge_state *bridge_state,
3489 					struct drm_crtc_state *crtc_state,
3490 					struct drm_connector_state *conn_state,
3491 					u32 output_fmt,
3492 					unsigned int *num_input_fmts)
3493 {
3494 	u32 *input_fmts;
3495 
3496 	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
3497 	if (!input_fmts) {
3498 		*num_input_fmts = 0;
3499 		return NULL;
3500 	}
3501 
3502 	*num_input_fmts = 1;
3503 	input_fmts[0] = output_fmt;
3504 	return input_fmts;
3505 }
3506 EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
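
/*
 * Usage sketch (hedged; foo_bridge_funcs is hypothetical): a pass-through
 * bridge can use this together with the default bridge state helpers:
 *
 *	static const struct drm_bridge_funcs foo_bridge_funcs = {
 *		.atomic_reset = drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 */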
3507