xref: /openbmc/linux/drivers/gpu/drm/drm_bridge.c (revision 45755ef1)
1 /*
2  * Copyright (c) 2014 Samsung Electronics Co., Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/err.h>
25 #include <linux/media-bus-format.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 
29 #include <drm/drm_atomic_state_helper.h>
30 #include <drm/drm_bridge.h>
31 #include <drm/drm_debugfs.h>
32 #include <drm/drm_edid.h>
33 #include <drm/drm_encoder.h>
34 #include <drm/drm_file.h>
35 #include <drm/drm_of.h>
36 #include <drm/drm_print.h>
37 
38 #include "drm_crtc_internal.h"
39 
40 /**
41  * DOC: overview
42  *
43  * &struct drm_bridge represents a device that hangs on to an encoder. These are
44  * handy when a regular &drm_encoder entity isn't enough to represent the entire
45  * encoder chain.
46  *
47  * A bridge is always attached to a single &drm_encoder at a time, but can be
48  * either connected to it directly, or through a chain of bridges::
49  *
50  *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
51  *
52  * Here, the output of the encoder feeds to bridge A, and that further feeds to
53  * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
54  * Chaining multiple bridges to the output of a bridge, or the same bridge to
55  * the output of different bridges, is not supported.
56  *
57  * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
58  * CRTCs, encoders or connectors and hence are not visible to userspace. They
59  * just provide additional hooks to get the desired output at the end of the
60  * encoder chain.
61  */
62 
63 /**
64  * DOC:	display driver integration
65  *
66  * Display drivers are responsible for linking encoders with the first bridge
67  * in the chains. This is done by acquiring the appropriate bridge with
68  * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
69  * encoder with a call to drm_bridge_attach().
70  *
71  * Bridges are responsible for linking themselves with the next bridge in the
72  * chain, if any. This is done the same way as for encoders, with the call to
73  * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
74  *
75  * Once these links are created, the bridges can participate along with encoder
76  * functions to perform mode validation and fixup (through
77  * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
78  * setting (through drm_bridge_chain_mode_set()), enable (through
79  * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
80  * and disable (through drm_atomic_bridge_chain_disable() and
81  * drm_atomic_bridge_chain_post_disable()). Those functions call the
82  * corresponding operations provided in &drm_bridge_funcs in sequence for all
83  * bridges in the chain.
84  *
85  * For display drivers that use the atomic helpers
86  * drm_atomic_helper_check_modeset(),
87  * drm_atomic_helper_commit_modeset_enables() and
88  * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
89  * commit check and commit tail handlers, or through the higher-level
90  * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
91  * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
92  * requires no intervention from the driver. For other drivers, the relevant
93  * DRM bridge chain functions shall be called manually.
94  *
95  * Bridges also participate in implementing the &drm_connector at the end of
96  * the bridge chain. Display drivers may use the drm_bridge_connector_init()
97  * helper to create the &drm_connector, or implement it manually on top of the
98  * connector-related operations exposed by the bridge (see the overview
99  * documentation of bridge operations for more details).
100  */
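
/*
 * Example: a minimal sketch of the display driver side described above,
 * assuming a hypothetical 'struct foo_display' wrapper with 'dev' and
 * 'encoder' pointer members and DT port/endpoint 0/0. Only the
 * devm_drm_of_get_bridge() plus drm_bridge_attach() sequence is the point
 * here; the DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is discussed further below::
 *
 *     static int foo_display_attach_first_bridge(struct foo_display *foo)
 *     {
 *             struct drm_bridge *bridge;
 *
 *             // Find the first bridge connected to port 0, endpoint 0 in DT.
 *             bridge = devm_drm_of_get_bridge(foo->dev, foo->dev->of_node, 0, 0);
 *             if (IS_ERR(bridge))
 *                     return PTR_ERR(bridge);
 *
 *             // Link it directly at the encoder's output.
 *             return drm_bridge_attach(foo->encoder, bridge, NULL,
 *                                      DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *     }
 */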
101 
102 /**
103  * DOC: special care dsi
104  *
105  * The interaction between the bridges and other frameworks involved in
106  * the probing of the upstream driver and the bridge driver can be
107  * challenging. Indeed, there are multiple cases that need to be
108  * considered:
109  *
110  * - The upstream driver doesn't use the component framework and isn't a
111  *   MIPI-DSI host. In this case, the bridge driver will probe at some
112  *   point and the upstream driver should try to probe again by returning
113  *   EPROBE_DEFER as long as the bridge driver hasn't probed.
114  *
115  * - The upstream driver doesn't use the component framework, but is a
116  *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
117  *   commands. In this case, the bridge device is a child of the
118  *   display device, and when it probes it is assured that the display
119  *   device (and MIPI-DSI host) is present. The upstream driver is
120  *   assured that the bridge driver is connected between the
121  *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
122  *   Therefore, it must run mipi_dsi_host_register() in its probe
123  *   function, and then run drm_bridge_attach() in its
124  *   &mipi_dsi_host_ops.attach hook.
125  *
126  * - The upstream driver uses the component framework and is a MIPI-DSI
127  *   host. The bridge device is controlled through MIPI-DCS commands.
128  *   This is the same situation as above, and the upstream driver can
129  *   run mipi_dsi_host_register() in either its probe or bind hook.
130  *
131  * - The upstream driver uses the component framework and is a MIPI-DSI
132  *   host. The bridge device uses a separate bus (such as I2C) to be
133  *   controlled. In this case, there's no correlation between the probe
134  *   of the bridge and upstream drivers, so care must be taken to avoid
135  *   an endless EPROBE_DEFER loop, with each driver waiting for the
136  *   other to probe.
137  *
138  * The ideal pattern to cover the last item (and all the others in the
139  * MIPI-DSI host driver case) is to split the operations like this:
140  *
141  * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
142  *   probe hook. It will make sure that the MIPI-DSI host sticks around,
143  *   and that the driver's bind can be called.
144  *
145  * - In its probe hook, the bridge driver must try to find its MIPI-DSI
146  *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
147  *   to its host. The bridge driver is now functional.
148  *
149  * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
150  *   now add its component. Its bind hook will now be called and since
151  *   the bridge driver is attached and registered, we can now look for
152  *   and attach it.
153  *
154  * At this point, we're now certain that both the upstream driver and
155  * the bridge driver are functional and we can't have a deadlock-like
156  * situation when probing.
157  */
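
/*
 * Example: a sketch of the bridge driver side of the pattern above, for an
 * I2C-controlled DSI bridge. foo_bridge_probe() and the port/endpoint
 * indices are assumptions; what matters is the ordering: find the DSI host
 * (deferring probe while it is not there yet), register as a MIPI-DSI
 * device, then attach to the host::
 *
 *     static int foo_bridge_probe(struct i2c_client *client)
 *     {
 *             const struct mipi_dsi_device_info info = {
 *                     .type = "foo-bridge",
 *                     .channel = 0,
 *                     .node = NULL,
 *             };
 *             struct device_node *host_node;
 *             struct mipi_dsi_host *host;
 *             struct mipi_dsi_device *dsi;
 *
 *             // Look up the DSI host through the OF graph.
 *             host_node = of_graph_get_remote_node(client->dev.of_node, 0, 0);
 *             host = of_find_mipi_dsi_host_by_node(host_node);
 *             of_node_put(host_node);
 *             if (!host)
 *                     return -EPROBE_DEFER;
 *
 *             // Register as a MIPI-DSI device and attach it to the host.
 *             dsi = devm_mipi_dsi_device_register_full(&client->dev, host, &info);
 *             if (IS_ERR(dsi))
 *                     return PTR_ERR(dsi);
 *
 *             return devm_mipi_dsi_attach(&client->dev, dsi);
 *     }
 */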
158 
159 /**
160  * DOC: dsi bridge operations
161  *
162  * DSI host interfaces are expected to be implemented as bridges rather than
163  * encoders; however, there are a few aspects of their operation that need to
164  * be defined in order to provide a consistent interface.
165  *
166  * A DSI host should keep the PHY powered down until the pre_enable operation is
167  * called. All lanes are in an undefined idle state up to this point, and it
168  * must not be assumed that they are in LP-11.
169  * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
170  * clock lane to either LP-11 or HS depending on the mode_flag
171  * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
172  *
173  * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
174  * called before the DSI host. If the DSI peripheral requires LP-11 and/or
175  * the clock lane to be in HS mode prior to pre_enable, then it can set the
176  * &pre_enable_prev_first flag to request the pre_enable (and
177  * post_disable) order to be altered to enable the DSI host first.
178  *
179  * Either the CRTC being enabled, or the DSI host enable operation should switch
180  * the host to actively transmitting video on the data lanes.
181  *
182  * The reverse also applies. The DSI host disable operation or stopping the CRTC
183  * should stop transmitting video, and the data lanes should return to the LP-11
184  * state. The DSI host &post_disable operation should disable the PHY.
185  * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
186  * bridge &post_disable will be called before the DSI host's post_disable.
187  *
188  * Whilst it is valid to call &host_transfer prior to pre_enable or after
189  * post_disable, the exact state of the lanes is undefined at this point. The
190  * DSI host should initialise the interface, transmit the data, and then disable
191  * the interface again.
192  *
193  * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
194  * implemented, it therefore needs to be handled entirely within the DSI Host
195  * driver.
196  */
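
/*
 * Example: a DSI peripheral bridge that needs the host's pre_enable to run
 * first (so that the data lanes are in LP-11 and/or the clock lane is in HS
 * mode before its own pre_enable) only has to set the flag before the bridge
 * is added ('foo' is a hypothetical driver structure)::
 *
 *     foo->bridge.pre_enable_prev_first = true;
 *
 * The ordering change is then applied by drm_atomic_bridge_chain_pre_enable()
 * and drm_atomic_bridge_chain_post_disable() below.
 */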
197 
198 static DEFINE_MUTEX(bridge_lock);
199 static LIST_HEAD(bridge_list);
200 
201 /**
202  * drm_bridge_add - add the given bridge to the global bridge list
203  *
204  * @bridge: bridge control structure
205  */
206 void drm_bridge_add(struct drm_bridge *bridge)
207 {
208 	mutex_init(&bridge->hpd_mutex);
209 
210 	mutex_lock(&bridge_lock);
211 	list_add_tail(&bridge->list, &bridge_list);
212 	mutex_unlock(&bridge_lock);
213 }
214 EXPORT_SYMBOL(drm_bridge_add);
215 
216 static void drm_bridge_remove_void(void *bridge)
217 {
218 	drm_bridge_remove(bridge);
219 }
220 
221 /**
222  * devm_drm_bridge_add - devm managed version of drm_bridge_add()
223  *
224  * @dev: device to tie the bridge lifetime to
225  * @bridge: bridge control structure
226  *
227  * This is the managed version of drm_bridge_add() which automatically
228  * calls drm_bridge_remove() when @dev is unbound.
229  *
230  * Return: 0 if no error or negative error code.
231  */
232 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
233 {
234 	drm_bridge_add(bridge);
235 	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
236 }
237 EXPORT_SYMBOL(devm_drm_bridge_add);
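
/*
 * Example: a sketch of a bridge driver probe registering itself with the
 * device-managed helper above, so that drm_bridge_remove() is called
 * automatically on unbind ('foo', 'foo_bridge_funcs' and the DSI connector
 * type are assumptions)::
 *
 *     foo->bridge.funcs = &foo_bridge_funcs;
 *     foo->bridge.of_node = dev->of_node;
 *     foo->bridge.type = DRM_MODE_CONNECTOR_DSI;
 *
 *     return devm_drm_bridge_add(dev, &foo->bridge);
 */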
238 
239 /**
240  * drm_bridge_remove - remove the given bridge from the global bridge list
241  *
242  * @bridge: bridge control structure
243  */
244 void drm_bridge_remove(struct drm_bridge *bridge)
245 {
246 	mutex_lock(&bridge_lock);
247 	list_del_init(&bridge->list);
248 	mutex_unlock(&bridge_lock);
249 
250 	mutex_destroy(&bridge->hpd_mutex);
251 }
252 EXPORT_SYMBOL(drm_bridge_remove);
253 
254 static struct drm_private_state *
255 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
256 {
257 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
258 	struct drm_bridge_state *state;
259 
260 	state = bridge->funcs->atomic_duplicate_state(bridge);
261 	return state ? &state->base : NULL;
262 }
263 
264 static void
265 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
266 				     struct drm_private_state *s)
267 {
268 	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
269 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
270 
271 	bridge->funcs->atomic_destroy_state(bridge, state);
272 }
273 
274 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
275 	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
276 	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
277 };
278 
279 /**
280  * drm_bridge_attach - attach the bridge to an encoder's chain
281  *
282  * @encoder: DRM encoder
283  * @bridge: bridge to attach
284  * @previous: previous bridge in the chain (optional)
285  * @flags: DRM_BRIDGE_ATTACH_* flags
286  *
287  * Called by a kms driver to link the bridge to an encoder's chain. The previous
288  * argument specifies the previous bridge in the chain. If NULL, the bridge is
289  * linked directly at the encoder's output. Otherwise it is linked at the
290  * previous bridge's output.
291  *
292  * If non-NULL the previous bridge must be already attached by a call to this
293  * function.
294  *
295  * Note that bridges attached to encoders are auto-detached during encoder
296  * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
297  * *not* be balanced with a drm_bridge_detach() in driver code.
298  *
299  * RETURNS:
300  * Zero on success, error code on failure
301  */
302 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
303 		      struct drm_bridge *previous,
304 		      enum drm_bridge_attach_flags flags)
305 {
306 	int ret;
307 
308 	if (!encoder || !bridge)
309 		return -EINVAL;
310 
311 	if (previous && (!previous->dev || previous->encoder != encoder))
312 		return -EINVAL;
313 
314 	if (bridge->dev)
315 		return -EBUSY;
316 
317 	bridge->dev = encoder->dev;
318 	bridge->encoder = encoder;
319 
320 	if (previous)
321 		list_add(&bridge->chain_node, &previous->chain_node);
322 	else
323 		list_add(&bridge->chain_node, &encoder->bridge_chain);
324 
325 	if (bridge->funcs->attach) {
326 		ret = bridge->funcs->attach(bridge, flags);
327 		if (ret < 0)
328 			goto err_reset_bridge;
329 	}
330 
331 	if (bridge->funcs->atomic_reset) {
332 		struct drm_bridge_state *state;
333 
334 		state = bridge->funcs->atomic_reset(bridge);
335 		if (IS_ERR(state)) {
336 			ret = PTR_ERR(state);
337 			goto err_detach_bridge;
338 		}
339 
340 		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
341 					    &state->base,
342 					    &drm_bridge_priv_state_funcs);
343 	}
344 
345 	return 0;
346 
347 err_detach_bridge:
348 	if (bridge->funcs->detach)
349 		bridge->funcs->detach(bridge);
350 
351 err_reset_bridge:
352 	bridge->dev = NULL;
353 	bridge->encoder = NULL;
354 	list_del(&bridge->chain_node);
355 
356 #ifdef CONFIG_OF
357 	DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
358 		  bridge->of_node, encoder->name, ret);
359 #else
360 	DRM_ERROR("failed to attach bridge to encoder %s: %d\n",
361 		  encoder->name, ret);
362 #endif
363 
364 	return ret;
365 }
366 EXPORT_SYMBOL(drm_bridge_attach);
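
/*
 * Example: a sketch of an intermediate bridge chaining the next bridge from
 * its &drm_bridge_funcs.attach implementation, passing itself as @previous
 * and propagating the attach flags ('struct foo_bridge', bridge_to_foo() and
 * the 'next_bridge' pointer are assumptions)::
 *
 *     static int foo_bridge_attach(struct drm_bridge *bridge,
 *                                  enum drm_bridge_attach_flags flags)
 *     {
 *             struct foo_bridge *foo = bridge_to_foo(bridge);
 *
 *             return drm_bridge_attach(bridge->encoder, foo->next_bridge,
 *                                      bridge, flags);
 *     }
 */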
367 
368 void drm_bridge_detach(struct drm_bridge *bridge)
369 {
370 	if (WARN_ON(!bridge))
371 		return;
372 
373 	if (WARN_ON(!bridge->dev))
374 		return;
375 
376 	if (bridge->funcs->atomic_reset)
377 		drm_atomic_private_obj_fini(&bridge->base);
378 
379 	if (bridge->funcs->detach)
380 		bridge->funcs->detach(bridge);
381 
382 	list_del(&bridge->chain_node);
383 	bridge->dev = NULL;
384 }
385 
386 /**
387  * DOC: bridge operations
388  *
389  * Bridge drivers expose operations through the &drm_bridge_funcs structure.
390  * The DRM internals (atomic and CRTC helpers) use the helpers defined in
391  * drm_bridge.c to call bridge operations. Those operations are divided in
392  * three big categories to support different parts of the bridge usage.
393  *
394  * - The encoder-related operations support control of the bridges in the
395  *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
396  *   operations. They are used by the legacy CRTC and the atomic modeset
397  *   helpers to perform mode validation, fixup and setting, and enable and
398  *   disable the bridge automatically.
399  *
400  *   The enable and disable operations are split in
401  *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
402  *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
403  *   finer-grained control.
404  *
405  *   Bridge drivers may implement the legacy version of those operations, or
406  *   the atomic version (prefixed with atomic\_), in which case they shall also
407  *   implement the atomic state bookkeeping operations
408  *   (&drm_bridge_funcs.atomic_duplicate_state,
409  *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
410  *   Mixing atomic and non-atomic versions of the operations is not supported.
411  *
412  * - The bus format negotiation operations
413  *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
414  *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
415  *   negotiate the formats transmitted between bridges in the chain when
416  *   multiple formats are supported. Negotiation for formats is performed
417  *   transparently for display drivers by the atomic modeset helpers. Only
418  *   atomic versions of those operations exist, bridge drivers that need to
419  *   implement them shall thus also implement the atomic version of the
420  *   encoder-related operations. This feature is not supported by the legacy
421  *   CRTC helpers.
422  *
423  * - The connector-related operations support implementing a &drm_connector
424  *   based on a chain of bridges. DRM bridges traditionally create a
425  *   &drm_connector for bridges meant to be used at the end of the chain. This
426  *   puts additional burden on bridge drivers, especially for bridges that may
427  *   be used in the middle of a chain or at the end of it. Furthermore, it
428  *   requires all operations of the &drm_connector to be handled by a single
429  *   bridge, which doesn't always match the hardware architecture.
430  *
431  *   To simplify bridge drivers and make the connector implementation more
432  *   flexible, a new model allows bridges to unconditionally skip creation of
433  *   &drm_connector and instead expose &drm_bridge_funcs operations to support
434  *   an externally-implemented &drm_connector. Those operations are
435  *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
436  *   &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
437  *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
438  *   implemented, display drivers shall create a &drm_connector instance for
439  *   each chain of bridges, and implement those connector instances based on
440  *   the bridge connector operations.
441  *
442  *   Bridge drivers shall implement the connector-related operations for all
443  *   the features that the bridge hardware supports. For instance, if a bridge
444  *   supports reading EDID, the &drm_bridge_funcs.get_edid shall be
445  *   implemented. This however doesn't mean that the DDC lines are wired to the
446  *   bridge on a particular platform, as they could also be connected to an I2C
447  *   controller of the SoC. Support for the connector-related operations on the
448  *   running platform is reported through the &drm_bridge.ops flags. Bridge
449  *   drivers shall detect which operations they can support on the platform
450  *   (usually this information is provided by ACPI or DT), and set the
451  *   &drm_bridge.ops flags for all supported operations. A flag shall only be
452  *   set if the corresponding &drm_bridge_funcs operation is implemented, but
453  *   an implemented operation doesn't necessarily imply that the corresponding
454  *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
455  *   decide which bridge to delegate a connector operation to. This mechanism
456  *   allows providing a single static const &drm_bridge_funcs instance in
457  *   bridge drivers, improving security by storing function pointers in
458  *   read-only memory.
459  *
460  *   In order to ease transition, bridge drivers may support both the old and
461  *   new models by making connector creation optional and implementing the
462  *   connector-related bridge operations. Connector creation is then controlled
463  *   by the flags argument to the drm_bridge_attach() function. Display drivers
464  *   that support the new model and create connectors themselves shall set the
465  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
466  *   connector creation. For intermediate bridges in the chain, the flag shall
467  *   be passed to the drm_bridge_attach() call for the downstream bridge.
468  *   Bridge drivers that implement the new model only shall return an error
469  *   from their &drm_bridge_funcs.attach handler when the
470  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
471  *   should use the new model, and convert the bridge drivers they use if
472  *   needed, in order to gradually transition to the new model.
473  */
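
/*
 * Example: a sketch of a &drm_bridge_funcs instance following the atomic
 * model described above, relying on the generic helpers from
 * drm_atomic_state_helper.c for the state bookkeeping and exposing
 * connector-related hooks (the foo_* callbacks are assumptions)::
 *
 *     static const struct drm_bridge_funcs foo_bridge_funcs = {
 *             .attach = foo_bridge_attach,
 *             .atomic_reset = drm_atomic_helper_bridge_reset,
 *             .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 *             .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 *             .atomic_pre_enable = foo_bridge_atomic_pre_enable,
 *             .atomic_enable = foo_bridge_atomic_enable,
 *             .atomic_disable = foo_bridge_atomic_disable,
 *             .atomic_post_disable = foo_bridge_atomic_post_disable,
 *             .detect = foo_bridge_detect,
 *             .get_edid = foo_bridge_get_edid,
 *     };
 *
 *     // At probe time, report what the platform actually supports, e.g.
 *     // when a DDC bus and an HPD signal are wired to the bridge:
 *     foo->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
 */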
474 
475 /**
476  * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
477  *				 encoder chain
478  * @bridge: bridge control structure
479  * @mode: desired mode to be set for the bridge
480  * @adjusted_mode: updated mode that works for this bridge
481  *
482  * Calls &drm_bridge_funcs.mode_fixup for all the bridges in the
483  * encoder chain, starting from the first bridge to the last.
484  *
485  * Note: the bridge passed should be the one closest to the encoder
486  *
487  * RETURNS:
488  * true on success, false on failure
489  */
490 bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
491 				 const struct drm_display_mode *mode,
492 				 struct drm_display_mode *adjusted_mode)
493 {
494 	struct drm_encoder *encoder;
495 
496 	if (!bridge)
497 		return true;
498 
499 	encoder = bridge->encoder;
500 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
501 		if (!bridge->funcs->mode_fixup)
502 			continue;
503 
504 		if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
505 			return false;
506 	}
507 
508 	return true;
509 }
510 EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
511 
512 /**
513  * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
514  *				 encoder chain.
515  * @bridge: bridge control structure
516  * @info: display info against which the mode shall be validated
517  * @mode: desired mode to be validated
518  *
519  * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
520  * chain, starting from the first bridge to the last. If at least one bridge
521  * does not accept the mode, the function returns the error code.
522  *
523  * Note: the bridge passed should be the one closest to the encoder.
524  *
525  * RETURNS:
526  * MODE_OK on success, drm_mode_status Enum error code on failure
527  */
528 enum drm_mode_status
529 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
530 			    const struct drm_display_info *info,
531 			    const struct drm_display_mode *mode)
532 {
533 	struct drm_encoder *encoder;
534 
535 	if (!bridge)
536 		return MODE_OK;
537 
538 	encoder = bridge->encoder;
539 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
540 		enum drm_mode_status ret;
541 
542 		if (!bridge->funcs->mode_valid)
543 			continue;
544 
545 		ret = bridge->funcs->mode_valid(bridge, info, mode);
546 		if (ret != MODE_OK)
547 			return ret;
548 	}
549 
550 	return MODE_OK;
551 }
552 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
553 
554 /**
555  * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
556  *			       encoder chain
557  * @bridge: bridge control structure
558  * @mode: desired mode to be set for the encoder chain
559  * @adjusted_mode: updated mode that works for this encoder chain
560  *
561  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
562  * encoder chain, starting from the first bridge to the last.
563  *
564  * Note: the bridge passed should be the one closest to the encoder
565  */
566 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
567 			       const struct drm_display_mode *mode,
568 			       const struct drm_display_mode *adjusted_mode)
569 {
570 	struct drm_encoder *encoder;
571 
572 	if (!bridge)
573 		return;
574 
575 	encoder = bridge->encoder;
576 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
577 		if (bridge->funcs->mode_set)
578 			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
579 	}
580 }
581 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
582 
583 /**
584  * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
585  * @bridge: bridge control structure
586  * @old_state: old atomic state
587  *
588  * Calls &drm_bridge_funcs.atomic_disable (falls back on
589  * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
590  * starting from the last bridge to the first. These are called before calling
591  * &drm_encoder_helper_funcs.atomic_disable
592  *
593  * Note: the bridge passed should be the one closest to the encoder
594  */
595 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
596 				     struct drm_atomic_state *old_state)
597 {
598 	struct drm_encoder *encoder;
599 	struct drm_bridge *iter;
600 
601 	if (!bridge)
602 		return;
603 
604 	encoder = bridge->encoder;
605 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
606 		if (iter->funcs->atomic_disable) {
607 			struct drm_bridge_state *old_bridge_state;
608 
609 			old_bridge_state =
610 				drm_atomic_get_old_bridge_state(old_state,
611 								iter);
612 			if (WARN_ON(!old_bridge_state))
613 				return;
614 
615 			iter->funcs->atomic_disable(iter, old_bridge_state);
616 		} else if (iter->funcs->disable) {
617 			iter->funcs->disable(iter);
618 		}
619 
620 		if (iter == bridge)
621 			break;
622 	}
623 }
624 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
625 
626 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
627 						struct drm_atomic_state *old_state)
628 {
629 	if (old_state && bridge->funcs->atomic_post_disable) {
630 		struct drm_bridge_state *old_bridge_state;
631 
632 		old_bridge_state =
633 			drm_atomic_get_old_bridge_state(old_state,
634 							bridge);
635 		if (WARN_ON(!old_bridge_state))
636 			return;
637 
638 		bridge->funcs->atomic_post_disable(bridge,
639 						   old_bridge_state);
640 	} else if (bridge->funcs->post_disable) {
641 		bridge->funcs->post_disable(bridge);
642 	}
643 }
644 
645 /**
646  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
647  *					  in the encoder chain
648  * @bridge: bridge control structure
649  * @old_state: old atomic state
650  *
651  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
652  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
653  * starting from the first bridge to the last. These are called after completing
654  * &drm_encoder_helper_funcs.atomic_disable
655  *
656  * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
657  * bridge will be called before the previous one to reverse the @pre_enable
658  * calling direction.
659  *
660  * Note: the bridge passed should be the one closest to the encoder
661  */
662 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
663 					  struct drm_atomic_state *old_state)
664 {
665 	struct drm_encoder *encoder;
666 	struct drm_bridge *next, *limit;
667 
668 	if (!bridge)
669 		return;
670 
671 	encoder = bridge->encoder;
672 
673 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
674 		limit = NULL;
675 
676 		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
677 			next = list_next_entry(bridge, chain_node);
678 
679 			if (next->pre_enable_prev_first) {
680 				/* next bridge had requested that prev
681 				 * was enabled first, so disabled last
682 				 */
683 				limit = next;
684 
685 				/* Find the next bridge that has NOT requested
686 				 * prev to be enabled first / disabled last
687 				 */
688 				list_for_each_entry_from(next, &encoder->bridge_chain,
689 							 chain_node) {
690 					if (!next->pre_enable_prev_first) {
691 						next = list_prev_entry(next, chain_node);
692 						limit = next;
693 						break;
694 					}
695 
696 					if (list_is_last(&next->chain_node,
697 							 &encoder->bridge_chain)) {
698 						limit = next;
699 						break;
700 					}
701 				}
702 
703 				/* Call these bridges in reverse order */
704 				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
705 								 chain_node) {
706 					if (next == bridge)
707 						break;
708 
709 					drm_atomic_bridge_call_post_disable(next,
710 									    old_state);
711 				}
712 			}
713 		}
714 
715 		drm_atomic_bridge_call_post_disable(bridge, old_state);
716 
717 		if (limit)
718 			/* Jump all bridges that we have already post_disabled */
719 			bridge = limit;
720 	}
721 }
722 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
723 
724 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
725 					      struct drm_atomic_state *old_state)
726 {
727 	if (old_state && bridge->funcs->atomic_pre_enable) {
728 		struct drm_bridge_state *old_bridge_state;
729 
730 		old_bridge_state =
731 			drm_atomic_get_old_bridge_state(old_state,
732 							bridge);
733 		if (WARN_ON(!old_bridge_state))
734 			return;
735 
736 		bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
737 	} else if (bridge->funcs->pre_enable) {
738 		bridge->funcs->pre_enable(bridge);
739 	}
740 }
741 
742 /**
743  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
744  *					the encoder chain
745  * @bridge: bridge control structure
746  * @old_state: old atomic state
747  *
748  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
749  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
750  * starting from the last bridge to the first. These are called before calling
751  * &drm_encoder_helper_funcs.atomic_enable
752  *
753  * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
754  * prev bridge will be called before pre_enable of this bridge.
755  *
756  * Note: the bridge passed should be the one closest to the encoder
757  */
758 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
759 					struct drm_atomic_state *old_state)
760 {
761 	struct drm_encoder *encoder;
762 	struct drm_bridge *iter, *next, *limit;
763 
764 	if (!bridge)
765 		return;
766 
767 	encoder = bridge->encoder;
768 
769 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
770 		if (iter->pre_enable_prev_first) {
771 			next = iter;
772 			limit = bridge;
773 			list_for_each_entry_from_reverse(next,
774 							 &encoder->bridge_chain,
775 							 chain_node) {
776 				if (next == bridge)
777 					break;
778 
779 				if (!next->pre_enable_prev_first) {
780 					/* Found first bridge that does NOT
781 					 * request prev to be enabled first
782 					 */
783 					limit = next;
784 					break;
785 				}
786 			}
787 
788 			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
789 				/* Call requested prev bridge pre_enable
790 				 * in order.
791 				 */
792 				if (next == iter)
793 					/* At the first bridge to request prev
794 					 * bridges called first.
795 					 */
796 					break;
797 
798 				drm_atomic_bridge_call_pre_enable(next, old_state);
799 			}
800 		}
801 
802 		drm_atomic_bridge_call_pre_enable(iter, old_state);
803 
804 		if (iter->pre_enable_prev_first)
805 			/* Jump all bridges that we have already pre_enabled */
806 			iter = limit;
807 
808 		if (iter == bridge)
809 			break;
810 	}
811 }
812 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
813 
814 /**
815  * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
816  * @bridge: bridge control structure
817  * @old_state: old atomic state
818  *
819  * Calls &drm_bridge_funcs.atomic_enable (falls back on
820  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
821  * starting from the first bridge to the last. These are called after completing
822  * &drm_encoder_helper_funcs.atomic_enable
823  *
824  * Note: the bridge passed should be the one closest to the encoder
825  */
826 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
827 				    struct drm_atomic_state *old_state)
828 {
829 	struct drm_encoder *encoder;
830 
831 	if (!bridge)
832 		return;
833 
834 	encoder = bridge->encoder;
835 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
836 		if (bridge->funcs->atomic_enable) {
837 			struct drm_bridge_state *old_bridge_state;
838 
839 			old_bridge_state =
840 				drm_atomic_get_old_bridge_state(old_state,
841 								bridge);
842 			if (WARN_ON(!old_bridge_state))
843 				return;
844 
845 			bridge->funcs->atomic_enable(bridge, old_bridge_state);
846 		} else if (bridge->funcs->enable) {
847 			bridge->funcs->enable(bridge);
848 		}
849 	}
850 }
851 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
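
/*
 * Example: display drivers that do not use the atomic commit helpers have to
 * call the chain helpers themselves around their encoder hooks. A sketch of
 * the enable path (foo_encoder_enable_hw() is an assumption)::
 *
 *     bridge = drm_bridge_chain_get_first_bridge(encoder);
 *     drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
 *     drm_atomic_bridge_chain_pre_enable(bridge, state);
 *     foo_encoder_enable_hw(encoder);
 *     drm_atomic_bridge_chain_enable(bridge, state);
 *
 * The disable path is the mirror image: drm_atomic_bridge_chain_disable(),
 * the encoder hardware disable, then drm_atomic_bridge_chain_post_disable().
 */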
852 
853 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
854 				   struct drm_crtc_state *crtc_state,
855 				   struct drm_connector_state *conn_state)
856 {
857 	if (bridge->funcs->atomic_check) {
858 		struct drm_bridge_state *bridge_state;
859 		int ret;
860 
861 		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
862 							       bridge);
863 		if (WARN_ON(!bridge_state))
864 			return -EINVAL;
865 
866 		ret = bridge->funcs->atomic_check(bridge, bridge_state,
867 						  crtc_state, conn_state);
868 		if (ret)
869 			return ret;
870 	} else if (bridge->funcs->mode_fixup) {
871 		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
872 					       &crtc_state->adjusted_mode))
873 			return -EINVAL;
874 	}
875 
876 	return 0;
877 }
878 
879 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
880 				    struct drm_bridge *cur_bridge,
881 				    struct drm_crtc_state *crtc_state,
882 				    struct drm_connector_state *conn_state,
883 				    u32 out_bus_fmt)
884 {
885 	unsigned int i, num_in_bus_fmts = 0;
886 	struct drm_bridge_state *cur_state;
887 	struct drm_bridge *prev_bridge;
888 	u32 *in_bus_fmts;
889 	int ret;
890 
891 	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
892 	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
893 						    cur_bridge);
894 
895 	/*
896 	 * If bus format negotiation is not supported by this bridge, let's
897 	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
898 	 * hope that it can handle this situation gracefully (by providing
899 	 * appropriate default values).
900 	 */
901 	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
902 		if (cur_bridge != first_bridge) {
903 			ret = select_bus_fmt_recursive(first_bridge,
904 						       prev_bridge, crtc_state,
905 						       conn_state,
906 						       MEDIA_BUS_FMT_FIXED);
907 			if (ret)
908 				return ret;
909 		}
910 
911 		/*
912 		 * Driver does not implement the atomic state hooks, but that's
913 		 * fine, as long as it does not access the bridge state.
914 		 */
915 		if (cur_state) {
916 			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
917 			cur_state->output_bus_cfg.format = out_bus_fmt;
918 		}
919 
920 		return 0;
921 	}
922 
923 	/*
924 	 * If the driver implements ->atomic_get_input_bus_fmts() it
925 	 * should also implement the atomic state hooks.
926 	 */
927 	if (WARN_ON(!cur_state))
928 		return -EINVAL;
929 
930 	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
931 							cur_state,
932 							crtc_state,
933 							conn_state,
934 							out_bus_fmt,
935 							&num_in_bus_fmts);
936 	if (!num_in_bus_fmts)
937 		return -ENOTSUPP;
938 	else if (!in_bus_fmts)
939 		return -ENOMEM;
940 
941 	if (first_bridge == cur_bridge) {
942 		cur_state->input_bus_cfg.format = in_bus_fmts[0];
943 		cur_state->output_bus_cfg.format = out_bus_fmt;
944 		kfree(in_bus_fmts);
945 		return 0;
946 	}
947 
948 	for (i = 0; i < num_in_bus_fmts; i++) {
949 		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
950 					       crtc_state, conn_state,
951 					       in_bus_fmts[i]);
952 		if (ret != -ENOTSUPP)
953 			break;
954 	}
955 
956 	if (!ret) {
957 		cur_state->input_bus_cfg.format = in_bus_fmts[i];
958 		cur_state->output_bus_cfg.format = out_bus_fmt;
959 	}
960 
961 	kfree(in_bus_fmts);
962 	return ret;
963 }
964 
965 /*
966  * This function is called by &drm_atomic_bridge_chain_check() just before
967  * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
968  * It performs bus format negotiation between bridge elements. The negotiation
969  * happens in reverse order, starting from the last element in the chain up to
970  * @bridge.
971  *
972  * Negotiation starts by retrieving supported output bus formats on the last
973  * bridge element and testing them one by one. The test is recursive, meaning
974  * that for each tested output format, the whole chain will be walked backward,
975  * and each element will have to choose an input bus format that can be
976  * transcoded to the requested output format. When a bridge element does not
977  * support transcoding into a specific output format -ENOTSUPP is returned and
978  * the next bridge element will have to try a different format. If none of the
979  * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
980  *
981  * This implementation is relying on
982  * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
983  * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
984  * input/output formats.
985  *
986  * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
987  * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
988  * tries a single format: &drm_connector.display_info.bus_formats[0] if
989  * available, MEDIA_BUS_FMT_FIXED otherwise.
990  *
991  * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
992  * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
993  * bridge element that lacks this hook and asks the previous element in the
994  * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
995  * to do in that case (fail if they want to enforce bus format negotiation, or
996  * provide a reasonable default if they need to support pipelines where not
997  * all elements support bus format negotiation).
998  */
999 static int
1000 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
1001 					struct drm_crtc_state *crtc_state,
1002 					struct drm_connector_state *conn_state)
1003 {
1004 	struct drm_connector *conn = conn_state->connector;
1005 	struct drm_encoder *encoder = bridge->encoder;
1006 	struct drm_bridge_state *last_bridge_state;
1007 	unsigned int i, num_out_bus_fmts = 0;
1008 	struct drm_bridge *last_bridge;
1009 	u32 *out_bus_fmts;
1010 	int ret = 0;
1011 
1012 	last_bridge = list_last_entry(&encoder->bridge_chain,
1013 				      struct drm_bridge, chain_node);
1014 	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1015 							    last_bridge);
1016 
1017 	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
1018 		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
1019 
1020 		/*
1021 		 * If the driver implements ->atomic_get_output_bus_fmts() it
1022 		 * should also implement the atomic state hooks.
1023 		 */
1024 		if (WARN_ON(!last_bridge_state))
1025 			return -EINVAL;
1026 
1027 		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
1028 							last_bridge_state,
1029 							crtc_state,
1030 							conn_state,
1031 							&num_out_bus_fmts);
1032 		if (!num_out_bus_fmts)
1033 			return -ENOTSUPP;
1034 		else if (!out_bus_fmts)
1035 			return -ENOMEM;
1036 	} else {
1037 		num_out_bus_fmts = 1;
1038 		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
1039 		if (!out_bus_fmts)
1040 			return -ENOMEM;
1041 
1042 		if (conn->display_info.num_bus_formats &&
1043 		    conn->display_info.bus_formats)
1044 			out_bus_fmts[0] = conn->display_info.bus_formats[0];
1045 		else
1046 			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
1047 	}
1048 
1049 	for (i = 0; i < num_out_bus_fmts; i++) {
1050 		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1051 					       conn_state, out_bus_fmts[i]);
1052 		if (ret != -ENOTSUPP)
1053 			break;
1054 	}
1055 
1056 	kfree(out_bus_fmts);
1057 
1058 	return ret;
1059 }
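
/*
 * Example: a sketch of a bridge taking part in the negotiation described
 * above, accepting a single fixed input format whatever the requested output
 * format (foo_bridge_get_input_bus_fmts() and the RGB888 choice are
 * assumptions)::
 *
 *     static u32 *
 *     foo_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
 *                                   struct drm_bridge_state *bridge_state,
 *                                   struct drm_crtc_state *crtc_state,
 *                                   struct drm_connector_state *conn_state,
 *                                   u32 output_fmt,
 *                                   unsigned int *num_input_fmts)
 *     {
 *             u32 *input_fmts;
 *
 *             input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
 *             if (!input_fmts) {
 *                     *num_input_fmts = 0;
 *                     return NULL;
 *             }
 *
 *             input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *             *num_input_fmts = 1;
 *
 *             return input_fmts;
 *     }
 */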
1060 
1061 static void
1062 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1063 				      struct drm_connector *conn,
1064 				      struct drm_atomic_state *state)
1065 {
1066 	struct drm_bridge_state *bridge_state, *next_bridge_state;
1067 	struct drm_bridge *next_bridge;
1068 	u32 output_flags = 0;
1069 
1070 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1071 
1072 	/* No bridge state attached to this bridge => nothing to propagate. */
1073 	if (!bridge_state)
1074 		return;
1075 
1076 	next_bridge = drm_bridge_get_next_bridge(bridge);
1077 
1078 	/*
1079 	 * Let's try to apply the most common case here, that is, propagate
1080 	 * display_info flags for the last bridge, and propagate the input
1081 	 * flags of the next bridge element to the output end of the current
1082 	 * bridge when the bridge is not the last one.
1083 	 * There are exceptions to this rule, like when signal inversion is
1084 	 * happening at the board level, but that's something drivers can deal
1085 	 * with from their &drm_bridge_funcs.atomic_check() implementation by
1086 	 * simply overriding the flags value we've set here.
1087 	 */
1088 	if (!next_bridge) {
1089 		output_flags = conn->display_info.bus_flags;
1090 	} else {
1091 		next_bridge_state = drm_atomic_get_new_bridge_state(state,
1092 								next_bridge);
1093 		/*
1094 		 * No bridge state attached to the next bridge, just leave the
1095 		 * flags to 0.
1096 		 */
1097 		if (next_bridge_state)
1098 			output_flags = next_bridge_state->input_bus_cfg.flags;
1099 	}
1100 
1101 	bridge_state->output_bus_cfg.flags = output_flags;
1102 
1103 	/*
1104 	 * Propagate the output flags to the input end of the bridge. Again, it's
1105 	 * not necessarily what all bridges want, but that's what most of them
1106 	 * do, and by doing that by default we avoid forcing drivers to
1107 	 * duplicate the "dummy propagation" logic.
1108 	 */
1109 	bridge_state->input_bus_cfg.flags = output_flags;
1110 }
1111 
1112 /**
1113  * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1114  * @bridge: bridge control structure
1115  * @crtc_state: new CRTC state
1116  * @conn_state: new connector state
1117  *
1118  * First trigger a bus format negotiation before calling
1119  * &drm_bridge_funcs.atomic_check() (falls back on
1120  * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1121  * starting from the last bridge to the first. These are called before calling
1122  * &drm_encoder_helper_funcs.atomic_check()
1123  *
1124  * RETURNS:
1125  * 0 on success, a negative error code on failure
1126  */
1127 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1128 				  struct drm_crtc_state *crtc_state,
1129 				  struct drm_connector_state *conn_state)
1130 {
1131 	struct drm_connector *conn = conn_state->connector;
1132 	struct drm_encoder *encoder;
1133 	struct drm_bridge *iter;
1134 	int ret;
1135 
1136 	if (!bridge)
1137 		return 0;
1138 
1139 	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1140 						      conn_state);
1141 	if (ret)
1142 		return ret;
1143 
1144 	encoder = bridge->encoder;
1145 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1146 		int ret;
1147 
1148 		/*
1149 		 * Bus flags are propagated by default. If a bridge needs to
1150 		 * tweak the input bus flags for any reason, it should happen
1151 		 * in its &drm_bridge_funcs.atomic_check() implementation such
1152 		 * that preceding bridges in the chain can propagate the new
1153 		 * bus flags.
1154 		 */
1155 		drm_atomic_bridge_propagate_bus_flags(iter, conn,
1156 						      crtc_state->state);
1157 
1158 		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1159 		if (ret)
1160 			return ret;
1161 
1162 		if (iter == bridge)
1163 			break;
1164 	}
1165 
1166 	return 0;
1167 }
1168 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1169 
1170 /**
1171  * drm_bridge_detect - check if anything is attached to the bridge output
1172  * @bridge: bridge control structure
1173  *
1174  * If the bridge supports output detection, as reported by the
1175  * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1176  * bridge and return the connection status. Otherwise return
1177  * connector_status_unknown.
1178  *
1179  * RETURNS:
1180  * The detection status on success, or connector_status_unknown if the bridge
1181  * doesn't support output detection.
1182  */
1183 enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
1184 {
1185 	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1186 		return connector_status_unknown;
1187 
1188 	return bridge->funcs->detect(bridge);
1189 }
1190 EXPORT_SYMBOL_GPL(drm_bridge_detect);
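
/*
 * Example: a sketch of a connector .detect() implementation delegating to
 * the bridge at the end of the chain ('struct foo_connector',
 * to_foo_connector() and its 'bridge' member are assumptions)::
 *
 *     static enum drm_connector_status
 *     foo_connector_detect(struct drm_connector *connector, bool force)
 *     {
 *             struct foo_connector *conn = to_foo_connector(connector);
 *
 *             return drm_bridge_detect(conn->bridge);
 *     }
 */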
1191 
1192 /**
1193  * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1194  * @connector
1195  * @bridge: bridge control structure
1196  * @connector: the connector to fill with modes
1197  *
1198  * If the bridge supports output modes retrieval, as reported by the
1199  * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1200  * fill the connector with all valid modes and return the number of modes
1201  * added. Otherwise return 0.
1202  *
1203  * RETURNS:
1204  * The number of modes added to the connector.
1205  */
1206 int drm_bridge_get_modes(struct drm_bridge *bridge,
1207 			 struct drm_connector *connector)
1208 {
1209 	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1210 		return 0;
1211 
1212 	return bridge->funcs->get_modes(bridge, connector);
1213 }
1214 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1215 
1216 /**
1217  * drm_bridge_edid_read - read the EDID data of the connected display
1218  * @bridge: bridge control structure
1219  * @connector: the connector to read EDID for
1220  *
1221  * If the bridge supports output EDID retrieval, as reported by the
1222  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1223  * the EDID and return it. Otherwise return NULL.
1224  *
1225  * If &drm_bridge_funcs.edid_read is not set, fall back to using
1226  * drm_bridge_get_edid() and wrapping it in struct drm_edid.
1227  *
1228  * RETURNS:
1229  * The retrieved EDID on success, or NULL otherwise.
1230  */
1231 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1232 					    struct drm_connector *connector)
1233 {
1234 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1235 		return NULL;
1236 
1237 	/* Transitional: Fall back to ->get_edid. */
1238 	if (!bridge->funcs->edid_read) {
1239 		const struct drm_edid *drm_edid;
1240 		struct edid *edid;
1241 
1242 		edid = drm_bridge_get_edid(bridge, connector);
1243 		if (!edid)
1244 			return NULL;
1245 
1246 		drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
1247 
1248 		kfree(edid);
1249 
1250 		return drm_edid;
1251 	}
1252 
1253 	return bridge->funcs->edid_read(bridge, connector);
1254 }
1255 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
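
/*
 * Example: a sketch of a connector .get_modes() implementation built on top
 * of drm_bridge_edid_read() (to_foo_connector() and the 'bridge' member are
 * assumptions)::
 *
 *     static int foo_connector_get_modes(struct drm_connector *connector)
 *     {
 *             struct foo_connector *conn = to_foo_connector(connector);
 *             const struct drm_edid *drm_edid;
 *             int count;
 *
 *             drm_edid = drm_bridge_edid_read(conn->bridge, connector);
 *             drm_edid_connector_update(connector, drm_edid);
 *             count = drm_edid_connector_add_modes(connector);
 *             drm_edid_free(drm_edid);
 *
 *             return count;
 *     }
 */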
1256 
1257 /**
1258  * drm_bridge_get_edid - get the EDID data of the connected display
1259  * @bridge: bridge control structure
1260  * @connector: the connector to read EDID for
1261  *
1262  * If the bridge supports output EDID retrieval, as reported by the
1263  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
1264  * get the EDID and return it. Otherwise return NULL.
1265  *
1266  * Deprecated. Prefer using drm_bridge_edid_read().
1267  *
1268  * RETURNS:
1269  * The retrieved EDID on success, or NULL otherwise.
1270  */
1271 struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
1272 				 struct drm_connector *connector)
1273 {
1274 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1275 		return NULL;
1276 
1277 	return bridge->funcs->get_edid(bridge, connector);
1278 }
1279 EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
1280 
1281 /**
1282  * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1283  * @bridge: bridge control structure
1284  * @cb: hot-plug detection callback
1285  * @data: data to be passed to the hot-plug detection callback
1286  *
1287  * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1288  * and @data as hot plug notification callback. From now on the @cb will be
1289  * called with @data when an output status change is detected by the bridge,
1290  * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1291  *
1292  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1293  * bridge->ops. This function shall not be called when the flag is not set.
1294  *
1295  * Only one hot plug detection callback can be registered at a time, it is an
1296  * error to call this function when hot plug detection is already enabled for
1297  * the bridge.
1298  */
1299 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1300 			   void (*cb)(void *data,
1301 				      enum drm_connector_status status),
1302 			   void *data)
1303 {
1304 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1305 		return;
1306 
1307 	mutex_lock(&bridge->hpd_mutex);
1308 
1309 	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1310 		goto unlock;
1311 
1312 	bridge->hpd_cb = cb;
1313 	bridge->hpd_data = data;
1314 
1315 	if (bridge->funcs->hpd_enable)
1316 		bridge->funcs->hpd_enable(bridge);
1317 
1318 unlock:
1319 	mutex_unlock(&bridge->hpd_mutex);
1320 }
1321 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
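
/*
 * Example: a sketch of a connector implementation registering a hot plug
 * callback when the bridge reports DRM_BRIDGE_OP_HPD ('foo_conn' and
 * 'struct foo_connector' are assumptions)::
 *
 *     static void foo_hpd_cb(void *data, enum drm_connector_status status)
 *     {
 *             struct foo_connector *foo_conn = data;
 *
 *             // Tell the DRM core to re-probe the outputs.
 *             drm_kms_helper_hotplug_event(foo_conn->base.dev);
 *     }
 *
 *     if (bridge->ops & DRM_BRIDGE_OP_HPD)
 *             drm_bridge_hpd_enable(bridge, foo_hpd_cb, foo_conn);
 */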
1322 
1323 /**
1324  * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1325  * @bridge: bridge control structure
1326  *
1327  * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1328  * plug detection callback previously registered with drm_bridge_hpd_enable().
1329  * Once this function returns the callback will not be called by the bridge
1330  * when an output status change occurs.
1331  *
1332  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1333  * bridge->ops. This function shall not be called when the flag is not set.
1334  */
1335 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1336 {
1337 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1338 		return;
1339 
1340 	mutex_lock(&bridge->hpd_mutex);
1341 	if (bridge->funcs->hpd_disable)
1342 		bridge->funcs->hpd_disable(bridge);
1343 
1344 	bridge->hpd_cb = NULL;
1345 	bridge->hpd_data = NULL;
1346 	mutex_unlock(&bridge->hpd_mutex);
1347 }
1348 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1349 
1350 /**
1351  * drm_bridge_hpd_notify - notify hot plug detection events
1352  * @bridge: bridge control structure
1353  * @status: output connection status
1354  *
1355  * Bridge drivers shall call this function to report hot plug events when they
1356  * detect a change in the output status, when hot plug detection has been
1357  * enabled by drm_bridge_hpd_enable().
1358  *
1359  * This function shall be called in a context that can sleep.
1360  */
1361 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1362 			   enum drm_connector_status status)
1363 {
1364 	mutex_lock(&bridge->hpd_mutex);
1365 	if (bridge->hpd_cb)
1366 		bridge->hpd_cb(bridge->hpd_data, status);
1367 	mutex_unlock(&bridge->hpd_mutex);
1368 }
1369 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
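
/*
 * Example: a sketch of a bridge driver reporting an output status change
 * from a threaded interrupt handler ('struct foo_bridge' and
 * foo_read_hpd_status() are assumptions)::
 *
 *     static irqreturn_t foo_bridge_hpd_irq_thread(int irq, void *arg)
 *     {
 *             struct foo_bridge *foo = arg;
 *
 *             drm_bridge_hpd_notify(&foo->bridge, foo_read_hpd_status(foo));
 *
 *             return IRQ_HANDLED;
 *     }
 */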
1370 
1371 #ifdef CONFIG_OF
1372 /**
1373  * of_drm_find_bridge - find the bridge corresponding to the device node in
1374  *			the global bridge list
1375  *
1376  * @np: device node
1377  *
1378  * RETURNS:
1379  * drm_bridge control struct on success, NULL on failure
1380  */
1381 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1382 {
1383 	struct drm_bridge *bridge;
1384 
1385 	mutex_lock(&bridge_lock);
1386 
1387 	list_for_each_entry(bridge, &bridge_list, list) {
1388 		if (bridge->of_node == np) {
1389 			mutex_unlock(&bridge_lock);
1390 			return bridge;
1391 		}
1392 	}
1393 
1394 	mutex_unlock(&bridge_lock);
1395 	return NULL;
1396 }
1397 EXPORT_SYMBOL(of_drm_find_bridge);
1398 #endif
1399 
1400 #ifdef CONFIG_DEBUG_FS
1401 static int drm_bridge_chains_info(struct seq_file *m, void *data)
1402 {
1403 	struct drm_debugfs_entry *entry = m->private;
1404 	struct drm_device *dev = entry->dev;
1405 	struct drm_printer p = drm_seq_file_printer(m);
1406 	struct drm_mode_config *config = &dev->mode_config;
1407 	struct drm_encoder *encoder;
1408 	unsigned int bridge_idx = 0;
1409 
1410 	list_for_each_entry(encoder, &config->encoder_list, head) {
1411 		struct drm_bridge *bridge;
1412 
1413 		drm_printf(&p, "encoder[%u]\n", encoder->base.id);
1414 
1415 		drm_for_each_bridge_in_chain(encoder, bridge) {
1416 			drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
1417 				   bridge_idx, bridge->type, bridge->ops);
1418 
1419 #ifdef CONFIG_OF
1420 			if (bridge->of_node)
1421 				drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
1422 #endif
1423 
1424 			drm_printf(&p, "\n");
1425 
1426 			bridge_idx++;
1427 		}
1428 	}
1429 
1430 	return 0;
1431 }
1432 
1433 static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
1434 	{ "bridge_chains", drm_bridge_chains_info, 0 },
1435 };
1436 
1437 void drm_bridge_debugfs_init(struct drm_minor *minor)
1438 {
1439 	drm_debugfs_add_files(minor->dev, drm_bridge_debugfs_list,
1440 			      ARRAY_SIZE(drm_bridge_debugfs_list));
1441 }
1442 #endif
1443 
1444 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1445 MODULE_DESCRIPTION("DRM bridge infrastructure");
1446 MODULE_LICENSE("GPL and additional rights");
1447