/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
48
49 struct drm_dp_mst_branch;
50
51 /**
52 * struct drm_dp_mst_port - MST port
53 * @port_num: port number
54 * @input: if this port is an input port. Protected by
55 * &drm_dp_mst_topology_mgr.base.lock.
56 * @mcs: message capability status - DP 1.2 spec. Protected by
57 * &drm_dp_mst_topology_mgr.base.lock.
58 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
59 * &drm_dp_mst_topology_mgr.base.lock.
60 * @pdt: Peer Device Type. Protected by
61 * &drm_dp_mst_topology_mgr.base.lock.
62 * @ldps: Legacy Device Plug Status. Protected by
63 * &drm_dp_mst_topology_mgr.base.lock.
64 * @dpcd_rev: DPCD revision of device on this port. Protected by
65 * &drm_dp_mst_topology_mgr.base.lock.
66 * @num_sdp_streams: Number of simultaneous streams. Protected by
67 * &drm_dp_mst_topology_mgr.base.lock.
68 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
69 * &drm_dp_mst_topology_mgr.base.lock.
70 * @full_pbn: Max possible bandwidth for this port. Protected by
71 * &drm_dp_mst_topology_mgr.base.lock.
72 * @next: link to next port on this branch device
73 * @aux: i2c aux transport to talk to device connected to this port, protected
74 * by &drm_dp_mst_topology_mgr.base.lock.
75 * @passthrough_aux: parent aux to which DSC pass-through requests should be
76 * sent, only set if DSC pass-through is possible.
77 * @parent: branch device parent of this port
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_aux *passthrough_aux;
	struct drm_dp_mst_branch *parent;

	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	const struct drm_edid *cached_edid;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};
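
/*
 * Illustrative sketch (not taken from any specific driver) of the malloc
 * reference rule described for @malloc_kref above: a driver-private object
 * that stores a pointer to a port beyond the port's topology lifetime is
 * expected to hold a malloc reference for as long as it keeps the pointer.
 * The example_mst_connector structure and function names are hypothetical.
 *
 *	struct example_mst_connector {
 *		struct drm_connector base;
 *		struct drm_dp_mst_port *port;
 *	};
 *
 *	static void example_connector_bind_port(struct example_mst_connector *conn,
 *						struct drm_dp_mst_port *port)
 *	{
 *		conn->port = port;
 *		drm_dp_mst_get_port_malloc(port);	// keep the allocation alive
 *	}
 *
 *	static void example_connector_destroy(struct example_mst_connector *conn)
 *	{
 *		drm_dp_mst_put_port_malloc(conn->port);	// drop it when done
 *	}
 */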

/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: whether a link address message has been sent to this
 * device yet.
 * @guid: GUID for the DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device. There is one primary
 * branch device at the root, along with any other branches connected to
 * downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};


struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7];	/* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts and passes them to the MST
	 * core for processing, the same way an HPD IRQ pulse handler would.
	 * If provided, the MST core calls this callback from a poll-waiting
	 * loop while waiting for MST down message replies. The driver is
	 * expected to guard against a race between this callback and the
	 * driver's HPD IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
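
/*
 * Illustrative sketch (not a verbatim driver excerpt) of how a driver wires
 * up these callbacks: it provides a static &struct drm_dp_mst_topology_cbs
 * instance and points &drm_dp_mst_topology_mgr.cbs at it when setting up the
 * manager. The example_* names are hypothetical.
 *
 *	static struct drm_connector *
 *	example_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *				  struct drm_dp_mst_port *port, const char *path)
 *	{
 *		// Allocate and initialize a drm_connector for @port, typically
 *		// via drm_connector_init() and
 *		// drm_connector_set_path_property(connector, path).
 *		return example_create_mst_connector(mgr, port, path);
 *	}
 *
 *	static const struct drm_dp_mst_topology_cbs example_mst_cbs = {
 *		.add_connector = example_mst_add_connector,
 *	};
 */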

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

/**
 * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
 *
 * The primary atomic state structure for a given MST payload. Stores information like current
 * bandwidth allocation, intended action for this payload, etc.
 */
struct drm_dp_mst_atomic_payload {
	/** @port: The MST port assigned to this payload */
	struct drm_dp_mst_port *port;

	/**
	 * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
	 * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
	 * check time. This shouldn't usually matter, as the start slot should never be relevant for
	 * atomic state computations.
	 *
	 * Since this value is determined at commit time instead of check time, this value is
	 * protected by the MST helpers ensuring that async commits operating on the given topology
	 * never run in parallel. In the event that a driver does need to read this value (e.g. to
	 * inform hardware of the starting timeslot for a payload), the driver may either:
	 *
	 * * Read this field during the atomic commit after
	 *   drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
	 *   previous MST state's payload start slots have been copied over to the new state. Note
	 *   that a new start slot won't be assigned/removed from this payload until
	 *   drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
	 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
	 *   get committed to hardware by calling drm_crtc_commit_wait() on each of the
	 *   &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
	 *
	 * If neither of the two above solutions suffices (e.g. the driver needs to read the start
	 * slot in the middle of an atomic commit without waiting for some reason), then drivers
	 * should cache this value themselves after changing payloads. An illustrative sketch of
	 * the first option follows this structure definition.
	 */
	s8 vc_start_slot;

	/** @vcpi: The Virtual Channel Payload Identifier */
	u8 vcpi;
	/**
	 * @time_slots:
	 * The number of timeslots allocated to this payload from the source DP Tx to
	 * the immediate downstream DP Rx
	 */
	int time_slots;
	/** @pbn: The payload bandwidth for this payload */
	int pbn;

	/** @delete: Whether or not we intend to delete this payload during this atomic commit */
	bool delete : 1;
	/** @dsc_enabled: Whether or not this payload has DSC enabled */
	bool dsc_enabled : 1;

	/** @next: The list node for this payload */
	struct list_head next;
};
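
/*
 * Illustrative sketch of the first option described for @vc_start_slot:
 * reading the committed start slots from an atomic commit tail, after all
 * MST dependencies have been waited on. The example_* helpers are
 * hypothetical driver hooks; also note that a brand-new payload only gets
 * its start slot once drm_dp_add_payload_part1() has run for it.
 *
 *	static void example_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr;
 *		struct drm_dp_mst_topology_state *mst_state;
 *		struct drm_dp_mst_atomic_payload *payload;
 *		int i;
 *
 *		// Make sure previous MST commits have finished, so their
 *		// start slots have been carried over into @state.
 *		drm_dp_mst_atomic_wait_for_dependencies(state);
 *
 *		for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
 *			list_for_each_entry(payload, &mst_state->payloads, next) {
 *				if (payload->delete)
 *					continue;
 *				example_program_start_slot(payload->port,
 *							   payload->vc_start_slot,
 *							   payload->time_slots);
 *			}
 *		}
 *	}
 */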

/**
 * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
 *
 * This struct represents the atomic state of the toplevel DisplayPort MST manager
 */
struct drm_dp_mst_topology_state {
	/** @base: Base private state for atomic */
	struct drm_private_state base;

	/** @mgr: The topology manager */
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
	 * modify this to add additional dependencies if needed.
	 */
	u32 pending_crtc_mask;
	/**
	 * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
	 * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
	 */
	struct drm_crtc_commit **commit_deps;
	/** @num_commit_deps: The number of CRTC commits in @commit_deps */
	size_t num_commit_deps;

	/** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
	u32 payload_mask;
	/** @payloads: The list of payloads being created/destroyed in this state */
	struct list_head payloads;

	/** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
	u8 total_avail_slots;
	/** @start_slot: The first usable time slot in this topology (1 or 0) */
	u8 start_slot;

	/**
	 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
	 * out itself.
	 */
	int pbn_div;
};
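
/*
 * Illustrative sketch (not a verbatim driver excerpt) of how this state is
 * commonly used from an atomic_check hook: grab the topology state, fill in
 * @pbn_div, and ask the helpers to reserve time slots for the stream. The
 * example_* function, its parameters, and the exact units of @bpp passed to
 * drm_dp_calc_pbn_mode() are assumptions that vary by driver and kernel
 * version.
 *
 *	static int example_mst_atomic_check(struct drm_atomic_state *state,
 *					    struct drm_dp_mst_topology_mgr *mgr,
 *					    struct drm_dp_mst_port *port,
 *					    const struct drm_display_mode *mode,
 *					    int bpp, int link_rate, int lane_count)
 *	{
 *		struct drm_dp_mst_topology_state *mst_state;
 *		int pbn, slots;
 *
 *		mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *		if (IS_ERR(mst_state))
 *			return PTR_ERR(mst_state);
 *
 *		// The driver owns the divisor, usually derived from the
 *		// current link rate and lane count.
 *		if (!mst_state->pbn_div)
 *			mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate,
 *								      lane_count);
 *
 *		pbn = drm_dp_calc_pbn_mode(mode->clock, bpp);
 *		slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
 *		if (slots < 0)
 *			return slots;
 *
 *		return 0;
 *	}
 */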

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port.
	 * False if no MST sink/branch device is connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @reset_rx_state: The down request's reply and up request message
	 * receiver state must be reset after the topology manager has been
	 * removed. Protected by @lock.
	 */
	bool reset_rx_state : 1;

	/**
	 * @payload_count: The number of currently active payloads in hardware. This value is only
	 * intended to be used internally by MST helpers for payload tracking, and is only safe to
	 * read/write from the atomic commit (not check) context.
	 */
	u8 payload_count;

	/**
	 * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
	 * internally by MST helpers for payload tracking, and is only safe to read/write from the
	 * atomic commit (not check) context.
	 */
	u8 next_start_slot;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of to be destroyed connectors.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of to be destroyed branch
	 * devices.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);
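
/*
 * Illustrative sketch of bringing up the topology manager for one MST capable
 * connector, typically at connector init time. The example_* names and the
 * numeric limits are hypothetical; real drivers size max_payloads and the
 * DPCD transaction size to what their hardware supports.
 *
 *	int example_mst_init(struct example_dp *dp)
 *	{
 *		dp->mst_mgr.cbs = &example_mst_cbs;
 *
 *		return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dp->drm_dev,
 *						    &dp->aux,
 *						    16,	// max DPCD transaction bytes
 *						    4,	// max payloads
 *						    dp->connector.base.id);
 *	}
 *
 * MST is then enabled or disabled at hotplug time with
 * drm_dp_mst_topology_mgr_set_mst(), depending on whether
 * drm_dp_read_mst_cap() reports an MST capable sink.
 */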

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
				    const u8 *esi,
				    u8 *ack,
				    bool *handled);
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
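
/*
 * Illustrative sketch of the HPD IRQ flow for the two helpers above: the
 * driver reads the ESI registers itself, lets the MST core handle the event,
 * acks the serviced bits back to the sink, and only then lets the core send
 * new sideband requests. The DPCD accesses and the example_* helpers are the
 * driver's responsibility and are shown here as assumptions.
 *
 *	static void example_handle_mst_irq(struct example_dp *dp)
 *	{
 *		u8 esi[4] = {}, ack[4] = {};
 *		bool handled = false;
 *
 *		// e.g. read DP_SINK_COUNT_ESI and the following ESI bytes
 *		if (!example_read_esi(dp, esi))
 *			return;
 *
 *		drm_dp_mst_hpd_irq_handle_event(&dp->mst_mgr, esi, ack, &handled);
 *		if (!handled)
 *			return;
 *
 *		// Ack the serviced event bits before kicking new requests.
 *		example_ack_esi(dp, ack);
 *		drm_dp_mst_hpd_irq_send_new_request(&dp->mst_mgr);
 *	}
 */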

int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
			     int link_rate, int link_lane_count);

int drm_dp_calc_pbn_mode(int clock, int bpp);

void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_topology_state *mst_state,
			     struct drm_dp_mst_atomic_payload *payload);
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_atomic_state *state,
			     struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_topology_state *mst_state,
			   const struct drm_dp_mst_atomic_payload *old_payload,
			   struct drm_dp_mst_atomic_payload *new_payload);
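
/*
 * Illustrative sketch of the two-step payload programming sequence around
 * enabling an MST stream during an atomic commit, following the helper names
 * above. The example_* hooks are hypothetical, error handling is omitted, and
 * the exact ordering relative to enabling the stream varies per driver.
 *
 *	static void example_enable_mst_stream(struct drm_atomic_state *state,
 *					      struct drm_dp_mst_topology_mgr *mgr,
 *					      struct drm_dp_mst_port *port)
 *	{
 *		struct drm_dp_mst_topology_state *mst_state =
 *			drm_atomic_get_new_mst_topology_state(state, mgr);
 *		struct drm_dp_mst_atomic_payload *payload =
 *			drm_atomic_get_mst_payload_state(mst_state, port);
 *
 *		// Step 1: allocate the start slot and write the payload
 *		// table to the branch device.
 *		drm_dp_add_payload_part1(mgr, mst_state, payload);
 *
 *		example_enable_stream(port, payload);
 *		drm_dp_check_act_status(mgr);
 *
 *		// Step 2: finish the allocation once the stream is live.
 *		drm_dp_add_payload_part2(mgr, state, payload);
 *	}
 */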

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);
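
/*
 * Illustrative sketch of system resume handling: a non-zero return from
 * drm_dp_mst_topology_mgr_resume() means the previous topology is no longer
 * there, in which case the driver typically disables MST on the manager and
 * re-probes the connector. The example_dp structure is hypothetical.
 *
 *	static int example_dp_resume(struct example_dp *dp)
 *	{
 *		int ret;
 *
 *		ret = drm_dp_mst_topology_mgr_resume(&dp->mst_mgr, true);
 *		if (ret)
 *			drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
 *
 *		return ret;
 *	}
 */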

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
				 struct drm_dp_mst_port *port);
int __must_check
drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
						   struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

static inline struct drm_dp_mst_topology_state *
to_drm_dp_mst_topology_state(struct drm_private_state *state)
{
	return container_of(state, struct drm_dp_mst_topology_state, base);
}

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
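
/*
 * Illustrative sketch of the old/new iterator in an atomic check helper,
 * walking the topology managers touched by @state. The loop body is a
 * placeholder and the example_* function name is hypothetical.
 *
 *	static int example_check_mst_changes(struct drm_atomic_state *state)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr;
 *		struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *		int i;
 *
 *		for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *						 new_mst_state, i) {
 *			// Compare old_mst_state->payloads with
 *			// new_mst_state->payloads here.
 *		}
 *
 *		return 0;
 *	}
 */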

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif