xref: /openbmc/linux/drivers/thunderbolt/icm.c (revision 3080e197)
1 /*
2  * Internal Thunderbolt Connection Manager. This is firmware running on
3  * the Thunderbolt host controller that performs most of the low-level
4  * handling.
5  *
6  * Copyright (C) 2017, Intel Corporation
7  * Authors: Michael Jamet <michael.jamet@intel.com>
8  *          Mika Westerberg <mika.westerberg@linux.intel.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14 
15 #include <linux/delay.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/platform_data/x86/apple.h>
19 #include <linux/sizes.h>
20 #include <linux/slab.h>
21 #include <linux/workqueue.h>
22 
23 #include "ctl.h"
24 #include "nhi_regs.h"
25 #include "tb.h"
26 
27 #define PCIE2CIO_CMD			0x30
28 #define PCIE2CIO_CMD_TIMEOUT		BIT(31)
29 #define PCIE2CIO_CMD_START		BIT(30)
30 #define PCIE2CIO_CMD_WRITE		BIT(21)
31 #define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
32 #define PCIE2CIO_CMD_CS_SHIFT		19
33 #define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
34 #define PCIE2CIO_CMD_PORT_SHIFT		13
35 
36 #define PCIE2CIO_WRDATA			0x34
37 #define PCIE2CIO_RDDATA			0x38
38 
39 #define PHY_PORT_CS1			0x37
40 #define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
41 #define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
42 #define PHY_PORT_CS1_LINK_STATE_SHIFT	26
43 
44 #define ICM_TIMEOUT			5000	/* ms */
45 #define ICM_APPROVE_TIMEOUT		10000	/* ms */
46 #define ICM_MAX_LINK			4
47 #define ICM_MAX_DEPTH			6
48 
49 /**
50  * struct icm - Internal connection manager private data
51  * @request_lock: Makes sure only one message is sent to the ICM at a time
52  * @rescan_work: Work used to rescan the surviving switches after resume
53  * @upstream_port: Pointer to the PCIe upstream port this host
54  *		   controller is connected to. This is only set for
55  *		   systems where the ICM needs to be started manually
56  * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
57  *	     (only set when @upstream_port is not %NULL)
58  * @safe_mode: ICM is in safe mode
59  * @is_supported: Checks if we can support ICM on this controller
60  * @get_mode: Read and return the ICM firmware mode (optional)
61  * @get_route: Find the route string for the given switch
62  * @driver_ready: Send driver ready message to ICM
63  * @device_connected: Handle device connected ICM message
64  * @device_disconnected: Handle device disconnected ICM message
65  * @xdomain_connected: Handle XDomain connected ICM message
66  * @xdomain_disconnected: Handle XDomain disconnected ICM message
67  */
68 struct icm {
69 	struct mutex request_lock;
70 	struct delayed_work rescan_work;
71 	struct pci_dev *upstream_port;
72 	int vnd_cap;
73 	bool safe_mode;
74 	bool (*is_supported)(struct tb *tb);
75 	int (*get_mode)(struct tb *tb);
76 	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
77 	int (*driver_ready)(struct tb *tb,
78 			    enum tb_security_level *security_level);
79 	void (*device_connected)(struct tb *tb,
80 				 const struct icm_pkg_header *hdr);
81 	void (*device_disconnected)(struct tb *tb,
82 				    const struct icm_pkg_header *hdr);
83 	void (*xdomain_connected)(struct tb *tb,
84 				  const struct icm_pkg_header *hdr);
85 	void (*xdomain_disconnected)(struct tb *tb,
86 				     const struct icm_pkg_header *hdr);
87 };
88 
89 struct icm_notification {
90 	struct work_struct work;
91 	struct icm_pkg_header *pkg;
92 	struct tb *tb;
93 };
94 
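/*
 * The connection manager private data is allocated right after struct tb
 * in the same allocation (see icm_probe() and tb_priv()), which is what
 * makes this simple pointer arithmetic back to the domain structure work.
 */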
95 static inline struct tb *icm_to_tb(struct icm *icm)
96 {
97 	return ((void *)icm - sizeof(struct tb));
98 }
99 
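/*
 * The route string encodes the port used at each hop in consecutive
 * bytes, so byte (depth - 1) holds the link number of the switch at the
 * given depth.
 */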
100 static inline u8 phy_port_from_route(u64 route, u8 depth)
101 {
102 	u8 link;
103 
104 	link = depth ? route >> ((depth - 1) * 8) : route;
105 	return tb_phy_port_from_link(link);
106 }
107 
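/* Return the other link of a dual-link pair: 1 <-> 2, 3 <-> 4 (0 stays 0). */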
108 static inline u8 dual_link_from_link(u8 link)
109 {
110 	return link ? ((link - 1) ^ 0x01) + 1 : 0;
111 }
112 
113 static inline u64 get_route(u32 route_hi, u32 route_lo)
114 {
115 	return (u64)route_hi << 32 | route_lo;
116 }
117 
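/*
 * A response from the ICM is matched to its pending request by the
 * packet type in the frame EOF field and the command code in the header.
 */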
118 static bool icm_match(const struct tb_cfg_request *req,
119 		      const struct ctl_pkg *pkg)
120 {
121 	const struct icm_pkg_header *res_hdr = pkg->buffer;
122 	const struct icm_pkg_header *req_hdr = req->request;
123 
124 	if (pkg->frame.eof != req->response_type)
125 		return false;
126 	if (res_hdr->code != req_hdr->code)
127 		return false;
128 
129 	return true;
130 }
131 
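/*
 * Responses spanning multiple packets (such as the topology dump) are
 * reassembled here: each packet is copied to its slot based on packet_id
 * and the request completes once the last packet has arrived.
 */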
132 static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
133 {
134 	const struct icm_pkg_header *hdr = pkg->buffer;
135 
136 	if (hdr->packet_id < req->npackets) {
137 		size_t offset = hdr->packet_id * req->response_size;
138 
139 		memcpy(req->response + offset, pkg->buffer, req->response_size);
140 	}
141 
142 	return hdr->packet_id == hdr->total_packets - 1;
143 }
144 
145 static int icm_request(struct tb *tb, const void *request, size_t request_size,
146 		       void *response, size_t response_size, size_t npackets,
147 		       unsigned int timeout_msec)
148 {
149 	struct icm *icm = tb_priv(tb);
150 	int retries = 3;
151 
152 	do {
153 		struct tb_cfg_request *req;
154 		struct tb_cfg_result res;
155 
156 		req = tb_cfg_request_alloc();
157 		if (!req)
158 			return -ENOMEM;
159 
160 		req->match = icm_match;
161 		req->copy = icm_copy;
162 		req->request = request;
163 		req->request_size = request_size;
164 		req->request_type = TB_CFG_PKG_ICM_CMD;
165 		req->response = response;
166 		req->npackets = npackets;
167 		req->response_size = response_size;
168 		req->response_type = TB_CFG_PKG_ICM_RESP;
169 
170 		mutex_lock(&icm->request_lock);
171 		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
172 		mutex_unlock(&icm->request_lock);
173 
174 		tb_cfg_request_put(req);
175 
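		/*
		 * Retry only on timeout. A result of 1 means the firmware
		 * replied with an error, which is reported as -EIO.
		 */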
176 		if (res.err != -ETIMEDOUT)
177 			return res.err == 1 ? -EIO : res.err;
178 
179 		usleep_range(20, 50);
180 	} while (retries--);
181 
182 	return -ETIMEDOUT;
183 }
184 
185 static bool icm_fr_is_supported(struct tb *tb)
186 {
187 	return !x86_apple_machine;
188 }
189 
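/*
 * Each port entry in the topology response carries the port type in its
 * low bits and the index of the switch behind that port in its high
 * bits; 0xff means there is no valid switch index.
 */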
190 static inline int icm_fr_get_switch_index(u32 port)
191 {
192 	int index;
193 
194 	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
195 		return 0;
196 
197 	index = port >> ICM_PORT_INDEX_SHIFT;
198 	return index != 0xff ? index : 0;
199 }
200 
201 static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
202 {
203 	struct icm_fr_pkg_get_topology_response *switches, *sw;
204 	struct icm_fr_pkg_get_topology request = {
205 		.hdr = { .code = ICM_GET_TOPOLOGY },
206 	};
207 	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
208 	int ret, index;
209 	u8 i;
210 
211 	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
212 	if (!switches)
213 		return -ENOMEM;
214 
215 	ret = icm_request(tb, &request, sizeof(request), switches,
216 			  sizeof(*switches), npackets, ICM_TIMEOUT);
217 	if (ret)
218 		goto err_free;
219 
220 	sw = &switches[0];
221 	index = icm_fr_get_switch_index(sw->ports[link]);
222 	if (!index) {
223 		ret = -ENODEV;
224 		goto err_free;
225 	}
226 
227 	sw = &switches[index];
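	/*
	 * Walk the chain one hop at a time: at each depth pick the port
	 * that leads to a switch further down the chain (one with a
	 * larger switch index) until the requested depth is reached.
	 */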
228 	for (i = 1; i < depth; i++) {
229 		unsigned int j;
230 
231 		if (!(sw->first_data & ICM_SWITCH_USED)) {
232 			ret = -ENODEV;
233 			goto err_free;
234 		}
235 
236 		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
237 			index = icm_fr_get_switch_index(sw->ports[j]);
238 			if (index > sw->switch_index) {
239 				sw = &switches[index];
240 				break;
241 			}
242 		}
243 	}
244 
245 	*route = get_route(sw->route_hi, sw->route_lo);
246 
247 err_free:
248 	kfree(switches);
249 	return ret;
250 }
251 
252 static int
253 icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level)
254 {
255 	struct icm_fr_pkg_driver_ready_response reply;
256 	struct icm_pkg_driver_ready request = {
257 		.hdr.code = ICM_DRIVER_READY,
258 	};
259 	int ret;
260 
261 	memset(&reply, 0, sizeof(reply));
262 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
263 			  1, ICM_TIMEOUT);
264 	if (ret)
265 		return ret;
266 
267 	if (security_level)
268 		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
269 
270 	return 0;
271 }
272 
273 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
274 {
275 	struct icm_fr_pkg_approve_device request;
276 	struct icm_fr_pkg_approve_device reply;
277 	int ret;
278 
279 	memset(&request, 0, sizeof(request));
280 	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
281 	request.hdr.code = ICM_APPROVE_DEVICE;
282 	request.connection_id = sw->connection_id;
283 	request.connection_key = sw->connection_key;
284 
285 	memset(&reply, 0, sizeof(reply));
286 	/* Use larger timeout as establishing tunnels can take some time */
287 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
288 			  1, ICM_APPROVE_TIMEOUT);
289 	if (ret)
290 		return ret;
291 
292 	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
293 		tb_warn(tb, "PCIe tunnel creation failed\n");
294 		return -EIO;
295 	}
296 
297 	return 0;
298 }
299 
300 static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
301 {
302 	struct icm_fr_pkg_add_device_key request;
303 	struct icm_fr_pkg_add_device_key_response reply;
304 	int ret;
305 
306 	memset(&request, 0, sizeof(request));
307 	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
308 	request.hdr.code = ICM_ADD_DEVICE_KEY;
309 	request.connection_id = sw->connection_id;
310 	request.connection_key = sw->connection_key;
311 	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
312 
313 	memset(&reply, 0, sizeof(reply));
314 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
315 			  1, ICM_TIMEOUT);
316 	if (ret)
317 		return ret;
318 
319 	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
320 		tb_warn(tb, "Adding key to switch failed\n");
321 		return -EIO;
322 	}
323 
324 	return 0;
325 }
326 
327 static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
328 				       const u8 *challenge, u8 *response)
329 {
330 	struct icm_fr_pkg_challenge_device request;
331 	struct icm_fr_pkg_challenge_device_response reply;
332 	int ret;
333 
334 	memset(&request, 0, sizeof(request));
335 	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
336 	request.hdr.code = ICM_CHALLENGE_DEVICE;
337 	request.connection_id = sw->connection_id;
338 	request.connection_key = sw->connection_key;
339 	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
340 
341 	memset(&reply, 0, sizeof(reply));
342 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
343 			  1, ICM_TIMEOUT);
344 	if (ret)
345 		return ret;
346 
347 	if (reply.hdr.flags & ICM_FLAGS_ERROR)
348 		return -EKEYREJECTED;
349 	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
350 		return -ENOKEY;
351 
352 	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
353 
354 	return 0;
355 }
356 
357 static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
358 {
359 	struct icm_fr_pkg_approve_xdomain_response reply;
360 	struct icm_fr_pkg_approve_xdomain request;
361 	int ret;
362 
363 	memset(&request, 0, sizeof(request));
364 	request.hdr.code = ICM_APPROVE_XDOMAIN;
365 	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
366 	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
367 
368 	request.transmit_path = xd->transmit_path;
369 	request.transmit_ring = xd->transmit_ring;
370 	request.receive_path = xd->receive_path;
371 	request.receive_ring = xd->receive_ring;
372 
373 	memset(&reply, 0, sizeof(reply));
374 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
375 			  1, ICM_TIMEOUT);
376 	if (ret)
377 		return ret;
378 
379 	if (reply.hdr.flags & ICM_FLAGS_ERROR)
380 		return -EIO;
381 
382 	return 0;
383 }
384 
385 static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
386 {
387 	u8 phy_port;
388 	u8 cmd;
389 
390 	phy_port = tb_phy_port_from_link(xd->link);
391 	if (phy_port == 0)
392 		cmd = NHI_MAILBOX_DISCONNECT_PA;
393 	else
394 		cmd = NHI_MAILBOX_DISCONNECT_PB;
395 
396 	nhi_mailbox_cmd(tb->nhi, cmd, 1);
397 	usleep_range(10, 50);
398 	nhi_mailbox_cmd(tb->nhi, cmd, 2);
399 	return 0;
400 }
401 
402 static void add_switch(struct tb_switch *parent_sw, u64 route,
403 		       const uuid_t *uuid, u8 connection_id, u8 connection_key,
404 		       u8 link, u8 depth, enum tb_security_level security_level,
405 		       bool authorized)
406 {
407 	struct tb_switch *sw;
408 
409 	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
410 	if (!sw)
411 		return;
412 
413 	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
414 	sw->connection_id = connection_id;
415 	sw->connection_key = connection_key;
416 	sw->link = link;
417 	sw->depth = depth;
418 	sw->authorized = authorized;
419 	sw->security_level = security_level;
420 
421 	/* Link the two switches now */
422 	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
423 	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
424 
425 	if (tb_switch_add(sw)) {
426 		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
427 		tb_switch_put(sw);
428 		return;
429 	}
430 }
431 
432 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
433 			  u64 route, u8 connection_id, u8 connection_key,
434 			  u8 link, u8 depth)
435 {
436 	/* Disconnect from parent */
437 	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
438 	/* Re-connect via updated port */
439 	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
440 
441 	/* Update with the new addressing information */
442 	sw->config.route_hi = upper_32_bits(route);
443 	sw->config.route_lo = lower_32_bits(route);
444 	sw->connection_id = connection_id;
445 	sw->connection_key = connection_key;
446 	sw->link = link;
447 	sw->depth = depth;
448 
449 	/* This switch still exists */
450 	sw->is_unplugged = false;
451 }
452 
453 static void remove_switch(struct tb_switch *sw)
454 {
455 	struct tb_switch *parent_sw;
456 
457 	parent_sw = tb_to_switch(sw->dev.parent);
458 	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
459 	tb_switch_remove(sw);
460 }
461 
462 static void add_xdomain(struct tb_switch *sw, u64 route,
463 			const uuid_t *local_uuid, const uuid_t *remote_uuid,
464 			u8 link, u8 depth)
465 {
466 	struct tb_xdomain *xd;
467 
468 	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
469 	if (!xd)
470 		return;
471 
472 	xd->link = link;
473 	xd->depth = depth;
474 
475 	tb_port_at(route, sw)->xdomain = xd;
476 
477 	tb_xdomain_add(xd);
478 }
479 
480 static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
481 {
482 	xd->link = link;
483 	xd->route = route;
484 	xd->is_unplugged = false;
485 }
486 
487 static void remove_xdomain(struct tb_xdomain *xd)
488 {
489 	struct tb_switch *sw;
490 
491 	sw = tb_to_switch(xd->dev.parent);
492 	tb_port_at(xd->route, sw)->xdomain = NULL;
493 	tb_xdomain_remove(xd);
494 }
495 
496 static void
497 icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
498 {
499 	const struct icm_fr_event_device_connected *pkg =
500 		(const struct icm_fr_event_device_connected *)hdr;
501 	enum tb_security_level security_level;
502 	struct tb_switch *sw, *parent_sw;
503 	struct icm *icm = tb_priv(tb);
504 	bool authorized = false;
505 	struct tb_xdomain *xd;
506 	u8 link, depth;
507 	u64 route;
508 	int ret;
509 
510 	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
511 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
512 		ICM_LINK_INFO_DEPTH_SHIFT;
513 	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
514 	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
515 			 ICM_FLAGS_SLEVEL_SHIFT;
516 
517 	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
518 		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
519 			link, depth);
520 		return;
521 	}
522 
523 	ret = icm->get_route(tb, link, depth, &route);
524 	if (ret) {
525 		tb_err(tb, "failed to find route string for switch at %u.%u\n",
526 		       link, depth);
527 		return;
528 	}
529 
530 	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
531 	if (sw) {
532 		u8 phy_port, sw_phy_port;
533 
534 		parent_sw = tb_to_switch(sw->dev.parent);
535 		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
536 		phy_port = phy_port_from_route(route, depth);
537 
538 		/*
539 		 * On resume ICM will send us connected events for the
540 		 * On resume the ICM sends us connected events for the
541 		 * devices that are still present. However, that
542 		 * information might have changed, for example a switch
543 		 * on a dual-link connection might now have been
544 		 * enumerated using the other link. Make sure our
545 		 * bookkeeping matches that.
546 		if (sw->depth == depth && sw_phy_port == phy_port &&
547 		    !!sw->authorized == authorized) {
548 			update_switch(parent_sw, sw, route, pkg->connection_id,
549 				      pkg->connection_key, link, depth);
550 			tb_switch_put(sw);
551 			return;
552 		}
553 
554 		/*
555 		 * User connected the same switch to another physical
556 		 * port or to another part of the topology. Remove the
557 		 * existing switch now before adding the new one.
558 		 */
559 		remove_switch(sw);
560 		tb_switch_put(sw);
561 	}
562 
563 	/*
564 	 * If the switch was not found by UUID, look for a switch on the
565 	 * same physical port (taking possible link aggregation into
566 	 * account) and depth. If we find one it is definitely stale,
567 	 * so remove it first.
568 	 */
569 	sw = tb_switch_find_by_link_depth(tb, link, depth);
570 	if (!sw) {
571 		u8 dual_link;
572 
573 		dual_link = dual_link_from_link(link);
574 		if (dual_link)
575 			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
576 	}
577 	if (sw) {
578 		remove_switch(sw);
579 		tb_switch_put(sw);
580 	}
581 
582 	/* Remove existing XDomain connection if found */
583 	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
584 	if (xd) {
585 		remove_xdomain(xd);
586 		tb_xdomain_put(xd);
587 	}
588 
589 	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
590 	if (!parent_sw) {
591 		tb_err(tb, "failed to find parent switch for %u.%u\n",
592 		       link, depth);
593 		return;
594 	}
595 
596 	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
597 		   pkg->connection_key, link, depth, security_level,
598 		   authorized);
599 
600 	tb_switch_put(parent_sw);
601 }
602 
603 static void
604 icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
605 {
606 	const struct icm_fr_event_device_disconnected *pkg =
607 		(const struct icm_fr_event_device_disconnected *)hdr;
608 	struct tb_switch *sw;
609 	u8 link, depth;
610 
611 	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
612 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
613 		ICM_LINK_INFO_DEPTH_SHIFT;
614 
615 	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
616 		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
617 		return;
618 	}
619 
620 	sw = tb_switch_find_by_link_depth(tb, link, depth);
621 	if (!sw) {
622 		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
623 			depth);
624 		return;
625 	}
626 
627 	remove_switch(sw);
628 	tb_switch_put(sw);
629 }
630 
631 static void
632 icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
633 {
634 	const struct icm_fr_event_xdomain_connected *pkg =
635 		(const struct icm_fr_event_xdomain_connected *)hdr;
636 	struct tb_xdomain *xd;
637 	struct tb_switch *sw;
638 	u8 link, depth;
639 	bool approved;
640 	u64 route;
641 
642 	/*
643 	 * After an NVM upgrade, adding the root switch device fails
644 	 * because we initiated a reset. During that time the ICM might
645 	 * still send XDomain connected messages, which we ignore here.
646 	 */
647 	if (!tb->root_switch)
648 		return;
649 
650 	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
651 	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
652 		ICM_LINK_INFO_DEPTH_SHIFT;
653 	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
654 
655 	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
656 		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
657 		return;
658 	}
659 
660 	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
661 
662 	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
663 	if (xd) {
664 		u8 xd_phy_port, phy_port;
665 
666 		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
667 		phy_port = phy_port_from_route(route, depth);
668 
669 		if (xd->depth == depth && xd_phy_port == phy_port) {
670 			update_xdomain(xd, route, link);
671 			tb_xdomain_put(xd);
672 			return;
673 		}
674 
675 		/*
676 		 * If we find an existing XDomain connection remove it
677 		 * now. We need to go through login handshake and
678 		 * everything anyway to be able to re-establish the
679 		 * connection.
680 		 */
681 		remove_xdomain(xd);
682 		tb_xdomain_put(xd);
683 	}
684 
685 	/*
686 	 * Check whether an XDomain already exists in the same place as
687 	 * the new one and in that case remove it, because it is most
688 	 * likely another host that got disconnected.
689 	 */
690 	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
691 	if (!xd) {
692 		u8 dual_link;
693 
694 		dual_link = dual_link_from_link(link);
695 		if (dual_link)
696 			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
697 							   depth);
698 	}
699 	if (xd) {
700 		remove_xdomain(xd);
701 		tb_xdomain_put(xd);
702 	}
703 
704 	/*
705 	 * If the user disconnected a switch during suspend and
706 	 * connected another host to the same port, remove the switch
707 	 * first.
708 	 */
709 	sw = get_switch_at_route(tb->root_switch, route);
710 	if (sw)
711 		remove_switch(sw);
712 
713 	sw = tb_switch_find_by_link_depth(tb, link, depth);
714 	if (!sw) {
715 		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
716 			depth);
717 		return;
718 	}
719 
720 	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
721 		    depth);
722 	tb_switch_put(sw);
723 }
724 
725 static void
726 icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
727 {
728 	const struct icm_fr_event_xdomain_disconnected *pkg =
729 		(const struct icm_fr_event_xdomain_disconnected *)hdr;
730 	struct tb_xdomain *xd;
731 
732 	/*
733 	 * If the connection is through one or multiple devices, the
734 	 * XDomain device is removed along with them so it is fine if we
735 	 * cannot find it here.
736 	 */
737 	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
738 	if (xd) {
739 		remove_xdomain(xd);
740 		tb_xdomain_put(xd);
741 	}
742 }
743 
744 static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
745 {
746 	struct pci_dev *parent;
747 
748 	parent = pci_upstream_bridge(pdev);
749 	while (parent) {
750 		if (!pci_is_pcie(parent))
751 			return NULL;
752 		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
753 			break;
754 		parent = pci_upstream_bridge(parent);
755 	}
756 
757 	if (!parent)
758 		return NULL;
759 
760 	switch (parent->device) {
761 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
762 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
763 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
764 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
765 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
766 		return parent;
767 	}
768 
769 	return NULL;
770 }
771 
772 static bool icm_ar_is_supported(struct tb *tb)
773 {
774 	struct pci_dev *upstream_port;
775 	struct icm *icm = tb_priv(tb);
776 
777 	/*
778 	 * Starting from Alpine Ridge we can use ICM on Apple machines
779 	 * as well. We just need to reset and re-enable it first.
780 	 */
781 	if (!x86_apple_machine)
782 		return true;
783 
784 	/*
785 	 * Find the upstream PCIe port in case we need to do the reset
786 	 * through its vendor specific registers.
787 	 */
788 	upstream_port = get_upstream_port(tb->nhi->pdev);
789 	if (upstream_port) {
790 		int cap;
791 
792 		cap = pci_find_ext_capability(upstream_port,
793 					      PCI_EXT_CAP_ID_VNDR);
794 		if (cap > 0) {
795 			icm->upstream_port = upstream_port;
796 			icm->vnd_cap = cap;
797 
798 			return true;
799 		}
800 	}
801 
802 	return false;
803 }
804 
805 static int icm_ar_get_mode(struct tb *tb)
806 {
807 	struct tb_nhi *nhi = tb->nhi;
808 	int retries = 60;
809 	u32 val;
810 
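	/*
	 * Wait for the firmware to finish NVM authentication before
	 * asking which mode it is in over the mailbox.
	 */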
811 	do {
812 		val = ioread32(nhi->iobase + REG_FW_STS);
813 		if (val & REG_FW_STS_NVM_AUTH_DONE)
814 			break;
815 		msleep(50);
816 	} while (--retries);
817 
818 	if (!retries) {
819 		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
820 		return -ENODEV;
821 	}
822 
823 	return nhi_mailbox_mode(nhi);
824 }
825 
826 static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
827 {
828 	struct icm_ar_pkg_get_route_response reply;
829 	struct icm_ar_pkg_get_route request = {
830 		.hdr = { .code = ICM_GET_ROUTE },
831 		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
832 	};
833 	int ret;
834 
835 	memset(&reply, 0, sizeof(reply));
836 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
837 			  1, ICM_TIMEOUT);
838 	if (ret)
839 		return ret;
840 
841 	if (reply.hdr.flags & ICM_FLAGS_ERROR)
842 		return -EIO;
843 
844 	*route = get_route(reply.route_hi, reply.route_lo);
845 	return 0;
846 }
847 
848 static void icm_handle_notification(struct work_struct *work)
849 {
850 	struct icm_notification *n = container_of(work, typeof(*n), work);
851 	struct tb *tb = n->tb;
852 	struct icm *icm = tb_priv(tb);
853 
854 	mutex_lock(&tb->lock);
855 
856 	switch (n->pkg->code) {
857 	case ICM_EVENT_DEVICE_CONNECTED:
858 		icm->device_connected(tb, n->pkg);
859 		break;
860 	case ICM_EVENT_DEVICE_DISCONNECTED:
861 		icm->device_disconnected(tb, n->pkg);
862 		break;
863 	case ICM_EVENT_XDOMAIN_CONNECTED:
864 		icm->xdomain_connected(tb, n->pkg);
865 		break;
866 	case ICM_EVENT_XDOMAIN_DISCONNECTED:
867 		icm->xdomain_disconnected(tb, n->pkg);
868 		break;
869 	}
870 
871 	mutex_unlock(&tb->lock);
872 
873 	kfree(n->pkg);
874 	kfree(n);
875 }
876 
877 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
878 			     const void *buf, size_t size)
879 {
880 	struct icm_notification *n;
881 
882 	n = kmalloc(sizeof(*n), GFP_KERNEL);
883 	if (!n)
884 		return;
885 
886 	INIT_WORK(&n->work, icm_handle_notification);
887 	n->pkg = kmemdup(buf, size, GFP_KERNEL);
888 	n->tb = tb;
889 
890 	queue_work(tb->wq, &n->work);
891 }
892 
893 static int
894 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
895 {
896 	struct icm *icm = tb_priv(tb);
897 	unsigned int retries = 50;
898 	int ret;
899 
900 	ret = icm->driver_ready(tb, security_level);
901 	if (ret) {
902 		tb_err(tb, "failed to send driver ready to ICM\n");
903 		return ret;
904 	}
905 
906 	/*
907 	 * Wait here until the switch config space is accessible so
908 	 * that we can read the root switch config successfully.
909 	 */
910 	do {
911 		struct tb_cfg_result res;
912 		u32 tmp;
913 
914 		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
915 				      0, 1, 100);
916 		if (!res.err)
917 			return 0;
918 
919 		msleep(50);
920 	} while (--retries);
921 
922 	tb_err(tb, "failed to read root switch config space, giving up\n");
923 	return -ETIMEDOUT;
924 }
925 
926 static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
927 {
928 	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
929 	u32 cmd;
930 
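	/*
	 * Poll until the firmware clears the START bit; if the TIMEOUT
	 * bit is set instead the PCIe2CIO access timed out.
	 */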
931 	do {
932 		pci_read_config_dword(icm->upstream_port,
933 				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
934 		if (!(cmd & PCIE2CIO_CMD_START)) {
935 			if (cmd & PCIE2CIO_CMD_TIMEOUT)
936 				break;
937 			return 0;
938 		}
939 
940 		msleep(50);
941 	} while (time_before(jiffies, end));
942 
943 	return -ETIMEDOUT;
944 }
945 
946 static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
947 			 unsigned int port, unsigned int index, u32 *data)
948 {
949 	struct pci_dev *pdev = icm->upstream_port;
950 	int ret, vnd_cap = icm->vnd_cap;
951 	u32 cmd;
952 
953 	cmd = index;
954 	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
955 	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
956 	cmd |= PCIE2CIO_CMD_START;
957 	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
958 
959 	ret = pci2cio_wait_completion(icm, 5000);
960 	if (ret)
961 		return ret;
962 
963 	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
964 	return 0;
965 }
966 
967 static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
968 			  unsigned int port, unsigned int index, u32 data)
969 {
970 	struct pci_dev *pdev = icm->upstream_port;
971 	int vnd_cap = icm->vnd_cap;
972 	u32 cmd;
973 
974 	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
975 
976 	cmd = index;
977 	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
978 	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
979 	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
980 	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
981 
982 	return pci2cio_wait_completion(icm, 5000);
983 }
984 
985 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
986 {
987 	struct icm *icm = tb_priv(tb);
988 	u32 val;
989 
990 	/* Make the ARC wait for the CIO reset event to happen */
991 	val = ioread32(nhi->iobase + REG_FW_STS);
992 	val |= REG_FW_STS_CIO_RESET_REQ;
993 	iowrite32(val, nhi->iobase + REG_FW_STS);
994 
995 	/* Re-start ARC */
996 	val = ioread32(nhi->iobase + REG_FW_STS);
997 	val |= REG_FW_STS_ICM_EN_INVERT;
998 	val |= REG_FW_STS_ICM_EN_CPU;
999 	iowrite32(val, nhi->iobase + REG_FW_STS);
1000 
1001 	/* Trigger CIO reset now */
1002 	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
1003 }
1004 
1005 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1006 {
1007 	unsigned int retries = 10;
1008 	int ret;
1009 	u32 val;
1010 
1011 	/* Check if the ICM firmware is already running */
1012 	val = ioread32(nhi->iobase + REG_FW_STS);
1013 	if (val & REG_FW_STS_ICM_EN)
1014 		return 0;
1015 
1016 	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
1017 
1018 	ret = icm_firmware_reset(tb, nhi);
1019 	if (ret)
1020 		return ret;
1021 
1022 	/* Wait until the ICM firmware tells us it is up and running */
1023 	do {
1024 		/* Check that the ICM firmware is running */
1025 		val = ioread32(nhi->iobase + REG_FW_STS);
1026 		if (val & REG_FW_STS_NVM_AUTH_DONE)
1027 			return 0;
1028 
1029 		msleep(300);
1030 	} while (--retries);
1031 
1032 	return -ETIMEDOUT;
1033 }
1034 
1035 static int icm_reset_phy_port(struct tb *tb, int phy_port)
1036 {
1037 	struct icm *icm = tb_priv(tb);
1038 	u32 state0, state1;
1039 	int port0, port1;
1040 	u32 val0, val1;
1041 	int ret;
1042 
1043 	if (!icm->upstream_port)
1044 		return 0;
1045 
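	/*
	 * Each physical port consists of two lane (null) ports: 1 and 2
	 * belong to physical port 0, 3 and 4 to physical port 1.
	 */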
1046 	if (phy_port) {
1047 		port0 = 3;
1048 		port1 = 4;
1049 	} else {
1050 		port0 = 1;
1051 		port1 = 2;
1052 	}
1053 
1054 	/*
1055 	 * Read link status of both null ports belonging to a single
1056 	 * physical port.
1057 	 */
1058 	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1059 	if (ret)
1060 		return ret;
1061 	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1062 	if (ret)
1063 		return ret;
1064 
1065 	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
1066 	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1067 	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
1068 	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1069 
1070 	/* If they are both up we need to reset them now */
1071 	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
1072 		return 0;
1073 
1074 	val0 |= PHY_PORT_CS1_LINK_DISABLE;
1075 	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1076 	if (ret)
1077 		return ret;
1078 
1079 	val1 |= PHY_PORT_CS1_LINK_DISABLE;
1080 	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1081 	if (ret)
1082 		return ret;
1083 
1084 	/* Wait a bit and then re-enable both ports */
1085 	usleep_range(10, 100);
1086 
1087 	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1088 	if (ret)
1089 		return ret;
1090 	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1091 	if (ret)
1092 		return ret;
1093 
1094 	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
1095 	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1096 	if (ret)
1097 		return ret;
1098 
1099 	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
1100 	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1101 }
1102 
1103 static int icm_firmware_init(struct tb *tb)
1104 {
1105 	struct icm *icm = tb_priv(tb);
1106 	struct tb_nhi *nhi = tb->nhi;
1107 	int ret;
1108 
1109 	ret = icm_firmware_start(tb, nhi);
1110 	if (ret) {
1111 		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1112 		return ret;
1113 	}
1114 
1115 	if (icm->get_mode) {
1116 		ret = icm->get_mode(tb);
1117 
1118 		switch (ret) {
1119 		case NHI_FW_SAFE_MODE:
1120 			icm->safe_mode = true;
1121 			break;
1122 
1123 		case NHI_FW_CM_MODE:
1124 			/* Ask ICM to accept all Thunderbolt devices */
1125 			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1126 			break;
1127 
1128 		default:
1129 			if (ret < 0)
1130 				return ret;
1131 
1132 			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
1133 			return -ENODEV;
1134 		}
1135 	}
1136 
1137 	/*
1138 	 * Reset both physical ports if there is anything connected to
1139 	 * them already.
1140 	 */
1141 	ret = icm_reset_phy_port(tb, 0);
1142 	if (ret)
1143 		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
1144 	ret = icm_reset_phy_port(tb, 1);
1145 	if (ret)
1146 		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1147 
1148 	return 0;
1149 }
1150 
1151 static int icm_driver_ready(struct tb *tb)
1152 {
1153 	struct icm *icm = tb_priv(tb);
1154 	int ret;
1155 
1156 	ret = icm_firmware_init(tb);
1157 	if (ret)
1158 		return ret;
1159 
1160 	if (icm->safe_mode) {
1161 		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1162 		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
1163 		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
1164 		return 0;
1165 	}
1166 
1167 	return __icm_driver_ready(tb, &tb->security_level);
1168 }
1169 
1170 static int icm_suspend(struct tb *tb)
1171 {
1172 	int ret;
1173 
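	/*
	 * NHI_MAILBOX_SAVE_DEVS presumably asks the firmware to preserve
	 * the currently connected devices over suspend; a failure is only
	 * logged, not treated as fatal.
	 */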
1174 	ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
1175 	if (ret)
1176 		tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
1177 			ret, __func__);
1178 
1179 	return 0;
1180 }
1181 
1182 /*
1183  * Mark all switches (except the root switch) below this one as
1184  * unplugged. The ICM firmware will send us an updated list of switches
1185  * after we have sent it the driver ready command. If a switch is not in
1186  * that list it will be removed when we perform the rescan.
1187  */
1188 static void icm_unplug_children(struct tb_switch *sw)
1189 {
1190 	unsigned int i;
1191 
1192 	if (tb_route(sw))
1193 		sw->is_unplugged = true;
1194 
1195 	for (i = 1; i <= sw->config.max_port_number; i++) {
1196 		struct tb_port *port = &sw->ports[i];
1197 
1198 		if (tb_is_upstream_port(port))
1199 			continue;
1200 		if (port->xdomain) {
1201 			port->xdomain->is_unplugged = true;
1202 			continue;
1203 		}
1204 		if (!port->remote)
1205 			continue;
1206 
1207 		icm_unplug_children(port->remote->sw);
1208 	}
1209 }
1210 
1211 static void icm_free_unplugged_children(struct tb_switch *sw)
1212 {
1213 	unsigned int i;
1214 
1215 	for (i = 1; i <= sw->config.max_port_number; i++) {
1216 		struct tb_port *port = &sw->ports[i];
1217 
1218 		if (tb_is_upstream_port(port))
1219 			continue;
1220 
1221 		if (port->xdomain && port->xdomain->is_unplugged) {
1222 			tb_xdomain_remove(port->xdomain);
1223 			port->xdomain = NULL;
1224 			continue;
1225 		}
1226 
1227 		if (!port->remote)
1228 			continue;
1229 
1230 		if (port->remote->sw->is_unplugged) {
1231 			tb_switch_remove(port->remote->sw);
1232 			port->remote = NULL;
1233 		} else {
1234 			icm_free_unplugged_children(port->remote->sw);
1235 		}
1236 	}
1237 }
1238 
1239 static void icm_rescan_work(struct work_struct *work)
1240 {
1241 	struct icm *icm = container_of(work, struct icm, rescan_work.work);
1242 	struct tb *tb = icm_to_tb(icm);
1243 
1244 	mutex_lock(&tb->lock);
1245 	if (tb->root_switch)
1246 		icm_free_unplugged_children(tb->root_switch);
1247 	mutex_unlock(&tb->lock);
1248 }
1249 
1250 static void icm_complete(struct tb *tb)
1251 {
1252 	struct icm *icm = tb_priv(tb);
1253 
1254 	if (tb->nhi->going_away)
1255 		return;
1256 
1257 	icm_unplug_children(tb->root_switch);
1258 
1259 	/*
1260 	 * Now that all existing children should be resumed, start
1261 	 * events from the ICM to get updated status.
1262 	 */
1263 	__icm_driver_ready(tb, NULL);
1264 
1265 	/*
1266 	 * We do not get notifications of devices that have been
1267 	 * unplugged during suspend, so schedule a rescan to clean them
1268 	 * up if any.
1269 	 */
1270 	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
1271 }
1272 
1273 static int icm_start(struct tb *tb)
1274 {
1275 	struct icm *icm = tb_priv(tb);
1276 	int ret;
1277 
1278 	if (icm->safe_mode)
1279 		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
1280 	else
1281 		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1282 	if (!tb->root_switch)
1283 		return -ENODEV;
1284 
1285 	/*
1286 	 * NVM upgrade has not been tested on Apple systems and they
1287 	 * don't provide images publicly either. To be on the safe side
1288 	 * prevent root switch NVM upgrade on Macs for now.
1289 	 */
1290 	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
1291 
1292 	ret = tb_switch_add(tb->root_switch);
1293 	if (ret) {
1294 		tb_switch_put(tb->root_switch);
1295 		tb->root_switch = NULL;
1296 	}
1297 
1298 	return ret;
1299 }
1300 
1301 static void icm_stop(struct tb *tb)
1302 {
1303 	struct icm *icm = tb_priv(tb);
1304 
1305 	cancel_delayed_work(&icm->rescan_work);
1306 	tb_switch_remove(tb->root_switch);
1307 	tb->root_switch = NULL;
1308 	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1309 }
1310 
1311 static int icm_disconnect_pcie_paths(struct tb *tb)
1312 {
1313 	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
1314 }
1315 
1316 /* Falcon Ridge and Alpine Ridge */
1317 static const struct tb_cm_ops icm_fr_ops = {
1318 	.driver_ready = icm_driver_ready,
1319 	.start = icm_start,
1320 	.stop = icm_stop,
1321 	.suspend = icm_suspend,
1322 	.complete = icm_complete,
1323 	.handle_event = icm_handle_event,
1324 	.approve_switch = icm_fr_approve_switch,
1325 	.add_switch_key = icm_fr_add_switch_key,
1326 	.challenge_switch_key = icm_fr_challenge_switch_key,
1327 	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
1328 	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1329 	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1330 };
1331 
1332 struct tb *icm_probe(struct tb_nhi *nhi)
1333 {
1334 	struct icm *icm;
1335 	struct tb *tb;
1336 
1337 	tb = tb_domain_alloc(nhi, sizeof(struct icm));
1338 	if (!tb)
1339 		return NULL;
1340 
1341 	icm = tb_priv(tb);
1342 	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
1343 	mutex_init(&icm->request_lock);
1344 
1345 	switch (nhi->pdev->device) {
1346 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1347 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1348 		icm->is_supported = icm_fr_is_supported;
1349 		icm->get_route = icm_fr_get_route;
1350 		icm->driver_ready = icm_fr_driver_ready;
1351 		icm->device_connected = icm_fr_device_connected;
1352 		icm->device_disconnected = icm_fr_device_disconnected;
1353 		icm->xdomain_connected = icm_fr_xdomain_connected;
1354 		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
1355 		tb->cm_ops = &icm_fr_ops;
1356 		break;
1357 
1358 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
1359 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
1360 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
1361 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
1362 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
1363 		icm->is_supported = icm_ar_is_supported;
1364 		icm->get_mode = icm_ar_get_mode;
1365 		icm->get_route = icm_ar_get_route;
1366 		icm->driver_ready = icm_fr_driver_ready;
1367 		icm->device_connected = icm_fr_device_connected;
1368 		icm->device_disconnected = icm_fr_device_disconnected;
1369 		icm->xdomain_connected = icm_fr_xdomain_connected;
1370 		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
1371 		tb->cm_ops = &icm_fr_ops;
1372 		break;
1373 	}
1374 
1375 	if (!icm->is_supported || !icm->is_supported(tb)) {
1376 		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
1377 		tb_domain_put(tb);
1378 		return NULL;
1379 	}
1380 
1381 	return tb;
1382 }
1383