/*
 * Internal Thunderbolt Connection Manager. The ICM is firmware running
 * on the Thunderbolt host controller that performs most of the
 * low-level handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

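/*
 * Illustrative sketch (not used directly by the driver): reading
 * PHY_PORT_CS1 of port 1 in the port config space builds the command
 * dword roughly like this before writing it to PCIE2CIO_CMD in the
 * vendor specific capability (see pcie2cio_read() below for the real
 * thing):
 *
 *	u32 cmd = PHY_PORT_CS1;
 *	cmd |= (1 << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
 *	cmd |= (TB_CFG_PORT << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
 *	cmd |= PCIE2CIO_CMD_START;
 *
 * Completion is signalled by the hardware clearing PCIE2CIO_CMD_START;
 * PCIE2CIO_CMD_TIMEOUT set at that point indicates failure.
 */
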
#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	int vnd_cap;
	bool safe_mode;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

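/*
 * The connection manager private data is allocated immediately after
 * struct tb by tb_domain_alloc() (see icm_probe() at the end of this
 * file), so backing up by sizeof(struct tb) from the private data
 * recovers the domain structure. That layout assumption is what makes
 * the pointer arithmetic in icm_to_tb() below safe.
 */
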
static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}

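/*
 * Worked example (values illustrative): a route string carries one
 * byte per hop, so for route 0x301 at depth 2 the link number of the
 * last hop is route >> 8 == 3. Assuming two links per physical port,
 * tb_phy_port_from_link() maps links 1-2 to phy port 0 and links 3-4
 * to phy port 1, so the result here would be 1. At depth 0 the route
 * value itself is used as the link number.
 */
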
static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

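/*
 * The XOR trick above pairs up the two links of a dual-link
 * connection: 1 <-> 2 and 3 <-> 4, while 0 (no link) maps to 0.
 * For example ((3 - 1) ^ 0x01) + 1 == 4.
 */
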
static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

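/*
 * ICM messages carry route strings split into two 32-bit words; this
 * helper simply glues them back together. For example
 * get_route(0x1, 0x2) == 0x100000002ULL.
 */
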
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}

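/*
 * icm_copy() reassembles multi-packet responses: each received packet
 * is placed at packet_id * response_size within the caller supplied
 * buffer, and the request completes once the last packet arrives. For
 * instance icm_fr_get_route() below expects a topology response split
 * into ICM_GET_TOPOLOGY_PACKETS frames and passes a buffer sized
 * accordingly.
 */
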
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

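/*
 * Typical usage, mirroring __icm_driver_ready() later in this file.
 * The request is retried up to three times if the ICM does not answer
 * within the timeout:
 *
 *	struct icm_pkg_driver_ready_response reply;
 *	struct icm_pkg_driver_ready request = {
 *		.hdr.code = ICM_DRIVER_READY,
 *	};
 *	int ret;
 *
 *	memset(&reply, 0, sizeof(reply));
 *	ret = icm_request(tb, &request, sizeof(request), &reply,
 *			  sizeof(reply), 1, ICM_TIMEOUT);
 */
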
static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

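/*
 * In the topology response parsed by icm_fr_get_route() below, each
 * port entry packs a port type in its low bits and the index of the
 * switch behind that port above ICM_PORT_INDEX_SHIFT; an index of
 * 0xff means nothing is connected there. icm_fr_get_switch_index()
 * therefore returns 0 both for entries that are not physical ports
 * and for empty ports.
 */
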
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

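/*
 * The link_info word used above and decoded in the event handlers
 * below packs the topology position into a single field. Encoding and
 * decoding are symmetric:
 *
 *	link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link;
 *
 *	link = link_info & ICM_LINK_INFO_LINK_MASK;
 *	depth = (link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 *		ICM_LINK_INFO_DEPTH_SHIFT;
 */
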
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}

static void add_switch(struct tb_switch *parent_sw, u64 route,
		       const uuid_t *uuid, u8 connection_id, u8 connection_key,
		       u8 link, u8 depth, enum tb_security_level security_level,
		       bool authorized)
{
	struct tb_switch *sw;

	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
	if (!sw)
		return;

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->authorized = authorized;
	sw->security_level = security_level;

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	if (tb_switch_add(sw)) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
		tb_switch_put(sw);
		return;
	}
}

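/*
 * A note on route strings, which the helpers above and below rely on:
 * each byte of the route holds the downstream port number taken at one
 * hop, with the port out of the root switch in the least significant
 * byte. For an illustrative route 0x301 (port 1 out of the root, then
 * port 3 out of the depth 1 switch), tb_port_at(route, parent_sw)
 * resolves the parent's port 3 through which the child hangs, and
 * update_switch() below only needs to rewrite route_hi/route_lo when
 * that position changes.
 */
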
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth)
{
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;

	/* This switch still exists */
	sw->is_unplugged = false;
}

static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		return;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because the topology limit was exceeded\n",
			link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
		phy_port = phy_port_from_route(route, depth);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
		   pkg->connection_key, link, depth, security_level,
		   authorized);

	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	bool approved;
	u64 route;

	/*
	 * After an NVM upgrade, adding the root switch device fails
	 * because we initiated a reset. During that time ICM might
	 * still send XDomain connected messages which we ignore here.
	 */
	if (!tb->root_switch)
		return;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Check whether an XDomain already exists in the same place as
	 * the new one and in that case remove it because it is most
	 * likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	switch (n->pkg->code) {
	case ICM_EVENT_DEVICE_CONNECTED:
		icm->device_connected(tb, n->pkg);
		break;
	case ICM_EVENT_DEVICE_DISCONNECTED:
		icm->device_disconnected(tb, n->pkg);
		break;
	case ICM_EVENT_XDOMAIN_CONNECTED:
		icm->xdomain_connected(tb, n->pkg);
		break;
	case ICM_EVENT_XDOMAIN_DISCONNECTED:
		icm->xdomain_disconnected(tb, n->pkg);
		break;
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	/* Drop the event if we cannot copy the packet */
	if (!n->pkg) {
		kfree(n);
		return;
	}
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
{
	struct icm_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	unsigned int retries = 50;
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & 0xf;

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read the root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");
	return -ETIMEDOUT;
}

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

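/*
 * Example use of the PCIe2CIO mailbox: icm_firmware_reset() below
 * triggers a CIO reset by writing BIT(9) to dword 0x50 of the root
 * switch config space:
 *
 *	pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
 */
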
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	/* Make the ARC wait for the CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			if (ret < 0)
				return ret;

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}

static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	return __icm_driver_ready(tb, &tb->security_level);
}

static int icm_suspend(struct tb *tb)
{
	int ret;

	ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
	if (ret)
		tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
			ret, __func__);

	return 0;
}

/*
 * Mark all switches (except the root switch) below this one unplugged.
 * ICM firmware will send us an updated list of switches after we have
 * sent it the driver ready command. If a switch is not in that list it
 * will be removed when we perform the rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			continue;
		}
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			continue;
		}

		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}

static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now that all existing children should be resumed, start
	 * events from ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL);

	/*
	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule a rescan to clean them
	 * up if there are any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;

	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		tb->root_switch = NULL;
	}

	return ret;
}

static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge and Alpine Ridge */
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}
1363