/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000 /* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	int vnd_cap;
	bool safe_mode;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

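/*
 * The connection manager private data is allocated right after struct
 * tb by tb_domain_alloc(), so we can get from the private data back to
 * the domain with simple pointer arithmetic.
 */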
static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
}

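/*
 * Each physical port consists of two links (lanes). Return the other
 * link of the pair (1 <-> 2, 3 <-> 4), or 0 if the link is not in use.
 */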
static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

static inline bool is_apple(void)
{
	return dmi_match(DMI_BOARD_VENDOR, "Apple Inc.");
}

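/*
 * Unlike regular config packets, ICM responses carry no route string,
 * so match them to the pending request by frame type and message code.
 */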
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}

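/*
 * Send a message to the ICM and wait for the matching response. The
 * request is retried a few times in case the firmware is busy and the
 * request times out.
 */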
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

static bool icm_fr_is_supported(struct tb *tb)
{
	return !is_apple();
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

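/*
 * Falcon Ridge firmware has no dedicated GET_ROUTE command, so fetch
 * the whole topology and walk it from the root switch down to the
 * given link and depth to reconstruct the route string.
 */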
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 10000);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

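/*
 * Clear the remote pointer of the parent port before removing the
 * switch so that the parent is not left with a stale pointer.
 */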
static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	u8 link, depth;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
		phy_port = phy_port_from_route(route, depth);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that are still present. However, that
		 * information might have changed, for example because a
		 * switch on a dual-link connection might now have been
		 * enumerated using the other link. Make sure our
		 * bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
			tb_port_at(route, parent_sw)->remote =
				   tb_upstream_port(sw);
			sw->config.route_hi = upper_32_bits(route);
			sw->config.route_lo = lower_32_bits(route);
			sw->connection_id = pkg->connection_id;
			sw->connection_key = pkg->connection_key;
			sw->link = link;
			sw->depth = depth;
			sw->is_unplugged = false;
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one, it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
	if (!sw) {
		tb_switch_put(parent_sw);
		return;
	}

	sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
	sw->connection_id = pkg->connection_id;
	sw->connection_key = pkg->connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->authorized = authorized;
	sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
				ICM_FLAGS_SLEVEL_SHIFT;

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	ret = tb_switch_add(sw);
	if (ret) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
		tb_switch_put(sw);
	}
	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

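/*
 * Walk up the PCI hierarchy until we find the PCIe upstream port of
 * the Thunderbolt controller. Only the known Alpine Ridge bridges are
 * accepted since those carry the vendor specific registers we need.
 */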
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!is_apple())
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do a reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

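/*
 * The firmware mode can only be queried reliably once the ICM has
 * finished authenticating its NVM image, so wait for that first.
 */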
static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 5;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(30);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	switch (n->pkg->code) {
	case ICM_EVENT_DEVICE_CONNECTED:
		icm->device_connected(tb, n->pkg);
		break;
	case ICM_EVENT_DEVICE_DISCONNECTED:
		icm->device_disconnected(tb, n->pkg);
		break;
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}

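/*
 * Event handlers take tb->lock and may send further ICM requests, so
 * copy the incoming packet and defer processing to the domain
 * workqueue instead of handling it in the control channel callback.
 */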
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	if (!n->pkg) {
		kfree(n);
		return;
	}
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
{
	struct icm_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	unsigned int retries = 10;
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & 0xf;

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	return -ETIMEDOUT;
}

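/*
 * Poll the PCIe2CIO command register until the pending transaction
 * completes, the hardware flags a timeout or timeout_msec expires.
 */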
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

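/*
 * The PCIe2CIO mailbox in the upstream bridge vendor capability lets
 * us issue CIO config space reads and writes before the ICM firmware
 * is up and running.
 */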
static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

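/*
 * When we start the ICM ourselves, links that the boot firmware
 * already brought up need to be taken down and back up so that the
 * connected devices get re-enumerated through the ICM.
 */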
static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}

static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	return __icm_driver_ready(tb, &tb->security_level);
}

static int icm_suspend(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

/*
 * Mark all switches (except the root switch) below this one unplugged.
 * The ICM firmware will send us an updated list of switches after we
 * have sent it the driver ready command. If a switch is not in that
 * list it will be removed when we perform the rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}

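/*
 * Remove all children that are still marked unplugged, i.e. the ones
 * the firmware did not report back after resume.
 */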
static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}

static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now that all existing children should be resumed, start
	 * events from the ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL);

	/*
	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule a rescan to clean them
	 * up if any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = is_apple();

	ret = tb_switch_add(tb->root_switch);
	if (ret)
		tb_switch_put(tb->root_switch);

	return ret;
}

static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge and Alpine Ridge */
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
};

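/*
 * Allocate an ICM managed domain for the given NHI. Returns NULL if
 * the controller or the platform cannot use firmware based connection
 * management.
 */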
struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}