xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 81a54b5e)
1 /*
2  * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
3  *
4  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5  */
6 
7 #include <linux/slab.h>
8 #include <linux/errno.h>
9 #include <linux/delay.h>
10 
11 #include "tb.h"
12 #include "tb_regs.h"
13 #include "tunnel_pci.h"
14 
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels (struct tb_pci_tunnel entries,
 *		 linked via their @list member)
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 *
 * Lives in the tb_priv() area of the domain; protected by tb->lock.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};
27 
28 /* enumeration & hot plug handling */
29 
30 
31 static void tb_scan_port(struct tb_port *port);
32 
33 /**
34  * tb_scan_switch() - scan for and initialize downstream switches
35  */
36 static void tb_scan_switch(struct tb_switch *sw)
37 {
38 	int i;
39 	for (i = 1; i <= sw->config.max_port_number; i++)
40 		tb_scan_port(&sw->ports[i]);
41 }
42 
43 /**
44  * tb_scan_port() - check for and initialize switches below port
45  */
46 static void tb_scan_port(struct tb_port *port)
47 {
48 	struct tb_switch *sw;
49 	if (tb_is_upstream_port(port))
50 		return;
51 	if (port->config.type != TB_TYPE_PORT)
52 		return;
53 	if (port->dual_link_port && port->link_nr)
54 		return; /*
55 			 * Downstream switch is reachable through two ports.
56 			 * Only scan on the primary port (link_nr == 0).
57 			 */
58 	if (tb_wait_for_port(port, false) <= 0)
59 		return;
60 	if (port->remote) {
61 		tb_port_WARN(port, "port already has a remote!\n");
62 		return;
63 	}
64 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
65 			     tb_downstream_route(port));
66 	if (!sw)
67 		return;
68 
69 	if (tb_switch_configure(sw)) {
70 		tb_switch_put(sw);
71 		return;
72 	}
73 
74 	if (tb_switch_add(sw)) {
75 		tb_switch_put(sw);
76 		return;
77 	}
78 
79 	port->remote = tb_upstream_port(sw);
80 	tb_upstream_port(sw)->remote = port;
81 	tb_scan_switch(sw);
82 }
83 
84 /**
85  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
86  */
87 static void tb_free_invalid_tunnels(struct tb *tb)
88 {
89 	struct tb_cm *tcm = tb_priv(tb);
90 	struct tb_pci_tunnel *tunnel;
91 	struct tb_pci_tunnel *n;
92 
93 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
94 		if (tb_pci_is_invalid(tunnel)) {
95 			tb_pci_deactivate(tunnel);
96 			list_del(&tunnel->list);
97 			tb_pci_free(tunnel);
98 		}
99 	}
100 }
101 
102 /**
103  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
104  */
105 static void tb_free_unplugged_children(struct tb_switch *sw)
106 {
107 	int i;
108 	for (i = 1; i <= sw->config.max_port_number; i++) {
109 		struct tb_port *port = &sw->ports[i];
110 		if (tb_is_upstream_port(port))
111 			continue;
112 		if (!port->remote)
113 			continue;
114 		if (port->remote->sw->is_unplugged) {
115 			tb_switch_remove(port->remote->sw);
116 			port->remote = NULL;
117 		} else {
118 			tb_free_unplugged_children(port->remote->sw);
119 		}
120 	}
121 }
122 
123 
124 /**
125  * find_pci_up_port() - return the first PCIe up port on @sw or NULL
126  */
127 static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
128 {
129 	int i;
130 	for (i = 1; i <= sw->config.max_port_number; i++)
131 		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
132 			return &sw->ports[i];
133 	return NULL;
134 }
135 
136 /**
137  * find_unused_down_port() - return the first inactive PCIe down port on @sw
138  */
139 static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
140 {
141 	int i;
142 	int cap;
143 	int res;
144 	int data;
145 	for (i = 1; i <= sw->config.max_port_number; i++) {
146 		if (tb_is_upstream_port(&sw->ports[i]))
147 			continue;
148 		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
149 			continue;
150 		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
151 		if (cap < 0)
152 			continue;
153 		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
154 		if (res < 0)
155 			continue;
156 		if (data & 0x80000000)
157 			continue;
158 		return &sw->ports[i];
159 	}
160 	return NULL;
161 }
162 
163 /**
164  * tb_activate_pcie_devices() - scan for and activate PCIe devices
165  *
166  * This method is somewhat ad hoc. For now it only supports one device
167  * per port and only devices at depth 1.
168  */
169 static void tb_activate_pcie_devices(struct tb *tb)
170 {
171 	int i;
172 	int cap;
173 	u32 data;
174 	struct tb_switch *sw;
175 	struct tb_port *up_port;
176 	struct tb_port *down_port;
177 	struct tb_pci_tunnel *tunnel;
178 	struct tb_cm *tcm = tb_priv(tb);
179 
180 	/* scan for pcie devices at depth 1*/
181 	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
182 		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
183 			continue;
184 		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
185 			continue;
186 		if (!tb->root_switch->ports[i].remote)
187 			continue;
188 		sw = tb->root_switch->ports[i].remote->sw;
189 		up_port = tb_find_pci_up_port(sw);
190 		if (!up_port) {
191 			tb_sw_info(sw, "no PCIe devices found, aborting\n");
192 			continue;
193 		}
194 
195 		/* check whether port is already activated */
196 		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
197 		if (cap < 0)
198 			continue;
199 		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
200 			continue;
201 		if (data & 0x80000000) {
202 			tb_port_info(up_port,
203 				     "PCIe port already activated, aborting\n");
204 			continue;
205 		}
206 
207 		down_port = tb_find_unused_down_port(tb->root_switch);
208 		if (!down_port) {
209 			tb_port_info(up_port,
210 				     "All PCIe down ports are occupied, aborting\n");
211 			continue;
212 		}
213 		tunnel = tb_pci_alloc(tb, up_port, down_port);
214 		if (!tunnel) {
215 			tb_port_info(up_port,
216 				     "PCIe tunnel allocation failed, aborting\n");
217 			continue;
218 		}
219 
220 		if (tb_pci_activate(tunnel)) {
221 			tb_port_info(up_port,
222 				     "PCIe tunnel activation failed, aborting\n");
223 			tb_pci_free(tunnel);
224 		}
225 
226 		list_add(&tunnel->list, &tcm->tunnel_list);
227 	}
228 }
229 
/* hotplug handling */

/*
 * struct tb_hotplug_event - deferred plug/unplug notification
 * @work: queued on tb->wq; handler is tb_handle_hotplug()
 * @tb: domain the event belongs to
 * @route: route string of the switch the event originated from
 * @port: port number on that switch
 * @unplug: true for an unplug event, false for a plug event
 */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
239 
/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq. Looks up the switch/port the event refers to
 * under tb->lock, then either tears down the departed subtree (unplug)
 * or scans the port and activates PCIe devices (plug). Frees @work's
 * containing event on exit.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Validate route, port number and direction before acting */
	sw = get_switch_at_route(tb->root_switch, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_warn(tb,
			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->unplug) {
		if (port->remote) {
			tb_port_info(port, "unplugged\n");
			/* mark subtree dead before freeing its tunnels */
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		tb_port_info(port, "hotplug: scanning\n");
		tb_scan_port(port);
		if (!port->remote) {
			tb_port_info(port, "hotplug: no switch found\n");
		} else if (port->remote->sw->config.depth > 1) {
			/* this CM only tunnels to devices at depth 1 */
			tb_sw_warn(port->remote->sw,
				   "hotplug: chaining not supported\n");
		} else {
			tb_sw_info(port->remote->sw,
				   "hotplug: activating pcie devices\n");
			tb_activate_pcie_devices(tb);
		}
	}
out:
	mutex_unlock(&tb->lock);
	kfree(ev); /* event was allocated in tb_handle_event() */
}
308 
309 /**
310  * tb_schedule_hotplug_handler() - callback function for the control channel
311  *
312  * Delegates to tb_handle_hotplug.
313  */
314 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
315 			    const void *buf, size_t size)
316 {
317 	const struct cfg_event_pkg *pkg = buf;
318 	struct tb_hotplug_event *ev;
319 	u64 route;
320 
321 	if (type != TB_CFG_PKG_EVENT) {
322 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
323 		return;
324 	}
325 
326 	route = tb_cfg_get_route(&pkg->header);
327 
328 	if (tb_cfg_error(tb->ctl, route, pkg->port,
329 			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
330 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
331 			pkg->port);
332 	}
333 
334 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
335 	if (!ev)
336 		return;
337 	INIT_WORK(&ev->work, tb_handle_hotplug);
338 	ev->tb = tb;
339 	ev->route = route;
340 	ev->port = pkg->port;
341 	ev->unplug = pkg->unplug;
342 	queue_work(tb->wq, &ev->work);
343 }
344 
345 static void tb_stop(struct tb *tb)
346 {
347 	struct tb_cm *tcm = tb_priv(tb);
348 	struct tb_pci_tunnel *tunnel;
349 	struct tb_pci_tunnel *n;
350 
351 	/* tunnels are only present after everything has been initialized */
352 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
353 		tb_pci_deactivate(tunnel);
354 		tb_pci_free(tunnel);
355 	}
356 	tb_switch_remove(tb->root_switch);
357 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
358 }
359 
360 static int tb_start(struct tb *tb)
361 {
362 	struct tb_cm *tcm = tb_priv(tb);
363 	int ret;
364 
365 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
366 	if (!tb->root_switch)
367 		return -ENOMEM;
368 
369 	ret = tb_switch_configure(tb->root_switch);
370 	if (ret) {
371 		tb_switch_put(tb->root_switch);
372 		return ret;
373 	}
374 
375 	/* Announce the switch to the world */
376 	ret = tb_switch_add(tb->root_switch);
377 	if (ret) {
378 		tb_switch_put(tb->root_switch);
379 		return ret;
380 	}
381 
382 	/* Full scan to discover devices added before the driver was loaded. */
383 	tb_scan_switch(tb->root_switch);
384 	tb_activate_pcie_devices(tb);
385 
386 	/* Allow tb_handle_hotplug to progress events */
387 	tcm->hotplug_active = true;
388 	return 0;
389 }
390 
391 static int tb_suspend_noirq(struct tb *tb)
392 {
393 	struct tb_cm *tcm = tb_priv(tb);
394 
395 	tb_info(tb, "suspending...\n");
396 	tb_switch_suspend(tb->root_switch);
397 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
398 	tb_info(tb, "suspend finished\n");
399 
400 	return 0;
401 }
402 
403 static int tb_resume_noirq(struct tb *tb)
404 {
405 	struct tb_cm *tcm = tb_priv(tb);
406 	struct tb_pci_tunnel *tunnel, *n;
407 
408 	tb_info(tb, "resuming...\n");
409 
410 	/* remove any pci devices the firmware might have setup */
411 	tb_switch_reset(tb, 0);
412 
413 	tb_switch_resume(tb->root_switch);
414 	tb_free_invalid_tunnels(tb);
415 	tb_free_unplugged_children(tb->root_switch);
416 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
417 		tb_pci_restart(tunnel);
418 	if (!list_empty(&tcm->tunnel_list)) {
419 		/*
420 		 * the pcie links need some time to get going.
421 		 * 100ms works for me...
422 		 */
423 		tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
424 		msleep(100);
425 	}
426 	 /* Allow tb_handle_hotplug to progress events */
427 	tcm->hotplug_active = true;
428 	tb_info(tb, "resume finished\n");
429 
430 	return 0;
431 }
432 
/* Connection-manager callbacks handed to the generic domain code */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
};
440 
441 struct tb *tb_probe(struct tb_nhi *nhi)
442 {
443 	struct tb_cm *tcm;
444 	struct tb *tb;
445 
446 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
447 	if (!tb)
448 		return NULL;
449 
450 	tb->cm_ops = &tb_cm_ops;
451 
452 	tcm = tb_priv(tb);
453 	INIT_LIST_HEAD(&tcm->tunnel_list);
454 
455 	return tb;
456 }
457