xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 23dd5bb4)
/*
 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"


/* enumeration & hot plug handling */


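/* tb_scan_switch() and tb_scan_port() recurse into each other. */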
static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_switch *sw;
	if (tb_is_upstream_port(port))
		return;
	if (port->config.type != TB_TYPE_PORT)
		return;
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
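	/*
	 * A device answered below this port: allocate a switch for it,
	 * link the two ports and recurse into the new switch.
	 */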
	sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
	if (!sw)
		return;
	port->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = port;
	tb_scan_switch(sw);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;
	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
		if (tb_pci_is_invalid(tunnel)) {
			tb_pci_deactivate(tunnel);
			tb_pci_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (port->remote->sw->is_unplugged) {
			tb_switch_free(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}


/**
 * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
 */
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
			return &sw->ports[i];
	return NULL;
}

/**
 * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
 */
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
	int i;
	int cap;
	int res;
	int data;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
			continue;
		cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
		if (cap <= 0)
			continue;
		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
		if (res < 0)
			continue;
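		/*
		 * Bit 31 in the PCIe capability marks a port whose adapter
		 * is already activated (tb_activate_pcie_devices() below
		 * checks the same bit on the up port).
		 */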
		if (data & 0x80000000)
			continue;
		return &sw->ports[i];
	}
	return NULL;
}

/**
 * tb_activate_pcie_devices() - scan for and activate PCIe devices
 *
 * This method is somewhat ad hoc. For now it only supports one device
 * per port and only devices at depth 1.
 */
static void tb_activate_pcie_devices(struct tb *tb)
{
	int i;
	int cap;
	u32 data;
	struct tb_switch *sw;
	struct tb_port *up_port;
	struct tb_port *down_port;
	struct tb_pci_tunnel *tunnel;
	/* scan for pcie devices at depth 1 */
	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
			continue;
		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
			continue;
		if (!tb->root_switch->ports[i].remote)
			continue;
		sw = tb->root_switch->ports[i].remote->sw;
		up_port = tb_find_pci_up_port(sw);
		if (!up_port) {
			tb_sw_info(sw, "no PCIe devices found, aborting\n");
			continue;
		}

		/* check whether port is already activated */
		cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
		if (cap <= 0)
			continue;
		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
			continue;
		if (data & 0x80000000) {
			tb_port_info(up_port,
				     "PCIe port already activated, aborting\n");
			continue;
		}

		down_port = tb_find_unused_down_port(tb->root_switch);
		if (!down_port) {
			tb_port_info(up_port,
				     "All PCIe down ports are occupied, aborting\n");
			continue;
		}
		tunnel = tb_pci_alloc(tb, up_port, down_port);
		if (!tunnel) {
			tb_port_info(up_port,
				     "PCIe tunnel allocation failed, aborting\n");
			continue;
		}

		if (tb_pci_activate(tunnel)) {
			tb_port_info(up_port,
				     "PCIe tunnel activation failed, aborting\n");
			tb_pci_free(tunnel);
		}

	}
}

/* hotplug handling */

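/*
 * One tb_hotplug_event is allocated per plug event received over the
 * control channel and queued on tb->wq (see tb_schedule_hotplug_handler()).
 */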
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tb->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = get_switch_at_route(tb->root_switch, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_warn(tb,
			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
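	/*
	 * Unplug tears down the whole subtree behind the port; plug rescans
	 * the port and, for switches at depth 1, sets up PCIe tunnels.
	 */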
	if (ev->unplug) {
		if (port->remote) {
			tb_port_info(port, "unplugged\n");
			tb_sw_set_unpplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_free(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		tb_port_info(port, "hotplug: scanning\n");
		tb_scan_port(port);
		if (!port->remote) {
			tb_port_info(port, "hotplug: no switch found\n");
		} else if (port->remote->sw->config.depth > 1) {
			tb_sw_warn(port->remote->sw,
				   "hotplug: chaining not supported\n");
		} else {
			tb_sw_info(port->remote->sw,
				   "hotplug: activating pcie devices\n");
			tb_activate_pcie_devices(tb);
		}
	}
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_schedule_hotplug_handler() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
					bool unplug)
{
	struct tb *tb = data;
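	/*
	 * Copy the event and defer handling to tb->wq. If the allocation
	 * fails the event is silently dropped.
	 */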
	struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	queue_work(tb->wq, &ev->work);
}

/**
 * thunderbolt_shutdown_and_free() - shut down everything
 *
 * Free all switches and the config channel.
 *
 * Used in the error path of thunderbolt_alloc_and_start.
 */
void thunderbolt_shutdown_and_free(struct tb *tb)
{
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	mutex_lock(&tb->lock);

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
		tb_pci_deactivate(tunnel);
		tb_pci_free(tunnel);
	}

	if (tb->root_switch)
		tb_switch_free(tb->root_switch);
	tb->root_switch = NULL;

	if (tb->ctl) {
		tb_ctl_stop(tb->ctl);
		tb_ctl_free(tb->ctl);
	}
	tb->ctl = NULL;
	tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */

	/* allow tb_handle_hotplug to acquire the lock */
	mutex_unlock(&tb->lock);
	if (tb->wq) {
		flush_workqueue(tb->wq);
		destroy_workqueue(tb->wq);
		tb->wq = NULL;
	}
	mutex_destroy(&tb->lock);
	kfree(tb);
}

/**
 * thunderbolt_alloc_and_start() - set up the thunderbolt bus
 *
 * Allocates a tb_cfg control channel, initializes the root switch, enables
 * plug events and activates pci devices.
 *
 * Return: Returns the new struct tb on success or NULL on error.
 */
struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
{
	struct tb *tb;

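	/*
	 * The register layouts in tb_regs.h must have exactly the size the
	 * hardware expects.
	 */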
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);
	mutex_lock(&tb->lock);
	INIT_LIST_HEAD(&tb->tunnel_list);

	tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
	if (!tb->wq)
		goto err_locked;

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
	if (!tb->ctl)
		goto err_locked;
	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	tb->root_switch = tb_switch_alloc(tb, 0);
	if (!tb->root_switch)
		goto err_locked;

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	tb_activate_pcie_devices(tb);

	/* Allow tb_handle_hotplug to process events */
	tb->hotplug_active = true;
	mutex_unlock(&tb->lock);
	return tb;

err_locked:
	mutex_unlock(&tb->lock);
	thunderbolt_shutdown_and_free(tb);
	return NULL;
}
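
/*
 * Usage sketch (an assumption about the caller, which lives outside this
 * file): the NHI driver is expected to pair these entry points roughly as
 *
 *	tb = thunderbolt_alloc_and_start(nhi);
 *	if (!tb)
 *		return -EIO;
 *	...
 *	thunderbolt_suspend(tb) and thunderbolt_resume(tb) from the PM hooks
 *	...
 *	thunderbolt_shutdown_and_free(tb);
 */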

void thunderbolt_suspend(struct tb *tb)
{
	tb_info(tb, "suspending...\n");
	mutex_lock(&tb->lock);
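	/* Suspend the switches while the control channel is still running. */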
	tb_switch_suspend(tb->root_switch);
	tb_ctl_stop(tb->ctl);
	tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	mutex_unlock(&tb->lock);
	tb_info(tb, "suspend finished\n");
}

void thunderbolt_resume(struct tb *tb)
{
	struct tb_pci_tunnel *tunnel, *n;
	tb_info(tb, "resuming...\n");
	mutex_lock(&tb->lock);
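	/*
	 * Restart the control channel first; the switches below are only
	 * reachable through it.
	 */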
	tb_ctl_start(tb->ctl);

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
		tb_pci_restart(tunnel);
	if (!list_empty(&tb->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going after the
		 * tunnels have been restarted; 100ms has been enough so far.
		 */
		tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to process events */
	tb->hotplug_active = true;
	mutex_unlock(&tb->lock);
	tb_info(tb, "resume finished\n");
}
432