/* xref: /openbmc/linux/drivers/thunderbolt/xdomain.c (revision 308092d0) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_SHORT_TIMEOUT			100	/* ms */
#define XDOMAIN_DEFAULT_TIMEOUT			1000	/* ms */
#define XDOMAIN_BONDING_TIMEOUT			10000	/* ms */
#define XDOMAIN_RETRIES				10
#define XDOMAIN_DEFAULT_MAX_HOPID		15

enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};

static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};
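
/*
 * Typical discovery flow, driven by tb_xdomain_state_work() below:
 * INIT -> UUID -> LINK_STATUS, then either LINK_STATE_CHANGE ->
 * LINK_STATUS2 -> BONDING_UUID_LOW (we have the lower UUID) or
 * BONDING_UUID_HIGH (the other side bonds the lanes), and finally
 * PROPERTIES -> ENUMERATED. The lane bonding states are skipped when
 * bonding is not possible, and a hard failure in any state moves the
 * machine to ERROR.
 */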

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
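
/*
 * Illustrative sketch (not part of the original file): a service
 * driver could acknowledge a protocol message it received like this.
 * The reply structure and status value are hypothetical; only
 * tb_xdomain_response() and TB_CFG_PKG_XDOMAIN_RESP are real.
 *
 *	struct my_proto_reply {
 *		struct tb_xdp_header hdr;
 *		u32 status;
 *	} reply = { .status = 0 };
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 */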

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
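
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * request/response exchange as a service driver might issue it. The
 * request/response structures and the timeout value are hypothetical;
 * the TB_CFG_PKG_XDOMAIN_* types are real.
 *
 *	struct my_proto_req req = {};
 *	struct my_proto_res res = {};
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */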

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
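
/*
 * For example (sizes illustrative only): a 16-byte packet whose
 * tb_xdomain_header takes 12 bytes gets length_sn = (16 - 12) / 4 = 1
 * dword in the length field, with the sequence number packed into the
 * TB_XDOMAIN_SN bits on top of it.
 */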

static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		/* Callers compare against -EOPNOTSUPP, so return that */
		return -EOPNOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first round, allocate a block that has enough
		 * space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol does support forwarding, though, for which we might
	 * add support later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}

static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}

static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
					     u8 sequence, u32 status)
{
	struct tb_xdp_link_state_change_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
			   sizeof(res));

	res.status = status;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
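
/*
 * Illustrative sketch (not part of the original file): registering a
 * handler for a custom protocol. The UUID and callback below are
 * hypothetical; struct tb_protocol_handler comes from
 * <linux/thunderbolt.h>.
 *
 *	static const uuid_t my_proto_uuid =
 *		UUID_INIT(0x12345678, 0x1234, 0x1234,
 *			  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);
 *
 *	static int my_proto_callback(const void *buf, size_t size, void *data)
 *	{
 *		// Decode and handle the received XDomain packet here
 *		return 0;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */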

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_INIT;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

/* Can be called from state_work */
static void __stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->properties_changed_work);
	xd->properties_changed_retries = 0;
	xd->state_retries = 0;
}

static void stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->state_work);
	__stop_handshake(xd);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		/*
		 * If we've stopped the discovery with an error such as
		 * timing out, we will restart the handshake now that we
		 * have received a UUID request from the remote host.
		 */
		if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
			dev_dbg(&xd->dev, "restarting handshake\n");
			start_handshake(xd);
		}
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters the XDomain service driver @drv from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
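
/*
 * Illustrative sketch (not part of the original file): a minimal
 * service driver, loosely modeled after the thunderbolt-net driver.
 * The "network" key, the names and the empty bodies are hypothetical.
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{
 *			.match_flags = TBSVC_MATCH_PROTOCOL_KEY,
 *			.protocol_key = "network",
 *		},
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static int my_probe(struct tb_service *svc,
 *			    const struct tb_service_id *id)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct tb_service *svc)
 *	{
 *	}
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */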

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);
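
/*
 * The resulting modalias has the form
 * "tbsvc:k<key>p<prtcid>v<prtcvers>r<prtcrevs>", e.g.
 * "tbsvc:knetworkp00000001v00000001r00000001" (example values) for a
 * service with key "network", matching the format string in
 * get_modalias() above.
 */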

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to read remote UUID\n");
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Don't bond lanes automatically for loops */
		xd->bonding_possible = false;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}

static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw, &sls,
					       &tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed to request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}

static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	unsigned int width, width_mask;
	struct tb_port *port;
	int ret;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = TB_LINK_WIDTH_SINGLE;
		width_mask = width;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = TB_LINK_WIDTH_DUAL;
		width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_xdomain_downstream_port(xd);

	/*
	 * We can't use tb_xdomain_lane_bonding_enable() here because it
	 * is the other side that initiates lane bonding. So here we
	 * just set the width to both lane adapters and wait for the
	 * link to transition bonded.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width_mask);
		return ret;
	}

	port->bonded = width > TB_LINK_WIDTH_SINGLE;
	port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
	return 0;
}

static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		}
		/* Give up now */
		dev_err(&xd->dev, "failed to read XDomain properties from %pUb\n",
			xd->remote_uuid);

		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		/*
		 * Now disable lane 1 if bonding was not enabled. Do
		 * this only if bonding was possible at the beginning
		 * (that is we are the connection manager and there are
		 * two lanes).
		 */
		if (xd->bonding_possible) {
			struct tb_port *port;

			port = tb_xdomain_downstream_port(xd);
			if (!port->bonded)
				tb_port_disable(port->dual_link_port);
		}

		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return -ENODEV;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);

		tb_xdomain_debugfs_init(xd);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return 0;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);

	return ret;
}

static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_UUID;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS2;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
{
	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
	} else {
		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
	}

	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_PROPERTIES;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
{
	xd->properties_changed_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_failed(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_ERROR;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_state_work(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
	int ret, state = xd->state;

	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
			 state > XDOMAIN_STATE_ERROR))
		return;

	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);

	switch (state) {
	case XDOMAIN_STATE_INIT:
		if (xd->needs_uuid) {
			tb_xdomain_queue_uuid(xd);
		} else {
			tb_xdomain_queue_properties_changed(xd);
			tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_UUID:
		ret = tb_xdomain_get_uuid(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_failed(xd);
		} else {
			tb_xdomain_queue_properties_changed(xd);
			if (xd->bonding_possible)
				tb_xdomain_queue_link_status(xd);
			else
				tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;

			/*
			 * If any of the lane bonding states fail we skip
			 * bonding completely and try to continue from
			 * reading properties.
			 */
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATE_CHANGE:
		ret = tb_xdomain_link_state_change(xd, 2);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_link_status2(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS2:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding_uuid_low(xd);
		}
		break;

	case XDOMAIN_STATE_BONDING_UUID_LOW:
		tb_xdomain_lane_bonding_enable(xd);
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_BONDING_UUID_HIGH:
		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
			goto retry_state;
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_PROPERTIES:
		ret = tb_xdomain_get_properties(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_failed(xd);
		} else {
			xd->state = XDOMAIN_STATE_ENUMERATED;
		}
		break;

	case XDOMAIN_STATE_ENUMERATED:
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_ERROR:
		dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
		__stop_handshake(xd);
		break;

	default:
		dev_warn(&xd->dev, "unexpected state %d\n", state);
		break;
	}

	return;

retry_state:
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	dev_dbg(&xd->dev, "sending properties changed notification\n");

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to send properties changed notification, retrying\n");
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
			/* Only report the error once the retries are exhausted */
			return;
		}
		dev_err(&xd->dev, "failed to send properties changed notification\n");
		return;
	}

	xd->properties_changed_retries = XDOMAIN_RETRIES;
}
1703 
device_show(struct device * dev,struct device_attribute * attr,char * buf)1704 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1705 			   char *buf)
1706 {
1707 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1708 
1709 	return sysfs_emit(buf, "%#x\n", xd->device);
1710 }
1711 static DEVICE_ATTR_RO(device);
1712 
1713 static ssize_t
device_name_show(struct device * dev,struct device_attribute * attr,char * buf)1714 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1715 {
1716 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1717 	int ret;
1718 
1719 	if (mutex_lock_interruptible(&xd->lock))
1720 		return -ERESTARTSYS;
1721 	ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
1722 	mutex_unlock(&xd->lock);
1723 
1724 	return ret;
1725 }
1726 static DEVICE_ATTR_RO(device_name);
1727 
maxhopid_show(struct device * dev,struct device_attribute * attr,char * buf)1728 static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
1729 			     char *buf)
1730 {
1731 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1732 
1733 	return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
1734 }
1735 static DEVICE_ATTR_RO(maxhopid);
1736 
1737 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1738 			   char *buf)
1739 {
1740 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1741 
1742 	return sysfs_emit(buf, "%#x\n", xd->vendor);
1743 }
1744 static DEVICE_ATTR_RO(vendor);
1745 
1746 static ssize_t
1747 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1748 {
1749 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1750 	int ret;
1751 
1752 	if (mutex_lock_interruptible(&xd->lock))
1753 		return -ERESTARTSYS;
1754 	ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
1755 	mutex_unlock(&xd->lock);
1756 
1757 	return ret;
1758 }
1759 static DEVICE_ATTR_RO(vendor_name);
1760 
1761 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1762 			      char *buf)
1763 {
1764 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1765 
1766 	return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
1767 }
1768 static DEVICE_ATTR_RO(unique_id);
1769 
1770 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1771 			  char *buf)
1772 {
1773 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1774 
1775 	return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
1776 }
1777 
1778 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1779 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1780 
1781 static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
1782 			     char *buf)
1783 {
1784 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1785 	unsigned int width;
1786 
1787 	switch (xd->link_width) {
1788 	case TB_LINK_WIDTH_SINGLE:
1789 	case TB_LINK_WIDTH_ASYM_TX:
1790 		width = 1;
1791 		break;
1792 	case TB_LINK_WIDTH_DUAL:
1793 		width = 2;
1794 		break;
1795 	case TB_LINK_WIDTH_ASYM_RX:
1796 		width = 3;
1797 		break;
1798 	default:
1799 		WARN_ON_ONCE(1);
1800 		return -EINVAL;
1801 	}
1802 
1803 	return sysfs_emit(buf, "%u\n", width);
1804 }
1805 static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
1806 
1807 static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
1808 			     char *buf)
1809 {
1810 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1811 	unsigned int width;
1812 
1813 	switch (xd->link_width) {
1814 	case TB_LINK_WIDTH_SINGLE:
1815 	case TB_LINK_WIDTH_ASYM_RX:
1816 		width = 1;
1817 		break;
1818 	case TB_LINK_WIDTH_DUAL:
1819 		width = 2;
1820 		break;
1821 	case TB_LINK_WIDTH_ASYM_TX:
1822 		width = 3;
1823 		break;
1824 	default:
1825 		WARN_ON_ONCE(1);
1826 		return -EINVAL;
1827 	}
1828 
1829 	return sysfs_emit(buf, "%u\n", width);
1830 }
1831 static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
1832 
1833 static struct attribute *xdomain_attrs[] = {
1834 	&dev_attr_device.attr,
1835 	&dev_attr_device_name.attr,
1836 	&dev_attr_maxhopid.attr,
1837 	&dev_attr_rx_lanes.attr,
1838 	&dev_attr_rx_speed.attr,
1839 	&dev_attr_tx_lanes.attr,
1840 	&dev_attr_tx_speed.attr,
1841 	&dev_attr_unique_id.attr,
1842 	&dev_attr_vendor.attr,
1843 	&dev_attr_vendor_name.attr,
1844 	NULL,
1845 };
1846 
1847 static const struct attribute_group xdomain_attr_group = {
1848 	.attrs = xdomain_attrs,
1849 };
1850 
1851 static const struct attribute_group *xdomain_attr_groups[] = {
1852 	&xdomain_attr_group,
1853 	NULL,
1854 };
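/*
 * These attributes are exposed for each XDomain device in sysfs. A short
 * illustration of how user space might read them; the device name "0-1"
 * below is hypothetical (it is formed from the domain index and the
 * route string):
 *
 *	$ cat /sys/bus/thunderbolt/devices/0-1/vendor_name
 *	$ cat /sys/bus/thunderbolt/devices/0-1/rx_lanes
 */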
1855 
1856 static void tb_xdomain_release(struct device *dev)
1857 {
1858 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1859 
1860 	put_device(xd->dev.parent);
1861 
1862 	kfree(xd->local_property_block);
1863 	tb_property_free_dir(xd->remote_properties);
1864 	ida_destroy(&xd->out_hopids);
1865 	ida_destroy(&xd->in_hopids);
1866 	ida_destroy(&xd->service_ids);
1867 
1868 	kfree(xd->local_uuid);
1869 	kfree(xd->remote_uuid);
1870 	kfree(xd->device_name);
1871 	kfree(xd->vendor_name);
1872 	kfree(xd);
1873 }
1874 
1875 static int __maybe_unused tb_xdomain_suspend(struct device *dev)
1876 {
1877 	stop_handshake(tb_to_xdomain(dev));
1878 	return 0;
1879 }
1880 
1881 static int __maybe_unused tb_xdomain_resume(struct device *dev)
1882 {
1883 	start_handshake(tb_to_xdomain(dev));
1884 	return 0;
1885 }
1886 
1887 static const struct dev_pm_ops tb_xdomain_pm_ops = {
1888 	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
1889 };
1890 
1891 struct device_type tb_xdomain_type = {
1892 	.name = "thunderbolt_xdomain",
1893 	.release = tb_xdomain_release,
1894 	.pm = &tb_xdomain_pm_ops,
1895 };
1896 EXPORT_SYMBOL_GPL(tb_xdomain_type);
1897 
1898 /**
1899  * tb_xdomain_alloc() - Allocate new XDomain object
1900  * @tb: Domain where the XDomain belongs
1901  * @parent: Parent device (the switch through which the connection to
1902  *	    the other domain is reached).
1903  * @route: Route string used to reach the other domain
1904  * @local_uuid: Our local domain UUID
1905  * @remote_uuid: UUID of the other domain (optional)
1906  *
1907  * Allocates a new XDomain structure and returns a pointer to it. The
1908  * object must be released by calling tb_xdomain_put().
1909  */
1910 struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
1911 				    u64 route, const uuid_t *local_uuid,
1912 				    const uuid_t *remote_uuid)
1913 {
1914 	struct tb_switch *parent_sw = tb_to_switch(parent);
1915 	struct tb_xdomain *xd;
1916 	struct tb_port *down;
1917 
1918 	/* Make sure the downstream domain is accessible */
1919 	down = tb_port_at(route, parent_sw);
1920 	tb_port_unlock(down);
1921 
1922 	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
1923 	if (!xd)
1924 		return NULL;
1925 
1926 	xd->tb = tb;
1927 	xd->route = route;
1928 	xd->local_max_hopid = down->config.max_in_hop_id;
1929 	ida_init(&xd->service_ids);
1930 	ida_init(&xd->in_hopids);
1931 	ida_init(&xd->out_hopids);
1932 	mutex_init(&xd->lock);
1933 	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
1934 	INIT_DELAYED_WORK(&xd->properties_changed_work,
1935 			  tb_xdomain_properties_changed);
1936 
1937 	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
1938 	if (!xd->local_uuid)
1939 		goto err_free;
1940 
1941 	if (remote_uuid) {
1942 		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
1943 					  GFP_KERNEL);
1944 		if (!xd->remote_uuid)
1945 			goto err_free_local_uuid;
1946 	} else {
1947 		xd->needs_uuid = true;
1948 		xd->bonding_possible = !!down->dual_link_port;
1949 	}
1950 
1951 	device_initialize(&xd->dev);
1952 	xd->dev.parent = get_device(parent);
1953 	xd->dev.bus = &tb_bus_type;
1954 	xd->dev.type = &tb_xdomain_type;
1955 	xd->dev.groups = xdomain_attr_groups;
1956 	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
1957 
1958 	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
1959 	if (remote_uuid)
1960 		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
1961 
1962 	/*
1963 	 * This keeps the DMA powered on as long as we have an active
1964 	 * connection to another host.
1965 	 */
1966 	pm_runtime_set_active(&xd->dev);
1967 	pm_runtime_get_noresume(&xd->dev);
1968 	pm_runtime_enable(&xd->dev);
1969 
1970 	return xd;
1971 
1972 err_free_local_uuid:
1973 	kfree(xd->local_uuid);
1974 err_free:
1975 	kfree(xd);
1976 
1977 	return NULL;
1978 }
1979 
1980 /**
1981  * tb_xdomain_add() - Add XDomain to the bus
1982  * @xd: XDomain to add
1983  *
1984  * This function starts the XDomain discovery protocol handshake and
1985  * eventually adds the XDomain to the bus. After calling this function
1986  * the caller needs to call tb_xdomain_remove() in order to remove and
1987  * release the object, regardless of whether the handshake succeeded.
1988  */
1989 void tb_xdomain_add(struct tb_xdomain *xd)
1990 {
1991 	/* Start exchanging properties with the other host */
1992 	start_handshake(xd);
1993 }
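/*
 * A minimal sketch of the expected lifecycle from the connection
 * manager's point of view. Where @tb, @parent, @route and the local
 * UUID come from depends on the caller and is hypothetical here:
 *
 *	struct tb_xdomain *xd;
 *
 *	xd = tb_xdomain_alloc(tb, parent, route, local_uuid, NULL);
 *	if (!xd)
 *		return -ENOMEM;
 *	tb_xdomain_add(xd);
 *
 *	... and once the remote host goes away ...
 *
 *	tb_xdomain_remove(xd);
 */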
1994 
1995 static int unregister_service(struct device *dev, void *data)
1996 {
1997 	device_unregister(dev);
1998 	return 0;
1999 }
2000 
2001 /**
2002  * tb_xdomain_remove() - Remove XDomain from the bus
2003  * @xd: XDomain to remove
2004  *
2005  * This will stop all ongoing configuration work and remove the XDomain
2006  * along with any services from the bus. When the last reference to @xd
2007  * is released the object will be released as well.
2008  */
2009 void tb_xdomain_remove(struct tb_xdomain *xd)
2010 {
2011 	tb_xdomain_debugfs_remove(xd);
2012 
2013 	stop_handshake(xd);
2014 
2015 	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
2016 
2017 	/*
2018 	 * Undo runtime PM here explicitly because it is possible that
2019 	 * the XDomain was never added to the bus and thus device_del()
2020 	 * is not called for it (device_del() would handle this otherwise).
2021 	 */
2022 	pm_runtime_disable(&xd->dev);
2023 	pm_runtime_put_noidle(&xd->dev);
2024 	pm_runtime_set_suspended(&xd->dev);
2025 
2026 	if (!device_is_registered(&xd->dev)) {
2027 		put_device(&xd->dev);
2028 	} else {
2029 		dev_info(&xd->dev, "host disconnected\n");
2030 		device_unregister(&xd->dev);
2031 	}
2032 }
2033 
2034 /**
2035  * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
2036  * @xd: XDomain connection
2037  *
2038  * Lane bonding is disabled by default for XDomains. This function tries
2039  * to enable bonding by first enabling the port and waiting for the CL0
2040  * state.
2041  *
2042  * Return: %0 in case of success and negative errno in case of error.
2043  */
2044 int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
2045 {
2046 	unsigned int width_mask;
2047 	struct tb_port *port;
2048 	int ret;
2049 
2050 	port = tb_xdomain_downstream_port(xd);
2051 	if (!port->dual_link_port)
2052 		return -ENODEV;
2053 
2054 	ret = tb_port_enable(port->dual_link_port);
2055 	if (ret)
2056 		return ret;
2057 
2058 	ret = tb_wait_for_port(port->dual_link_port, true);
2059 	if (ret < 0)
2060 		return ret;
2061 	if (!ret)
2062 		return -ENOTCONN;
2063 
2064 	ret = tb_port_lane_bonding_enable(port);
2065 	if (ret) {
2066 		tb_port_warn(port, "failed to enable lane bonding\n");
2067 		return ret;
2068 	}
2069 
2070 	/* Any of these link widths means the lanes are bonded */
2071 	width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
2072 		     TB_LINK_WIDTH_ASYM_RX;
2073 
2074 	ret = tb_port_wait_for_link_width(port, width_mask,
2075 					  XDOMAIN_BONDING_TIMEOUT);
2076 	if (ret) {
2077 		tb_port_warn(port, "failed to enable lane bonding\n");
2078 		return ret;
2079 	}
2080 
2081 	tb_port_update_credits(port);
2082 	tb_xdomain_update_link_attributes(xd);
2083 
2084 	dev_dbg(&xd->dev, "lane bonding enabled\n");
2085 	return 0;
2086 }
2087 EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
2088 
2089 /**
2090  * tb_xdomain_lane_bonding_disable() - Disable lane bonding
2091  * @xd: XDomain connection
2092  *
2093  * Lane bonding is disabled by default for XDomains. If bonding has been
2094  * enabled, this function can be used to disable it.
2095  */
2096 void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
2097 {
2098 	struct tb_port *port;
2099 
2100 	port = tb_xdomain_downstream_port(xd);
2101 	if (port->dual_link_port) {
2102 		int ret;
2103 
2104 		tb_port_lane_bonding_disable(port);
2105 		ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
2106 		if (ret == -ETIMEDOUT)
2107 			tb_port_warn(port, "timeout disabling lane bonding\n");
2108 		tb_port_disable(port->dual_link_port);
2109 		tb_port_update_credits(port);
2110 		tb_xdomain_update_link_attributes(xd);
2111 
2112 		dev_dbg(&xd->dev, "lane bonding disabled\n");
2113 	}
2114 }
2115 EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
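/*
 * Sketch of how a caller might pair the two bonding calls above. Error
 * handling beyond the enable result is omitted for brevity:
 *
 *	if (!tb_xdomain_lane_bonding_enable(xd))
 *		dev_dbg(&xd->dev, "link is bonded\n");
 *	...
 *	tb_xdomain_lane_bonding_disable(xd);
 */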
2116 
2117 /**
2118  * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
2119  * @xd: XDomain connection
2120  * @hopid: Preferred HopID or %-1 for next available
2121  *
2122  * Returns the allocated HopID or a negative errno. Specifically returns
2123  * %-ENOSPC if there are no more available HopIDs. The returned HopID is
2124  * guaranteed to be within the range supported by the input lane adapter.
2125  * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
2126  */
2127 int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
2128 {
2129 	if (hopid < 0)
2130 		hopid = TB_PATH_MIN_HOPID;
2131 	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
2132 		return -EINVAL;
2133 
2134 	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
2135 			       GFP_KERNEL);
2136 }
2137 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
2138 
2139 /**
2140  * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
2141  * @xd: XDomain connection
2142  * @hopid: Preferred HopID or %-1 for next available
2143  *
2144  * Returns the allocated HopID or a negative errno. Specifically returns
2145  * %-ENOSPC if there are no more available HopIDs. The returned HopID is
2146  * guaranteed to be within the range supported by the output lane adapter.
2147  * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
2148  */
2149 int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
2150 {
2151 	if (hopid < 0)
2152 		hopid = TB_PATH_MIN_HOPID;
2153 	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
2154 		return -EINVAL;
2155 
2156 	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
2157 			       GFP_KERNEL);
2158 }
2159 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
2160 
2161 /**
2162  * tb_xdomain_release_in_hopid() - Release input HopID
2163  * @xd: XDomain connection
2164  * @hopid: HopID to release
2165  */
2166 void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
2167 {
2168 	ida_free(&xd->in_hopids, hopid);
2169 }
2170 EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
2171 
2172 /**
2173  * tb_xdomain_release_out_hopid() - Release output HopID
2174  * @xd: XDomain connection
2175  * @hopid: HopID to release
2176  */
2177 void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
2178 {
2179 	ida_free(&xd->out_hopids, hopid);
2180 }
2181 EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
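/*
 * A minimal sketch of allocating both HopIDs for a tunnel and releasing
 * them on the error path (variable names are hypothetical):
 *
 *	int in_hopid, out_hopid;
 *
 *	in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *	if (in_hopid < 0)
 *		return in_hopid;
 *	out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
 *	if (out_hopid < 0) {
 *		tb_xdomain_release_in_hopid(xd, in_hopid);
 *		return out_hopid;
 *	}
 */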
2182 
2183 /**
2184  * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
2185  * @xd: XDomain connection
2186  * @transmit_path: HopID we are using to send out packets
2187  * @transmit_ring: DMA ring used to send out packets
2188  * @receive_path: HopID the other end is using to send packets to us
2189  * @receive_ring: DMA ring used to receive packets from @receive_path
2190  *
2191  * The function enables the DMA paths accordingly so that after successful
2192  * return the caller can send and receive packets using the high-speed DMA
2193  * paths. If a transmit or receive path is not needed, pass %-1 for those
2194  * parameters.
2195  *
2196  * Return: %0 in case of success and negative errno in case of error
2197  */
2198 int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
2199 			    int transmit_ring, int receive_path,
2200 			    int receive_ring)
2201 {
2202 	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
2203 					       transmit_ring, receive_path,
2204 					       receive_ring);
2205 }
2206 EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
2207 
2208 /**
2209  * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
2210  * @xd: XDomain connection
2211  * @transmit_path: HopID we are using to send out packets
2212  * @transmit_ring: DMA ring used to send out packets
2213  * @receive_path: HopID the other end is using to send packets to us
2214  * @receive_ring: DMA ring used to receive packets from @receive_path
2215  *
2216  * This does the opposite of tb_xdomain_enable_paths(). After this call
2217  * the caller is not expected to use the rings anymore. Passing %-1 as a
2218  * path/ring parameter means "don't care". Normally callers should pass
2219  * the same values here as they did when the paths were enabled.
2220  *
2221  * Return: %0 in case of success and negative errno in case of error
2222  */
2223 int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
2224 			     int transmit_ring, int receive_path,
2225 			     int receive_ring)
2226 {
2227 	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
2228 						  transmit_ring, receive_path,
2229 						  receive_ring);
2230 }
2231 EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
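/*
 * Sketch of a typical enable/disable sequence using HopIDs allocated
 * with the helpers above. The ring pointers are hypothetical and would
 * normally come from tb_ring_alloc_tx()/tb_ring_alloc_rx():
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, tx_ring->hop,
 *				      receive_path, rx_ring->hop);
 *	if (ret)
 *		goto err_free_rings;
 *	...
 *	tb_xdomain_disable_paths(xd, transmit_path, tx_ring->hop,
 *				 receive_path, rx_ring->hop);
 */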
2232 
2233 struct tb_xdomain_lookup {
2234 	const uuid_t *uuid;
2235 	u8 link;
2236 	u8 depth;
2237 	u64 route;
2238 };
2239 
2240 static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
2241 	const struct tb_xdomain_lookup *lookup)
2242 {
2243 	struct tb_port *port;
2244 
2245 	tb_switch_for_each_port(sw, port) {
2246 		struct tb_xdomain *xd;
2247 
2248 		if (port->xdomain) {
2249 			xd = port->xdomain;
2250 
2251 			if (lookup->uuid) {
2252 				if (xd->remote_uuid &&
2253 				    uuid_equal(xd->remote_uuid, lookup->uuid))
2254 					return xd;
2255 			} else {
2256 				if (lookup->link && lookup->link == xd->link &&
2257 				    lookup->depth == xd->depth)
2258 					return xd;
2259 				if (lookup->route && lookup->route == xd->route)
2260 					return xd;
2261 			}
2262 		} else if (tb_port_has_remote(port)) {
2263 			xd = switch_find_xdomain(port->remote->sw, lookup);
2264 			if (xd)
2265 				return xd;
2266 		}
2267 	}
2268 
2269 	return NULL;
2270 }
2271 
2272 /**
2273  * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
2274  * @tb: Domain the XDomain belongs to
2275  * @uuid: UUID to look for
2276  *
2277  * Finds XDomain by walking through the Thunderbolt topology below @tb.
2278  * The returned XDomain will have its reference count increased so the
2279  * caller needs to call tb_xdomain_put() when it is done with the
2280  * object.
2281  *
2282  * This will find all XDomains including the ones that are not yet added
2283  * to the bus (handshake is still in progress).
2284  *
2285  * The caller needs to hold @tb->lock.
2286  */
2287 struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2288 {
2289 	struct tb_xdomain_lookup lookup;
2290 	struct tb_xdomain *xd;
2291 
2292 	memset(&lookup, 0, sizeof(lookup));
2293 	lookup.uuid = uuid;
2294 
2295 	xd = switch_find_xdomain(tb->root_switch, &lookup);
2296 	return tb_xdomain_get(xd);
2297 }
2298 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
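/*
 * Typical lookup pattern (sketch). The caller must hold @tb->lock while
 * doing the lookup and drop the reference when done with the XDomain:
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */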
2299 
2300 /**
2301  * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
2302  * @tb: Domain the XDomain belongs to
2303  * @link: Root switch link number
2304  * @depth: Depth in the link
2305  *
2306  * Finds XDomain by walking through the Thunderbolt topology below @tb.
2307  * The returned XDomain will have its reference count increased so the
2308  * caller needs to call tb_xdomain_put() when it is done with the
2309  * object.
2310  *
2311  * This will find all XDomains including the ones that are not yet added
2312  * to the bus (handshake is still in progress).
2313  *
2314  * The caller needs to hold @tb->lock.
2315  */
2316 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
2317 						 u8 depth)
2318 {
2319 	struct tb_xdomain_lookup lookup;
2320 	struct tb_xdomain *xd;
2321 
2322 	memset(&lookup, 0, sizeof(lookup));
2323 	lookup.link = link;
2324 	lookup.depth = depth;
2325 
2326 	xd = switch_find_xdomain(tb->root_switch, &lookup);
2327 	return tb_xdomain_get(xd);
2328 }
2329 
2330 /**
2331  * tb_xdomain_find_by_route() - Find an XDomain by route string
2332  * @tb: Domain the XDomain belongs to
2333  * @route: XDomain route string
2334  *
2335  * Finds XDomain by walking through the Thunderbolt topology below @tb.
2336  * The returned XDomain will have its reference count increased so the
2337  * caller needs to call tb_xdomain_put() when it is done with the
2338  * object.
2339  *
2340  * This will find all XDomains including the ones that are not yet added
2341  * to the bus (handshake is still in progress).
2342  *
2343  * The caller needs to hold @tb->lock.
2344  */
2345 struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
2346 {
2347 	struct tb_xdomain_lookup lookup;
2348 	struct tb_xdomain *xd;
2349 
2350 	memset(&lookup, 0, sizeof(lookup));
2351 	lookup.route = route;
2352 
2353 	xd = switch_find_xdomain(tb->root_switch, &lookup);
2354 	return tb_xdomain_get(xd);
2355 }
2356 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
2357 
2358 bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
2359 			       const void *buf, size_t size)
2360 {
2361 	const struct tb_protocol_handler *handler, *tmp;
2362 	const struct tb_xdp_header *hdr = buf;
2363 	unsigned int length;
2364 	int ret = 0;
2365 
2366 	/* We expect the packet to be at least the size of the header */
2367 	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
2368 	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
2369 		return true;
2370 	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
2371 		return true;
2372 
2373 	/*
2374 	 * Handle XDomain discovery protocol packets directly here. For
2375 	 * other protocols (based on their UUID) we call registered
2376 	 * handlers in turn.
2377 	 */
2378 	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
2379 		if (type == TB_CFG_PKG_XDOMAIN_REQ)
2380 			return tb_xdp_schedule_request(tb, hdr, size);
2381 		return false;
2382 	}
2383 
2384 	mutex_lock(&xdomain_lock);
2385 	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
2386 		if (!uuid_equal(&hdr->uuid, handler->uuid))
2387 			continue;
2388 
2389 		mutex_unlock(&xdomain_lock);
2390 		ret = handler->callback(buf, size, handler->data);
2391 		mutex_lock(&xdomain_lock);
2392 
2393 		if (ret)
2394 			break;
2395 	}
2396 	mutex_unlock(&xdomain_lock);
2397 
2398 	return ret > 0;
2399 }
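/*
 * The handlers walked above are added with tb_register_protocol_handler()
 * (defined earlier in this file). A minimal sketch of a hypothetical
 * service protocol handler; a non-zero return from the callback stops
 * further processing, and a positive value marks the packet handled:
 *
 *	static int my_callback(const void *buf, size_t size, void *data)
 *	{
 *		... parse a packet matching my_proto_uuid ...
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	tb_register_protocol_handler(&my_handler);
 */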
2400 
2401 static int update_xdomain(struct device *dev, void *data)
2402 {
2403 	struct tb_xdomain *xd;
2404 
2405 	xd = tb_to_xdomain(dev);
2406 	if (xd) {
2407 		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
2408 				   msecs_to_jiffies(50));
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 static void update_all_xdomains(void)
2415 {
2416 	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
2417 }
2418 
2419 static bool remove_directory(const char *key, const struct tb_property_dir *dir)
2420 {
2421 	struct tb_property *p;
2422 
2423 	p = tb_property_find(xdomain_property_dir, key,
2424 			     TB_PROPERTY_TYPE_DIRECTORY);
2425 	if (p && p->value.dir == dir) {
2426 		tb_property_remove(p);
2427 		return true;
2428 	}
2429 	return false;
2430 }
2431 
2432 /**
2433  * tb_register_property_dir() - Register property directory to the host
2434  * @key: Key (name) of the directory to add
2435  * @dir: Directory to add
2436  *
2437  * Service drivers can use this function to add a new property directory
2438  * to the properties this host exposes. The other connected hosts are
2439  * notified so they can re-read the properties of this host if they are
2440  * interested.
2441  *
2442  * Return: %0 on success and negative errno on failure
2443  */
2444 int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
2445 {
2446 	int ret;
2447 
2448 	if (WARN_ON(!xdomain_property_dir))
2449 		return -EAGAIN;
2450 
2451 	if (!key || strlen(key) > 8)
2452 		return -EINVAL;
2453 
2454 	mutex_lock(&xdomain_lock);
2455 	if (tb_property_find(xdomain_property_dir, key,
2456 			     TB_PROPERTY_TYPE_DIRECTORY)) {
2457 		ret = -EEXIST;
2458 		goto err_unlock;
2459 	}
2460 
2461 	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
2462 	if (ret)
2463 		goto err_unlock;
2464 
2465 	xdomain_property_block_gen++;
2466 
2467 	mutex_unlock(&xdomain_lock);
2468 	update_all_xdomains();
2469 	return 0;
2470 
2471 err_unlock:
2472 	mutex_unlock(&xdomain_lock);
2473 	return ret;
2474 }
2475 EXPORT_SYMBOL_GPL(tb_register_property_dir);
2476 
2477 /**
2478  * tb_unregister_property_dir() - Removes property directory from host
2479  * @key: Key (name) of the directory
2480  * @dir: Directory to remove
2481  *
2482  * This will remove the existing directory from this host and notify the
2483  * connected hosts about the change.
2484  */
2485 void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
2486 {
2487 	mutex_lock(&xdomain_lock);
2488 	if (remove_directory(key, dir))
2489 		xdomain_property_block_gen++;
2490 	mutex_unlock(&xdomain_lock);
2491 
2492 	/*
2493 	 * Notify the connected hosts so they can re-read our properties.
2494 	 */
2495 	update_all_xdomains();
2496 }
2497 EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
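/*
 * A minimal sketch of a service driver exposing and later removing its
 * own directory (the key, UUID and property names are hypothetical):
 *
 *	struct tb_property_dir *dir;
 *	int ret;
 *
 *	dir = tb_property_create_dir(&my_service_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	ret = tb_register_property_dir("mysvc", dir);
 *	if (ret) {
 *		tb_property_free_dir(dir);
 *		return ret;
 *	}
 *	...
 *	tb_unregister_property_dir("mysvc", dir);
 *	tb_property_free_dir(dir);
 */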
2498 
2499 int tb_xdomain_init(void)
2500 {
2501 	xdomain_property_dir = tb_property_create_dir(NULL);
2502 	if (!xdomain_property_dir)
2503 		return -ENOMEM;
2504 
2505 	/*
2506 	 * Initialize the standard set of properties without any service
2507 	 * directories. Those will be added by the service drivers
2508 	 * themselves when they are loaded.
2509 	 *
2510 	 * The rest of the properties are filled in dynamically based on
2511 	 * these when the P2P connection is made.
2512 	 */
2513 	tb_property_add_immediate(xdomain_property_dir, "vendorid",
2514 				  PCI_VENDOR_ID_INTEL);
2515 	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
2516 	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
2517 	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
2518 
2519 	xdomain_property_block_gen = get_random_u32();
2520 	return 0;
2521 }
2522 
2523 void tb_xdomain_exit(void)
2524 {
2525 	tb_property_free_dir(xdomain_property_dir);
2526 }
2527