// SPDX-License-Identifier: GPL-2.0
/*
 * DMA traffic test driver
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Isaac Hazan <isaac.hazan@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */
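/*
 * Usage sketch (the exact debugfs location depends on how the
 * Thunderbolt core names the domain and service; the attribute names
 * below are the ones this driver creates):
 *
 *	# cd /sys/kernel/debug/thunderbolt/<service>/dma_test
 *	# echo 1000 > packets_to_send
 *	# echo 1000 > packets_to_receive
 *	# echo 1 > test
 *	# cat status
 */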

#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>

#define DMA_TEST_TX_RING_SIZE		64
#define DMA_TEST_RX_RING_SIZE		256
#define DMA_TEST_FRAME_SIZE		SZ_4K
#define DMA_TEST_DATA_PATTERN		0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS		1000

enum dma_test_frame_pdf {
	DMA_TEST_PDF_FRAME_START = 1,
	DMA_TEST_PDF_FRAME_END,
};

struct dma_test_frame {
	struct dma_test *dma_test;
	void *data;
	struct ring_frame frame;
};

enum dma_test_test_error {
	DMA_TEST_NO_ERROR,
	DMA_TEST_INTERRUPTED,
	DMA_TEST_BUFFER_ERROR,
	DMA_TEST_DMA_ERROR,
	DMA_TEST_CONFIG_ERROR,
	DMA_TEST_SPEED_ERROR,
	DMA_TEST_WIDTH_ERROR,
	DMA_TEST_BONDING_ERROR,
	DMA_TEST_PACKET_ERROR,
};

static const char * const dma_test_error_names[] = {
	[DMA_TEST_NO_ERROR] = "no errors",
	[DMA_TEST_INTERRUPTED] = "interrupted by signal",
	[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
	[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
	[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
	[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
	[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
	[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
	[DMA_TEST_PACKET_ERROR] = "packet check failed",
};

enum dma_test_result {
	DMA_TEST_NOT_RUN,
	DMA_TEST_SUCCESS,
	DMA_TEST_FAIL,
};

static const char * const dma_test_result_names[] = {
	[DMA_TEST_NOT_RUN] = "not run",
	[DMA_TEST_SUCCESS] = "success",
	[DMA_TEST_FAIL] = "failed",
};

/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @rx_hopid: HopID used for receiving frames
 * @tx_ring: Software ring holding TX frames
 * @tx_hopid: HopID used for sending frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (lanes), %0 to use whatever is negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *			    run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	int rx_hopid;
	struct tb_ring *tx_ring;
	int tx_hopid;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
	unsigned int packets_received;
	unsigned int link_speed;
	unsigned int link_width;
	unsigned int crc_errors;
	unsigned int buffer_overflow_errors;
	enum dma_test_result result;
	enum dma_test_test_error error_code;
	struct completion complete;
	struct mutex lock;
	struct dentry *debugfs_dir;
};

/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;

static void dma_test_free_rings(struct dma_test *dt)
{
	if (dt->rx_ring) {
		tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
		tb_ring_free(dt->rx_ring);
		dt->rx_ring = NULL;
	}
	if (dt->tx_ring) {
		tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
		tb_ring_free(dt->tx_ring);
		dt->tx_ring = NULL;
	}
}

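/*
 * Allocate the TX/RX rings and matching output/input HopIDs, program
 * the XDomain DMA paths and start the rings. Everything allocated so
 * far is released if any step fails.
 */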
static int dma_test_start_rings(struct dma_test *dt)
{
	unsigned int flags = RING_FLAG_FRAME;
	struct tb_xdomain *xd = dt->xd;
	int ret, e2e_tx_hop = 0;
	struct tb_ring *ring;

	/*
	 * If we are both sender and receiver (traffic goes over a
	 * special loopback dongle) enable E2E flow control. This avoids
	 * losing packets.
	 */
	if (dt->packets_to_send && dt->packets_to_receive)
		flags |= RING_FLAG_E2E;

	if (dt->packets_to_send) {
		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
					flags);
		if (!ring)
			return -ENOMEM;

		dt->tx_ring = ring;
		e2e_tx_hop = ring->hop;

		ret = tb_xdomain_alloc_out_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->tx_hopid = ret;
	}

	if (dt->packets_to_receive) {
		u16 sof_mask, eof_mask;

		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
					flags, e2e_tx_hop, sof_mask, eof_mask,
					NULL, NULL);
		if (!ring) {
			dma_test_free_rings(dt);
			return -ENOMEM;
		}

		dt->rx_ring = ring;

		ret = tb_xdomain_alloc_in_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->rx_hopid = ret;
	}

	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
				      dt->tx_ring ? dt->tx_ring->hop : -1,
				      dt->rx_hopid,
				      dt->rx_ring ? dt->rx_ring->hop : -1);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;
	}

	if (dt->tx_ring)
		tb_ring_start(dt->tx_ring);
	if (dt->rx_ring)
		tb_ring_start(dt->rx_ring);

	return 0;
}

static void dma_test_stop_rings(struct dma_test *dt)
{
	int ret;

	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
				       dt->tx_ring ? dt->tx_ring->hop : -1,
				       dt->rx_hopid,
				       dt->rx_ring ? dt->rx_ring->hop : -1);
	if (ret)
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
}

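/*
 * Invoked once per completed (or canceled) RX frame; with
 * RING_FLAG_FRAME each ring buffer carries exactly one frame. Unmaps
 * and frees the buffer, accounts CRC/overflow errors and signals
 * completion once the expected packet count has been received.
 */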
static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_FROM_DEVICE);
	kfree(tf->data);

	if (canceled) {
		kfree(tf);
		return;
	}

	dt->packets_received++;
	dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
		dt->packets_to_receive);

	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		dt->crc_errors++;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		dt->buffer_overflow_errors++;

	kfree(tf);

	if (dt->packets_received == dt->packets_to_receive)
		complete(&dt->complete);
}

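/*
 * Pre-post @npackets receive buffers. Each buffer is DMA mapped here
 * and unmapped in dma_test_rx_callback() once the frame completes or
 * the ring is torn down.
 */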
static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_rx_callback;
		tf->dma_test = dt;
		INIT_LIST_HEAD(&tf->frame.list);

		tb_ring_rx(dt->rx_ring, &tf->frame);
	}

	return 0;
}

static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_TO_DEVICE);
	kfree(tf->data);
	kfree(tf);
}

static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->frame.size = 0; /* means 4096 */
		tf->dma_test = dt;

		tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_tx_callback;
		tf->frame.sof = DMA_TEST_PDF_FRAME_START;
		tf->frame.eof = DMA_TEST_PDF_FRAME_END;
		INIT_LIST_HEAD(&tf->frame.list);

		dt->packets_sent++;
		dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
			dt->packets_to_send);

		tb_ring_tx(dt->tx_ring, &tf->frame);
	}

	return 0;
}

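/*
 * Generates a debugfs attribute show/store pair: the store side first
 * runs the given validation callback, and both sides take dt->lock so
 * the configuration cannot change underneath a running test.
 */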
#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)	\
static int __fops ## _show(void *data, u64 *val)		\
{								\
	struct tb_service *svc = data;				\
	struct dma_test *dt = tb_service_get_drvdata(svc);	\
	int ret;						\
								\
	ret = mutex_lock_interruptible(&dt->lock);		\
	if (ret)						\
		return ret;					\
	__get(dt, val);						\
	mutex_unlock(&dt->lock);				\
	return 0;						\
}								\
static int __fops ## _store(void *data, u64 val)		\
{								\
	struct tb_service *svc = data;				\
	struct dma_test *dt = tb_service_get_drvdata(svc);	\
	int ret;						\
								\
	ret = __validate(val);					\
	if (ret)						\
		return ret;					\
	ret = mutex_lock_interruptible(&dt->lock);		\
	if (ret)						\
		return ret;					\
	__set(dt, val);						\
	mutex_unlock(&dt->lock);				\
	return 0;						\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show,	\
			 __fops ## _store, "%llu\n")

static void lanes_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_width;
}

static int lanes_validate(u64 val)
{
	return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
	dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);

static void speed_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_speed;
}

static int speed_validate(u64 val)
{
	switch (val) {
	case 40:
	case 20:
	case 10:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}

static void speed_set(struct dma_test *dt, u64 val)
{
	dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);

static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
		      packets_to_receive_validate, packets_to_receive_set);

static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
		      packets_to_send_validate, packets_to_send_set);

static int dma_test_set_bonding(struct dma_test *dt)
{
	switch (dt->link_width) {
	case 2:
		return tb_xdomain_lane_bonding_enable(dt->xd);
	case 1:
		tb_xdomain_lane_bonding_disable(dt->xd);
		fallthrough;
	default:
		return 0;
	}
}

static bool dma_test_validate_config(struct dma_test *dt)
{
	if (!dt->packets_to_send && !dt->packets_to_receive)
		return false;
	if (dt->packets_to_send && dt->packets_to_receive &&
	    dt->packets_to_send != dt->packets_to_receive)
		return false;
	return true;
}

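/*
 * Downgrade the result to failed if the negotiated link speed/width
 * does not match the expectation, or if the packet accounting shows
 * lost, corrupted or overrun frames. An error code set earlier by the
 * test path is preserved.
 */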
static void dma_test_check_errors(struct dma_test *dt, int ret)
{
	if (!dt->error_code) {
		if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
			dt->error_code = DMA_TEST_SPEED_ERROR;
		} else if (dt->link_width) {
			const struct tb_xdomain *xd = dt->xd;

			if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
			    (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
				dt->error_code = DMA_TEST_WIDTH_ERROR;
		} else if (dt->packets_to_send != dt->packets_sent ||
			 dt->packets_to_receive != dt->packets_received ||
			 dt->crc_errors || dt->buffer_overflow_errors) {
			dt->error_code = DMA_TEST_PACKET_ERROR;
		} else {
			return;
		}
	}

	dt->result = DMA_TEST_FAIL;
}

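/*
 * Writing 1 to the "test" attribute runs a single iteration: validate
 * the configuration, apply lane bonding, start the rings, queue RX
 * buffers before TX (so nothing is dropped in loopback) and wait for
 * reception to finish before tearing the rings down again.
 */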
static int test_store(void *data, u64 val)
{
	struct tb_service *svc = data;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	if (val != 1)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	dt->packets_sent = 0;
	dt->packets_received = 0;
	dt->crc_errors = 0;
	dt->buffer_overflow_errors = 0;
	dt->result = DMA_TEST_SUCCESS;
	dt->error_code = DMA_TEST_NO_ERROR;

	dev_dbg(&svc->dev, "DMA test starting\n");
	if (dt->link_speed)
		dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
	if (dt->link_width)
		dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
	dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
	dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

	if (!dma_test_validate_config(dt)) {
		dev_err(&svc->dev, "invalid test configuration\n");
		dt->error_code = DMA_TEST_CONFIG_ERROR;
		goto out_unlock;
	}

	ret = dma_test_set_bonding(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to set lanes\n");
		dt->error_code = DMA_TEST_BONDING_ERROR;
		goto out_unlock;
	}

	ret = dma_test_start_rings(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to enable DMA rings\n");
		dt->error_code = DMA_TEST_DMA_ERROR;
		goto out_unlock;
	}

	if (dt->packets_to_receive) {
		reinit_completion(&dt->complete);
		ret = dma_test_submit_rx(dt, dt->packets_to_receive);
		if (ret) {
			dev_err(&svc->dev, "failed to submit receive buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_send) {
		ret = dma_test_submit_tx(dt, dt->packets_to_send);
		if (ret) {
			dev_err(&svc->dev, "failed to submit transmit buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_receive) {
		ret = wait_for_completion_interruptible(&dt->complete);
		if (ret) {
			dt->error_code = DMA_TEST_INTERRUPTED;
			goto out_stop;
		}
	}

out_stop:
	dma_test_stop_rings(dt);
out_unlock:
	dma_test_check_errors(dt, ret);
	mutex_unlock(&dt->lock);

	dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");

static int status_show(struct seq_file *s, void *not_used)
{
	struct tb_service *svc = s->private;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
	if (dt->result == DMA_TEST_NOT_RUN)
		goto out_unlock;

	seq_printf(s, "packets received: %u\n", dt->packets_received);
	seq_printf(s, "packets sent: %u\n", dt->packets_sent);
	seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
	seq_printf(s, "buffer overflow errors: %u\n",
		   dt->buffer_overflow_errors);
	seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
	mutex_unlock(&dt->lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);

static void dma_test_debugfs_init(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
	debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
	debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
			    &packets_to_receive_fops);
	debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
			    &packets_to_send_fops);
	debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
	debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}

static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct dma_test *dt;

	dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->svc = svc;
	dt->xd = xd;
	mutex_init(&dt->lock);
	init_completion(&dt->complete);

	tb_service_set_drvdata(svc, dt);
	dma_test_debugfs_init(svc);

	return 0;
}

static void dma_test_remove(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	mutex_lock(&dt->lock);
	debugfs_remove_recursive(dt->debugfs_dir);
	mutex_unlock(&dt->lock);
}

static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend started, it comes out from
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall effectively
	 * re-running the test.
	 */
	return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};

static const struct tb_service_id dma_test_ids[] = {
	{ TB_SERVICE("dma_test", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

static struct tb_service_driver dma_test_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt_dma_test",
		.pm = &dma_test_pm_ops,
	},
	.probe = dma_test_probe,
	.remove = dma_test_remove,
	.id_table = dma_test_ids,
};

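/*
 * Module init: build the transmit test pattern and publish the DMA
 * test XDomain property directory so the remote host can discover the
 * service, then register the service driver.
 */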
static int __init dma_test_init(void)
{
	u64 data_value = DMA_TEST_DATA_PATTERN;
	int i, ret;

	dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
	if (!dma_test_pattern)
		return -ENOMEM;

	for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
		((u64 *)dma_test_pattern)[i] = data_value++;

	dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
	if (!dma_test_dir) {
		ret = -ENOMEM;
		goto err_free_pattern;
	}

	tb_property_add_immediate(dma_test_dir, "prtcid", 1);
	tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
	tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
	tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

	ret = tb_register_property_dir("dma_test", dma_test_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&dma_test_driver);
	if (ret)
		goto err_unregister_dir;

	return 0;

err_unregister_dir:
	tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
	tb_property_free_dir(dma_test_dir);
err_free_pattern:
	kfree(dma_test_pattern);

	return ret;
}
module_init(dma_test_init);

static void __exit dma_test_exit(void)
{
	tb_unregister_service_driver(&dma_test_driver);
	tb_unregister_property_dir("dma_test", dma_test_dir);
	tb_property_free_dir(dma_test_dir);
	kfree(dma_test_pattern);
}
module_exit(dma_test_exit);

MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver");
MODULE_LICENSE("GPL v2");