xref: /openbmc/linux/drivers/dma/dmatest.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * DMA Engine test module
4   *
5   * Copyright (C) 2007 Atmel Corporation
6   * Copyright (C) 2013 Intel Corporation
7   */
8  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9  
10  #include <linux/err.h>
11  #include <linux/delay.h>
12  #include <linux/dma-mapping.h>
13  #include <linux/dmaengine.h>
14  #include <linux/freezer.h>
15  #include <linux/init.h>
16  #include <linux/kthread.h>
17  #include <linux/sched/task.h>
18  #include <linux/module.h>
19  #include <linux/moduleparam.h>
20  #include <linux/random.h>
21  #include <linux/slab.h>
22  #include <linux/wait.h>
23  
24  static unsigned int test_buf_size = 16384;
25  module_param(test_buf_size, uint, 0644);
26  MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
27  
28  static char test_device[32];
29  module_param_string(device, test_device, sizeof(test_device), 0644);
30  MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
31  
32  static unsigned int threads_per_chan = 1;
33  module_param(threads_per_chan, uint, 0644);
34  MODULE_PARM_DESC(threads_per_chan,
35  		"Number of threads to start per channel (default: 1)");
36  
37  static unsigned int max_channels;
38  module_param(max_channels, uint, 0644);
39  MODULE_PARM_DESC(max_channels,
40  		"Maximum number of channels to use (default: all)");
41  
42  static unsigned int iterations;
43  module_param(iterations, uint, 0644);
44  MODULE_PARM_DESC(iterations,
45  		"Iterations before stopping test (default: infinite)");
46  
47  static unsigned int dmatest;
48  module_param(dmatest, uint, 0644);
49  MODULE_PARM_DESC(dmatest,
50  		"dmatest 0-memcpy 1-memset (default: 0)");
51  
52  static unsigned int xor_sources = 3;
53  module_param(xor_sources, uint, 0644);
54  MODULE_PARM_DESC(xor_sources,
55  		"Number of xor source buffers (default: 3)");
56  
57  static unsigned int pq_sources = 3;
58  module_param(pq_sources, uint, 0644);
59  MODULE_PARM_DESC(pq_sources,
60  		"Number of p+q source buffers (default: 3)");
61  
62  static int timeout = 3000;
63  module_param(timeout, int, 0644);
64  MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
65  		 "Pass -1 for infinite timeout");
66  
67  static bool noverify;
68  module_param(noverify, bool, 0644);
69  MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
70  
71  static bool norandom;
72  module_param(norandom, bool, 0644);
73  MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
74  
75  static bool verbose;
76  module_param(verbose, bool, 0644);
77  MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
78  
79  static int alignment = -1;
80  module_param(alignment, int, 0644);
81  MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");
82  
83  static unsigned int transfer_size;
84  module_param(transfer_size, uint, 0644);
85  MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
86  
87  static bool polled;
88  module_param(polled, bool, 0644);
89  MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
90  
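/*
 * Typical usage, for illustration (see
 * Documentation/driver-api/dmaengine/dmatest.rst for the full guide):
 *
 *	% modprobe dmatest timeout=2000 iterations=5 channel=dma0chan0 run=1
 *
 * or, with the module already loaded, through its parameter files:
 *
 *	% echo dma0chan0 > /sys/module/dmatest/parameters/channel
 *	% echo 1 > /sys/module/dmatest/parameters/run
 */
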
91  /**
92   * struct dmatest_params - test parameters.
93   * @buf_size:		size of the memcpy test buffer
94   * @channel:		bus ID of the channel to test
95   * @device:		bus ID of the DMA Engine to test
96   * @threads_per_chan:	number of threads to start per channel
97   * @max_channels:	maximum number of channels to use
98   * @iterations:		iterations before stopping test
99   * @xor_sources:	number of xor source buffers
100   * @pq_sources:		number of p+q source buffers
101   * @timeout:		transfer timeout in msec, -1 for infinite timeout
102   * @noverify:		disable data verification
103   * @norandom:		disable random offset setup
104   * @alignment:		custom data address alignment taken as 2^alignment
105   * @transfer_size:	custom transfer size in bytes
106   * @polled:		use polling for completion instead of interrupts
107   */
108  struct dmatest_params {
109  	unsigned int	buf_size;
110  	char		channel[20];
111  	char		device[32];
112  	unsigned int	threads_per_chan;
113  	unsigned int	max_channels;
114  	unsigned int	iterations;
115  	unsigned int	xor_sources;
116  	unsigned int	pq_sources;
117  	int		timeout;
118  	bool		noverify;
119  	bool		norandom;
120  	int		alignment;
121  	unsigned int	transfer_size;
122  	bool		polled;
123  };
124  
125  /**
126   * struct dmatest_info - test information.
127   * @params:		test parameters
128   * @channels:		channels under test
129   * @nr_channels:	number of channels under test
130   * @lock:		access protection to the fields of this structure
131   * @did_init:		module has been initialized completely
132   * @last_error:		test has faced configuration issues
133   */
134  static struct dmatest_info {
135  	/* Test parameters */
136  	struct dmatest_params	params;
137  
138  	/* Internal state */
139  	struct list_head	channels;
140  	unsigned int		nr_channels;
141  	int			last_error;
142  	struct mutex		lock;
143  	bool			did_init;
144  } test_info = {
145  	.channels = LIST_HEAD_INIT(test_info.channels),
146  	.lock = __MUTEX_INITIALIZER(test_info.lock),
147  };
148  
149  static int dmatest_run_set(const char *val, const struct kernel_param *kp);
150  static int dmatest_run_get(char *val, const struct kernel_param *kp);
151  static const struct kernel_param_ops run_ops = {
152  	.set = dmatest_run_set,
153  	.get = dmatest_run_get,
154  };
155  static bool dmatest_run;
156  module_param_cb(run, &run_ops, &dmatest_run, 0644);
157  MODULE_PARM_DESC(run, "Run the test (default: false)");
158  
159  static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
160  static int dmatest_chan_get(char *val, const struct kernel_param *kp);
161  static const struct kernel_param_ops multi_chan_ops = {
162  	.set = dmatest_chan_set,
163  	.get = dmatest_chan_get,
164  };
165  
166  static char test_channel[20];
167  static struct kparam_string newchan_kps = {
168  	.string = test_channel,
169  	.maxlen = 20,
170  };
171  module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
172  MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
173  
174  static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
175  static const struct kernel_param_ops test_list_ops = {
176  	.get = dmatest_test_list_get,
177  };
178  module_param_cb(test_list, &test_list_ops, NULL, 0444);
179  MODULE_PARM_DESC(test_list, "Print current test list");
180  
181  /* Maximum amount of mismatched bytes in buffer to print */
182  #define MAX_ERROR_COUNT		32
183  
184  /*
185   * Initialization patterns. All bytes in the source buffer have bit 7
186   * set, all bytes in the destination buffer have bit 7 cleared.
187   *
188   * Bit 6 is set for all bytes which are to be copied by the DMA
189   * engine. Bit 5 is set for all bytes which are to be overwritten by
190   * the DMA engine.
191   *
192   * The remaining bits are the inverse of a counter which increments by
193   * one for each byte address.
194   */
195  #define PATTERN_SRC		0x80
196  #define PATTERN_DST		0x00
197  #define PATTERN_COPY		0x40
198  #define PATTERN_OVERWRITE	0x20
199  #define PATTERN_COUNT_MASK	0x1f
200  #define PATTERN_MEMSET_IDX	0x01
201  
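/*
 * Worked example, for illustration: the byte at counter value 2 inside
 * the region to be copied has inverse index ~2 & PATTERN_COUNT_MASK ==
 * 0x1d, so its source value is PATTERN_SRC | PATTERN_COPY | 0x1d == 0xdd,
 * while the matching, not-yet-overwritten destination byte is
 * PATTERN_DST | 0x1d == 0x1d.
 */
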
202  /* Fixed point arithmetic ops */
203  #define FIXPT_SHIFT		8
204  #define FIXPNT_MASK		0xFF
205  #define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
206  #define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
207  #define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)
208  
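/*
 * Worked example, for illustration: with FIXPT_SHIFT == 8 the value
 * 0x280 represents 2.5, so FIXPT_TO_INT(0x280) == 2 and
 * FIXPT_GET_FRAC(0x280) == (0x80 * 100) >> 8 == 50, printed as "2.50".
 */
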
209  /* poor man's completion - we want to use wait_event_freezable() on it */
210  struct dmatest_done {
211  	bool			done;
212  	wait_queue_head_t	*wait;
213  };
214  
215  struct dmatest_data {
216  	u8		**raw;
217  	u8		**aligned;
218  	unsigned int	cnt;
219  	unsigned int	off;
220  };
221  
222  struct dmatest_thread {
223  	struct list_head	node;
224  	struct dmatest_info	*info;
225  	struct task_struct	*task;
226  	struct dma_chan		*chan;
227  	struct dmatest_data	src;
228  	struct dmatest_data	dst;
229  	enum dma_transaction_type type;
230  	wait_queue_head_t done_wait;
231  	struct dmatest_done test_done;
232  	bool			done;
233  	bool			pending;
234  };
235  
236  struct dmatest_chan {
237  	struct list_head	node;
238  	struct dma_chan		*chan;
239  	struct list_head	threads;
240  };
241  
242  static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
243  static bool wait;
244  
245  static bool is_threaded_test_run(struct dmatest_info *info)
246  {
247  	struct dmatest_chan *dtc;
248  
249  	list_for_each_entry(dtc, &info->channels, node) {
250  		struct dmatest_thread *thread;
251  
252  		list_for_each_entry(thread, &dtc->threads, node) {
253  			if (!thread->done && !thread->pending)
254  				return true;
255  		}
256  	}
257  
258  	return false;
259  }
260  
261  static bool is_threaded_test_pending(struct dmatest_info *info)
262  {
263  	struct dmatest_chan *dtc;
264  
265  	list_for_each_entry(dtc, &info->channels, node) {
266  		struct dmatest_thread *thread;
267  
268  		list_for_each_entry(thread, &dtc->threads, node) {
269  			if (thread->pending)
270  				return true;
271  		}
272  	}
273  
274  	return false;
275  }
276  
277  static int dmatest_wait_get(char *val, const struct kernel_param *kp)
278  {
279  	struct dmatest_info *info = &test_info;
280  	struct dmatest_params *params = &info->params;
281  
282  	if (params->iterations)
283  		wait_event(thread_wait, !is_threaded_test_run(info));
284  	wait = true;
285  	return param_get_bool(val, kp);
286  }
287  
288  static const struct kernel_param_ops wait_ops = {
289  	.get = dmatest_wait_get,
290  	.set = param_set_bool,
291  };
292  module_param_cb(wait, &wait_ops, &wait, 0444);
293  MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
294  
295  static bool dmatest_match_channel(struct dmatest_params *params,
296  		struct dma_chan *chan)
297  {
298  	if (params->channel[0] == '\0')
299  		return true;
300  	return strcmp(dma_chan_name(chan), params->channel) == 0;
301  }
302  
303  static bool dmatest_match_device(struct dmatest_params *params,
304  		struct dma_device *device)
305  {
306  	if (params->device[0] == '\0')
307  		return true;
308  	return strcmp(dev_name(device->dev), params->device) == 0;
309  }
310  
311  static unsigned long dmatest_random(void)
312  {
313  	unsigned long buf;
314  
315  	get_random_bytes(&buf, sizeof(buf));
316  	return buf;
317  }
318  
319  static inline u8 gen_inv_idx(u8 index, bool is_memset)
320  {
321  	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;
322  
323  	return ~val & PATTERN_COUNT_MASK;
324  }
325  
326  static inline u8 gen_src_value(u8 index, bool is_memset)
327  {
328  	return PATTERN_SRC | gen_inv_idx(index, is_memset);
329  }
330  
331  static inline u8 gen_dst_value(u8 index, bool is_memset)
332  {
333  	return PATTERN_DST | gen_inv_idx(index, is_memset);
334  }
335  
336  static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
337  		unsigned int buf_size, bool is_memset)
338  {
339  	unsigned int i;
340  	u8 *buf;
341  
342  	for (; (buf = *bufs); bufs++) {
343  		for (i = 0; i < start; i++)
344  			buf[i] = gen_src_value(i, is_memset);
345  		for ( ; i < start + len; i++)
346  			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
347  		for ( ; i < buf_size; i++)
348  			buf[i] = gen_src_value(i, is_memset);
349  		buf++;
350  	}
351  }
352  
353  static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
354  		unsigned int buf_size, bool is_memset)
355  {
356  	unsigned int i;
357  	u8 *buf;
358  
359  	for (; (buf = *bufs); bufs++) {
360  		for (i = 0; i < start; i++)
361  			buf[i] = gen_dst_value(i, is_memset);
362  		for ( ; i < start + len; i++)
363  			buf[i] = gen_dst_value(i, is_memset) |
364  						PATTERN_OVERWRITE;
365  		for ( ; i < buf_size; i++)
366  			buf[i] = gen_dst_value(i, is_memset);
367  	}
368  }
369  
370  static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
371  		unsigned int counter, bool is_srcbuf, bool is_memset)
372  {
373  	u8		diff = actual ^ pattern;
374  	u8		expected = pattern | gen_inv_idx(counter, is_memset);
375  	const char	*thread_name = current->comm;
376  
377  	if (is_srcbuf)
378  		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
379  			thread_name, index, expected, actual);
380  	else if ((pattern & PATTERN_COPY)
381  			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
382  		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
383  			thread_name, index, expected, actual);
384  	else if (diff & PATTERN_SRC)
385  		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
386  			thread_name, index, expected, actual);
387  	else
388  		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
389  			thread_name, index, expected, actual);
390  }
391  
392  static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
393  		unsigned int end, unsigned int counter, u8 pattern,
394  		bool is_srcbuf, bool is_memset)
395  {
396  	unsigned int i;
397  	unsigned int error_count = 0;
398  	u8 actual;
399  	u8 expected;
400  	u8 *buf;
401  	unsigned int counter_orig = counter;
402  
403  	for (; (buf = *bufs); bufs++) {
404  		counter = counter_orig;
405  		for (i = start; i < end; i++) {
406  			actual = buf[i];
407  			expected = pattern | gen_inv_idx(counter, is_memset);
408  			if (actual != expected) {
409  				if (error_count < MAX_ERROR_COUNT)
410  					dmatest_mismatch(actual, pattern, i,
411  							 counter, is_srcbuf,
412  							 is_memset);
413  				error_count++;
414  			}
415  			counter++;
416  		}
417  	}
418  
419  	if (error_count > MAX_ERROR_COUNT)
420  		pr_warn("%s: %u errors suppressed\n",
421  			current->comm, error_count - MAX_ERROR_COUNT);
422  
423  	return error_count;
424  }
425  
426  
427  static void dmatest_callback(void *arg)
428  {
429  	struct dmatest_done *done = arg;
430  	struct dmatest_thread *thread =
431  		container_of(done, struct dmatest_thread, test_done);
432  	if (!thread->done) {
433  		done->done = true;
434  		wake_up_all(done->wait);
435  	} else {
436  		/*
437  		 * If thread->done, it means that this callback occurred
438  		 * after the parent thread has cleaned up. This can
439  		 * happen in the case that the driver doesn't implement
440  		 * the terminate_all() functionality and a DMA operation
441  		 * did not occur within the timeout period.
442  		 */
443  		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
444  	}
445  }
446  
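/*
 * For illustration: min_odd(4, 6) == 3. The smaller value is rounded
 * down to odd so that XOR/PQ tests use an odd number of identical
 * source buffers, making the expected result equal the source pattern
 * ("force odd to ensure dst = src" at the call sites below).
 */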
447  static unsigned int min_odd(unsigned int x, unsigned int y)
448  {
449  	unsigned int val = min(x, y);
450  
451  	return val % 2 ? val : val - 1;
452  }
453  
454  static void result(const char *err, unsigned int n, unsigned int src_off,
455  		   unsigned int dst_off, unsigned int len, unsigned long data)
456  {
457  	if (IS_ERR_VALUE(data)) {
458  		pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%ld)\n",
459  			current->comm, n, err, src_off, dst_off, len, data);
460  	} else {
461  		pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
462  			current->comm, n, err, src_off, dst_off, len, data);
463  	}
464  }
465  
466  static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
467  		       unsigned int dst_off, unsigned int len,
468  		       unsigned long data)
469  {
470  	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
471  		 current->comm, n, err, src_off, dst_off, len, data);
472  }
473  
474  #define verbose_result(err, n, src_off, dst_off, len, data) ({	\
475  	if (verbose)						\
476  		result(err, n, src_off, dst_off, len, data);	\
477  	else							\
478  		dbg_result(err, n, src_off, dst_off, len, data);\
479  })
480  
481  static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
482  {
483  	unsigned long long per_sec = 1000000;
484  
485  	if (runtime <= 0)
486  		return 0;
487  
488  	/* drop precision until runtime is 32-bits */
489  	while (runtime > UINT_MAX) {
490  		runtime >>= 1;
491  		per_sec <<= 1;
492  	}
493  
494  	per_sec *= val;
495  	per_sec = INT_TO_FIXPT(per_sec);
496  	do_div(per_sec, runtime);
497  
498  	return per_sec;
499  }
500  
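/*
 * Convert total bytes moved over a runtime in microseconds to KB/s in
 * fixed point. For illustration, 32768 KB in 2000000 us comes out as
 * 1000000 * 32768 / 2000000 == 16384 KB/s.
 */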
501  static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
502  {
503  	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
504  }
505  
506  static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
507  {
508  	unsigned int i;
509  
510  	for (i = 0; i < cnt; i++)
511  		kfree(d->raw[i]);
512  
513  	kfree(d->aligned);
514  	kfree(d->raw);
515  }
516  
517  static void dmatest_free_test_data(struct dmatest_data *d)
518  {
519  	__dmatest_free_test_data(d, d->cnt);
520  }
521  
522  static int dmatest_alloc_test_data(struct dmatest_data *d,
523  		unsigned int buf_size, u8 align)
524  {
525  	unsigned int i = 0;
526  
527  	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
528  	if (!d->raw)
529  		return -ENOMEM;
530  
531  	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
532  	if (!d->aligned)
533  		goto err;
534  
535  	for (i = 0; i < d->cnt; i++) {
536  		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
537  		if (!d->raw[i])
538  			goto err;
539  
540  		/* align to alignment restriction */
541  		if (align)
542  			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
543  		else
544  			d->aligned[i] = d->raw[i];
545  	}
546  
547  	return 0;
548  err:
549  	__dmatest_free_test_data(d, i);
550  	return -ENOMEM;
551  }
552  
553  /*
554   * This function repeatedly tests DMA transfers of various lengths and
555   * offsets for a given operation type until it is told to exit by
556   * kthread_stop(). There may be multiple threads running this function
557   * in parallel for a single channel, and there may be multiple channels
558   * being tested in parallel.
559   *
560   * Before each test, the source and destination buffers are initialized
561   * with a known pattern. This pattern is different depending on
562   * whether it's in an area which is supposed to be copied or
563   * overwritten, and different in the source and destination buffers.
564   * So if the DMA engine doesn't copy exactly what we tell it to copy,
565   * we'll notice.
566   */
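/*
 * Buffer layout in one iteration, for illustration:
 *
 *	src: |  PATTERN_SRC  | PATTERN_SRC|COPY      |  PATTERN_SRC  |
 *	     0           src->off            src->off+len      buf_size
 *	dst: |  PATTERN_DST  | PATTERN_DST|OVERWRITE |  PATTERN_DST  |
 *	     0           dst->off            dst->off+len      buf_size
 *
 * After a successful transfer, dst bytes in [dst->off, dst->off + len)
 * must match the PATTERN_SRC | PATTERN_COPY values of the source region.
 */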
567  static int dmatest_func(void *data)
568  {
569  	struct dmatest_thread	*thread = data;
570  	struct dmatest_done	*done = &thread->test_done;
571  	struct dmatest_info	*info;
572  	struct dmatest_params	*params;
573  	struct dma_chan		*chan;
574  	struct dma_device	*dev;
575  	struct device		*dma_dev;
576  	unsigned int		error_count;
577  	unsigned int		failed_tests = 0;
578  	unsigned int		total_tests = 0;
579  	dma_cookie_t		cookie;
580  	enum dma_status		status;
581  	enum dma_ctrl_flags	flags;
582  	u8			*pq_coefs = NULL;
583  	int			ret;
584  	unsigned int		buf_size;
585  	struct dmatest_data	*src;
586  	struct dmatest_data	*dst;
587  	int			i;
588  	ktime_t			ktime, start, diff;
589  	ktime_t			filltime = 0;
590  	ktime_t			comparetime = 0;
591  	s64			runtime = 0;
592  	unsigned long long	total_len = 0;
593  	unsigned long long	iops = 0;
594  	u8			align = 0;
595  	bool			is_memset = false;
596  	dma_addr_t		*srcs;
597  	dma_addr_t		*dma_pq;
598  
599  	set_freezable();
600  
601  	ret = -ENOMEM;
602  
603  	smp_rmb();
604  	thread->pending = false;
605  	info = thread->info;
606  	params = &info->params;
607  	chan = thread->chan;
608  	dev = chan->device;
609  	dma_dev = dmaengine_get_dma_device(chan);
610  
611  	src = &thread->src;
612  	dst = &thread->dst;
613  	if (thread->type == DMA_MEMCPY) {
614  		align = params->alignment < 0 ? dev->copy_align :
615  						params->alignment;
616  		src->cnt = dst->cnt = 1;
617  	} else if (thread->type == DMA_MEMSET) {
618  		align = params->alignment < 0 ? dev->fill_align :
619  						params->alignment;
620  		src->cnt = dst->cnt = 1;
621  		is_memset = true;
622  	} else if (thread->type == DMA_XOR) {
623  		/* force odd to ensure dst = src */
624  		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
625  		dst->cnt = 1;
626  		align = params->alignment < 0 ? dev->xor_align :
627  						params->alignment;
628  	} else if (thread->type == DMA_PQ) {
629  		/* force odd to ensure dst = src */
630  		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
631  		dst->cnt = 2;
632  		align = params->alignment < 0 ? dev->pq_align :
633  						params->alignment;
634  
635  		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
636  		if (!pq_coefs)
637  			goto err_thread_type;
638  
639  		for (i = 0; i < src->cnt; i++)
640  			pq_coefs[i] = 1;
641  	} else
642  		goto err_thread_type;
643  
644  	/* Check if buffer count fits into map count variable (u8) */
645  	if ((src->cnt + dst->cnt) >= 255) {
646  		pr_err("too many buffers (%d of 255 supported)\n",
647  		       src->cnt + dst->cnt);
648  		goto err_free_coefs;
649  	}
650  
651  	buf_size = params->buf_size;
652  	if (1 << align > buf_size) {
653  		pr_err("%u-byte buffer too small for %d-byte alignment\n",
654  		       buf_size, 1 << align);
655  		goto err_free_coefs;
656  	}
657  
658  	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
659  		goto err_free_coefs;
660  
661  	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
662  		goto err_src;
663  
664  	set_user_nice(current, 10);
665  
666  	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
667  	if (!srcs)
668  		goto err_dst;
669  
670  	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
671  	if (!dma_pq)
672  		goto err_srcs_array;
673  
674  	/*
675  	 * src and dst buffers are freed by ourselves below
676  	 */
677  	if (params->polled)
678  		flags = DMA_CTRL_ACK;
679  	else
680  		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
681  
682  	ktime = ktime_get();
683  	while (!(kthread_should_stop() ||
684  	       (params->iterations && total_tests >= params->iterations))) {
685  		struct dma_async_tx_descriptor *tx = NULL;
686  		struct dmaengine_unmap_data *um;
687  		dma_addr_t *dsts;
688  		unsigned int len;
689  
690  		total_tests++;
691  
692  		if (params->transfer_size) {
693  			if (params->transfer_size >= buf_size) {
694  				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
695  				       params->transfer_size, buf_size);
696  				break;
697  			}
698  			len = params->transfer_size;
699  		} else if (params->norandom) {
700  			len = buf_size;
701  		} else {
702  			len = dmatest_random() % buf_size + 1;
703  		}
704  
705  		/* Do not alter transfer size explicitly defined by user */
706  		if (!params->transfer_size) {
707  			len = (len >> align) << align;
708  			if (!len)
709  				len = 1 << align;
710  		}
711  		total_len += len;
712  
713  		if (params->norandom) {
714  			src->off = 0;
715  			dst->off = 0;
716  		} else {
717  			src->off = dmatest_random() % (buf_size - len + 1);
718  			dst->off = dmatest_random() % (buf_size - len + 1);
719  
720  			src->off = (src->off >> align) << align;
721  			dst->off = (dst->off >> align) << align;
722  		}
723  
724  		if (!params->noverify) {
725  			start = ktime_get();
726  			dmatest_init_srcs(src->aligned, src->off, len,
727  					  buf_size, is_memset);
728  			dmatest_init_dsts(dst->aligned, dst->off, len,
729  					  buf_size, is_memset);
730  
731  			diff = ktime_sub(ktime_get(), start);
732  			filltime = ktime_add(filltime, diff);
733  		}
734  
735  		um = dmaengine_get_unmap_data(dma_dev, src->cnt + dst->cnt,
736  					      GFP_KERNEL);
737  		if (!um) {
738  			failed_tests++;
739  			result("unmap data NULL", total_tests,
740  			       src->off, dst->off, len, ret);
741  			continue;
742  		}
743  
744  		um->len = buf_size;
745  		for (i = 0; i < src->cnt; i++) {
746  			void *buf = src->aligned[i];
747  			struct page *pg = virt_to_page(buf);
748  			unsigned long pg_off = offset_in_page(buf);
749  
750  			um->addr[i] = dma_map_page(dma_dev, pg, pg_off,
751  						   um->len, DMA_TO_DEVICE);
752  			srcs[i] = um->addr[i] + src->off;
753  			ret = dma_mapping_error(dma_dev, um->addr[i]);
754  			if (ret) {
755  				result("src mapping error", total_tests,
756  				       src->off, dst->off, len, ret);
757  				goto error_unmap_continue;
758  			}
759  			um->to_cnt++;
760  		}
761  		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
762  		dsts = &um->addr[src->cnt];
763  		for (i = 0; i < dst->cnt; i++) {
764  			void *buf = dst->aligned[i];
765  			struct page *pg = virt_to_page(buf);
766  			unsigned long pg_off = offset_in_page(buf);
767  
768  			dsts[i] = dma_map_page(dma_dev, pg, pg_off, um->len,
769  					       DMA_BIDIRECTIONAL);
770  			ret = dma_mapping_error(dma_dev, dsts[i]);
771  			if (ret) {
772  				result("dst mapping error", total_tests,
773  				       src->off, dst->off, len, ret);
774  				goto error_unmap_continue;
775  			}
776  			um->bidi_cnt++;
777  		}
778  
779  		if (thread->type == DMA_MEMCPY)
780  			tx = dev->device_prep_dma_memcpy(chan,
781  							 dsts[0] + dst->off,
782  							 srcs[0], len, flags);
783  		else if (thread->type == DMA_MEMSET)
784  			tx = dev->device_prep_dma_memset(chan,
785  						dsts[0] + dst->off,
786  						*(src->aligned[0] + src->off),
787  						len, flags);
788  		else if (thread->type == DMA_XOR)
789  			tx = dev->device_prep_dma_xor(chan,
790  						      dsts[0] + dst->off,
791  						      srcs, src->cnt,
792  						      len, flags);
793  		else if (thread->type == DMA_PQ) {
794  			for (i = 0; i < dst->cnt; i++)
795  				dma_pq[i] = dsts[i] + dst->off;
796  			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
797  						     src->cnt, pq_coefs,
798  						     len, flags);
799  		}
800  
801  		if (!tx) {
802  			result("prep error", total_tests, src->off,
803  			       dst->off, len, ret);
804  			msleep(100);
805  			goto error_unmap_continue;
806  		}
807  
808  		done->done = false;
809  		if (!params->polled) {
810  			tx->callback = dmatest_callback;
811  			tx->callback_param = done;
812  		}
813  		cookie = tx->tx_submit(tx);
814  
815  		if (dma_submit_error(cookie)) {
816  			result("submit error", total_tests, src->off,
817  			       dst->off, len, ret);
818  			msleep(100);
819  			goto error_unmap_continue;
820  		}
821  
822  		if (params->polled) {
823  			status = dma_sync_wait(chan, cookie);
824  			dmaengine_terminate_sync(chan);
825  			if (status == DMA_COMPLETE)
826  				done->done = true;
827  		} else {
828  			dma_async_issue_pending(chan);
829  
830  			wait_event_freezable_timeout(thread->done_wait,
831  					done->done,
832  					msecs_to_jiffies(params->timeout));
833  
834  			status = dma_async_is_tx_complete(chan, cookie, NULL,
835  							  NULL);
836  		}
837  
838  		if (!done->done) {
839  			result("test timed out", total_tests, src->off, dst->off,
840  			       len, 0);
841  			goto error_unmap_continue;
842  		} else if (status != DMA_COMPLETE &&
843  			   !(dma_has_cap(DMA_COMPLETION_NO_ORDER,
844  					 dev->cap_mask) &&
845  			     status == DMA_OUT_OF_ORDER)) {
846  			result(status == DMA_ERROR ?
847  			       "completion error status" :
848  			       "completion busy status", total_tests, src->off,
849  			       dst->off, len, ret);
850  			goto error_unmap_continue;
851  		}
852  
853  		dmaengine_unmap_put(um);
854  
855  		if (params->noverify) {
856  			verbose_result("test passed", total_tests, src->off,
857  				       dst->off, len, 0);
858  			continue;
859  		}
860  
861  		start = ktime_get();
862  		pr_debug("%s: verifying source buffer...\n", current->comm);
863  		error_count = dmatest_verify(src->aligned, 0, src->off,
864  				0, PATTERN_SRC, true, is_memset);
865  		error_count += dmatest_verify(src->aligned, src->off,
866  				src->off + len, src->off,
867  				PATTERN_SRC | PATTERN_COPY, true, is_memset);
868  		error_count += dmatest_verify(src->aligned, src->off + len,
869  				buf_size, src->off + len,
870  				PATTERN_SRC, true, is_memset);
871  
872  		pr_debug("%s: verifying dest buffer...\n", current->comm);
873  		error_count += dmatest_verify(dst->aligned, 0, dst->off,
874  				0, PATTERN_DST, false, is_memset);
875  
876  		error_count += dmatest_verify(dst->aligned, dst->off,
877  				dst->off + len, src->off,
878  				PATTERN_SRC | PATTERN_COPY, false, is_memset);
879  
880  		error_count += dmatest_verify(dst->aligned, dst->off + len,
881  				buf_size, dst->off + len,
882  				PATTERN_DST, false, is_memset);
883  
884  		diff = ktime_sub(ktime_get(), start);
885  		comparetime = ktime_add(comparetime, diff);
886  
887  		if (error_count) {
888  			result("data error", total_tests, src->off, dst->off,
889  			       len, error_count);
890  			failed_tests++;
891  		} else {
892  			verbose_result("test passed", total_tests, src->off,
893  				       dst->off, len, 0);
894  		}
895  
896  		continue;
897  
898  error_unmap_continue:
899  		dmaengine_unmap_put(um);
900  		failed_tests++;
901  	}
902  	ktime = ktime_sub(ktime_get(), ktime);
903  	ktime = ktime_sub(ktime, comparetime);
904  	ktime = ktime_sub(ktime, filltime);
905  	runtime = ktime_to_us(ktime);
906  
907  	ret = 0;
908  	kfree(dma_pq);
909  err_srcs_array:
910  	kfree(srcs);
911  err_dst:
912  	dmatest_free_test_data(dst);
913  err_src:
914  	dmatest_free_test_data(src);
915  err_free_coefs:
916  	kfree(pq_coefs);
917  err_thread_type:
918  	iops = dmatest_persec(runtime, total_tests);
919  	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
920  		current->comm, total_tests, failed_tests,
921  		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
922  		dmatest_KBs(runtime, total_len), ret);
923  
924  	/* terminate all transfers on specified channels */
925  	if (ret || failed_tests)
926  		dmaengine_terminate_sync(chan);
927  
928  	thread->done = true;
929  	wake_up(&thread_wait);
930  
931  	return ret;
932  }
933  
934  static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
935  {
936  	struct dmatest_thread	*thread;
937  	struct dmatest_thread	*_thread;
938  	int			ret;
939  
940  	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
941  		ret = kthread_stop(thread->task);
942  		pr_debug("thread %s exited with status %d\n",
943  			 thread->task->comm, ret);
944  		list_del(&thread->node);
945  		put_task_struct(thread->task);
946  		kfree(thread);
947  	}
948  
949  	/* terminate all transfers on specified channels */
950  	dmaengine_terminate_sync(dtc->chan);
951  
952  	kfree(dtc);
953  }
954  
955  static int dmatest_add_threads(struct dmatest_info *info,
956  		struct dmatest_chan *dtc, enum dma_transaction_type type)
957  {
958  	struct dmatest_params *params = &info->params;
959  	struct dmatest_thread *thread;
960  	struct dma_chan *chan = dtc->chan;
961  	char *op;
962  	unsigned int i;
963  
964  	if (type == DMA_MEMCPY)
965  		op = "copy";
966  	else if (type == DMA_MEMSET)
967  		op = "set";
968  	else if (type == DMA_XOR)
969  		op = "xor";
970  	else if (type == DMA_PQ)
971  		op = "pq";
972  	else
973  		return -EINVAL;
974  
975  	for (i = 0; i < params->threads_per_chan; i++) {
976  		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
977  		if (!thread) {
978  			pr_warn("No memory for %s-%s%u\n",
979  				dma_chan_name(chan), op, i);
980  			break;
981  		}
982  		thread->info = info;
983  		thread->chan = dtc->chan;
984  		thread->type = type;
985  		thread->test_done.wait = &thread->done_wait;
986  		init_waitqueue_head(&thread->done_wait);
987  		smp_wmb();
988  		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
989  				dma_chan_name(chan), op, i);
990  		if (IS_ERR(thread->task)) {
991  			pr_warn("Failed to create thread %s-%s%u\n",
992  				dma_chan_name(chan), op, i);
993  			kfree(thread);
994  			break;
995  		}
996  
997  		/* srcbuf and dstbuf are allocated by the thread itself */
998  		get_task_struct(thread->task);
999  		list_add_tail(&thread->node, &dtc->threads);
1000  		thread->pending = true;
1001  	}
1002  
1003  	return i;
1004  }
1005  
1006  static int dmatest_add_channel(struct dmatest_info *info,
1007  		struct dma_chan *chan)
1008  {
1009  	struct dmatest_chan	*dtc;
1010  	struct dma_device	*dma_dev = chan->device;
1011  	unsigned int		thread_count = 0;
1012  	int cnt;
1013  
1014  	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
1015  	if (!dtc) {
1016  		pr_warn("No memory for %s\n", dma_chan_name(chan));
1017  		return -ENOMEM;
1018  	}
1019  
1020  	dtc->chan = chan;
1021  	INIT_LIST_HEAD(&dtc->threads);
1022  
1023  	if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) &&
1024  	    info->params.polled) {
1025  		info->params.polled = false;
1026  		pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n");
1027  	}
1028  
1029  	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1030  		if (dmatest == 0) {
1031  			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
1032  			thread_count += cnt > 0 ? cnt : 0;
1033  		}
1034  	}
1035  
1036  	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1037  		if (dmatest == 1) {
1038  			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
1039  			thread_count += cnt > 0 ? cnt : 0;
1040  		}
1041  	}
1042  
1043  	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1044  		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
1045  		thread_count += cnt > 0 ? cnt : 0;
1046  	}
1047  	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1048  		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
1049  		thread_count += cnt > 0 ? cnt : 0;
1050  	}
1051  
1052  	pr_info("Added %u threads using %s\n",
1053  		thread_count, dma_chan_name(chan));
1054  
1055  	list_add_tail(&dtc->node, &info->channels);
1056  	info->nr_channels++;
1057  
1058  	return 0;
1059  }
1060  
1061  static bool filter(struct dma_chan *chan, void *param)
1062  {
1063  	return dmatest_match_channel(param, chan) && dmatest_match_device(param, chan->device);
1064  }
1065  
1066  static void request_channels(struct dmatest_info *info,
1067  			     enum dma_transaction_type type)
1068  {
1069  	dma_cap_mask_t mask;
1070  
1071  	dma_cap_zero(mask);
1072  	dma_cap_set(type, mask);
1073  	for (;;) {
1074  		struct dmatest_params *params = &info->params;
1075  		struct dma_chan *chan;
1076  
1077  		chan = dma_request_channel(mask, filter, params);
1078  		if (chan) {
1079  			if (dmatest_add_channel(info, chan)) {
1080  				dma_release_channel(chan);
1081  				break; /* add_channel failed, punt */
1082  			}
1083  		} else
1084  			break; /* no more channels available */
1085  		if (params->max_channels &&
1086  		    info->nr_channels >= params->max_channels)
1087  			break; /* we have all we need */
1088  	}
1089  }
1090  
1091  static void add_threaded_test(struct dmatest_info *info)
1092  {
1093  	struct dmatest_params *params = &info->params;
1094  
1095  	/* Copy test parameters */
1096  	params->buf_size = test_buf_size;
1097  	strscpy(params->channel, strim(test_channel), sizeof(params->channel));
1098  	strscpy(params->device, strim(test_device), sizeof(params->device));
1099  	params->threads_per_chan = threads_per_chan;
1100  	params->max_channels = max_channels;
1101  	params->iterations = iterations;
1102  	params->xor_sources = xor_sources;
1103  	params->pq_sources = pq_sources;
1104  	params->timeout = timeout;
1105  	params->noverify = noverify;
1106  	params->norandom = norandom;
1107  	params->alignment = alignment;
1108  	params->transfer_size = transfer_size;
1109  	params->polled = polled;
1110  
1111  	request_channels(info, DMA_MEMCPY);
1112  	request_channels(info, DMA_MEMSET);
1113  	request_channels(info, DMA_XOR);
1114  	request_channels(info, DMA_PQ);
1115  }
1116  
1117  static void run_pending_tests(struct dmatest_info *info)
1118  {
1119  	struct dmatest_chan *dtc;
1120  	unsigned int thread_count = 0;
1121  
1122  	list_for_each_entry(dtc, &info->channels, node) {
1123  		struct dmatest_thread *thread;
1124  
1125  		thread_count = 0;
1126  		list_for_each_entry(thread, &dtc->threads, node) {
1127  			wake_up_process(thread->task);
1128  			thread_count++;
1129  		}
1130  		pr_info("Started %u threads using %s\n",
1131  			thread_count, dma_chan_name(dtc->chan));
1132  	}
1133  }
1134  
1135  static void stop_threaded_test(struct dmatest_info *info)
1136  {
1137  	struct dmatest_chan *dtc, *_dtc;
1138  	struct dma_chan *chan;
1139  
1140  	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
1141  		list_del(&dtc->node);
1142  		chan = dtc->chan;
1143  		dmatest_cleanup_channel(dtc);
1144  		pr_debug("dropped channel %s\n", dma_chan_name(chan));
1145  		dma_release_channel(chan);
1146  	}
1147  
1148  	info->nr_channels = 0;
1149  }
1150  
1151  static void start_threaded_tests(struct dmatest_info *info)
1152  {
1153  	/* we might be called early to set run=, defer running until all
1154  	 * parameters have been evaluated
1155  	 */
1156  	if (!info->did_init)
1157  		return;
1158  
1159  	run_pending_tests(info);
1160  }
1161  
1162  static int dmatest_run_get(char *val, const struct kernel_param *kp)
1163  {
1164  	struct dmatest_info *info = &test_info;
1165  
1166  	mutex_lock(&info->lock);
1167  	if (is_threaded_test_run(info)) {
1168  		dmatest_run = true;
1169  	} else {
1170  		if (!is_threaded_test_pending(info))
1171  			stop_threaded_test(info);
1172  		dmatest_run = false;
1173  	}
1174  	mutex_unlock(&info->lock);
1175  
1176  	return param_get_bool(val, kp);
1177  }
1178  
1179  static int dmatest_run_set(const char *val, const struct kernel_param *kp)
1180  {
1181  	struct dmatest_info *info = &test_info;
1182  	int ret;
1183  
1184  	mutex_lock(&info->lock);
1185  	ret = param_set_bool(val, kp);
1186  	if (ret) {
1187  		mutex_unlock(&info->lock);
1188  		return ret;
1189  	} else if (dmatest_run) {
1190  		if (!is_threaded_test_pending(info)) {
1191  			/*
1192  			 * We have nothing to run. This can be due to:
1193  			 */
1194  			ret = info->last_error;
1195  			if (ret) {
1196  				/* 1) Misconfiguration */
1197  				pr_err("Channel misconfigured, can't continue\n");
1198  				mutex_unlock(&info->lock);
1199  				return ret;
1200  			} else {
1201  				/* 2) We rely on defaults */
1202  				pr_info("No channels configured, continue with any\n");
1203  				if (!is_threaded_test_run(info))
1204  					stop_threaded_test(info);
1205  				add_threaded_test(info);
1206  			}
1207  		}
1208  		start_threaded_tests(info);
1209  	} else {
1210  		stop_threaded_test(info);
1211  	}
1212  
1213  	mutex_unlock(&info->lock);
1214  
1215  	return ret;
1216  }
1217  
1218  static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
1219  {
1220  	struct dmatest_info *info = &test_info;
1221  	struct dmatest_chan *dtc;
1222  	char chan_reset_val[20];
1223  	int ret;
1224  
1225  	mutex_lock(&info->lock);
1226  	ret = param_set_copystring(val, kp);
1227  	if (ret) {
1228  		mutex_unlock(&info->lock);
1229  		return ret;
1230  	}
1231  	/* Clear any previously run threads */
1232  	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
1233  		stop_threaded_test(info);
1234  	/* Reject channels that are already registered */
1235  	if (is_threaded_test_pending(info)) {
1236  		list_for_each_entry(dtc, &info->channels, node) {
1237  			if (strcmp(dma_chan_name(dtc->chan),
1238  				   strim(test_channel)) == 0) {
1239  				dtc = list_last_entry(&info->channels,
1240  						      struct dmatest_chan,
1241  						      node);
1242  				strscpy(chan_reset_val,
1243  					dma_chan_name(dtc->chan),
1244  					sizeof(chan_reset_val));
1245  				ret = -EBUSY;
1246  				goto add_chan_err;
1247  			}
1248  		}
1249  	}
1250  
1251  	add_threaded_test(info);
1252  
1253  	/* Check if channel was added successfully */
1254  	if (!list_empty(&info->channels)) {
1255  		/*
1256  		 * If the new channel was not successfully added, revert the
1257  		 * "test_channel" string to the name of the last successfully
1258  		 * added channel, except when the user issues an empty string
1259  		 * to the channel parameter.
1260  		 */
1261  		dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
1262  		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
1263  		    && (strcmp("", strim(test_channel)) != 0)) {
1264  			ret = -EINVAL;
1265  			strscpy(chan_reset_val, dma_chan_name(dtc->chan),
1266  				sizeof(chan_reset_val));
1267  			goto add_chan_err;
1268  		}
1269  
1270  	} else {
1271  		/* Clear test_channel if no channels were added successfully */
1272  		strscpy(chan_reset_val, "", sizeof(chan_reset_val));
1273  		ret = -EBUSY;
1274  		goto add_chan_err;
1275  	}
1276  
1277  	info->last_error = ret;
1278  	mutex_unlock(&info->lock);
1279  
1280  	return ret;
1281  
1282  add_chan_err:
1283  	param_set_copystring(chan_reset_val, kp);
1284  	info->last_error = ret;
1285  	mutex_unlock(&info->lock);
1286  
1287  	return ret;
1288  }
1289  
1290  static int dmatest_chan_get(char *val, const struct kernel_param *kp)
1291  {
1292  	struct dmatest_info *info = &test_info;
1293  
1294  	mutex_lock(&info->lock);
1295  	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
1296  		stop_threaded_test(info);
1297  		strscpy(test_channel, "", sizeof(test_channel));
1298  	}
1299  	mutex_unlock(&info->lock);
1300  
1301  	return param_get_string(val, kp);
1302  }
1303  
1304  static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
1305  {
1306  	struct dmatest_info *info = &test_info;
1307  	struct dmatest_chan *dtc;
1308  	unsigned int thread_count = 0;
1309  
1310  	list_for_each_entry(dtc, &info->channels, node) {
1311  		struct dmatest_thread *thread;
1312  
1313  		thread_count = 0;
1314  		list_for_each_entry(thread, &dtc->threads, node) {
1315  			thread_count++;
1316  		}
1317  		pr_info("%u threads using %s\n",
1318  			thread_count, dma_chan_name(dtc->chan));
1319  	}
1320  
1321  	return 0;
1322  }
1323  
1324  static int __init dmatest_init(void)
1325  {
1326  	struct dmatest_info *info = &test_info;
1327  	struct dmatest_params *params = &info->params;
1328  
1329  	if (dmatest_run) {
1330  		mutex_lock(&info->lock);
1331  		add_threaded_test(info);
1332  		run_pending_tests(info);
1333  		mutex_unlock(&info->lock);
1334  	}
1335  
1336  	if (params->iterations && wait)
1337  		wait_event(thread_wait, !is_threaded_test_run(info));
1338  
1339  	/* module parameters are stable, inittime tests are started,
1340  	 * let userspace take over 'run' control
1341  	 */
1342  	info->did_init = true;
1343  
1344  	return 0;
1345  }
1346  /* when compiled-in wait for drivers to load first */
1347  late_initcall(dmatest_init);
1348  
1349  static void __exit dmatest_exit(void)
1350  {
1351  	struct dmatest_info *info = &test_info;
1352  
1353  	mutex_lock(&info->lock);
1354  	stop_threaded_test(info);
1355  	mutex_unlock(&info->lock);
1356  }
1357  module_exit(dmatest_exit);
1358  
1359  MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1360  MODULE_LICENSE("GPL v2");
1361