xref: /openbmc/linux/drivers/accel/qaic/qaic_control.c (revision e50e86dbcabda570fc8a1435fe2fca97e9ab7312)
1  // SPDX-License-Identifier: GPL-2.0-only
2  
3  /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
4  /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
5  
6  #include <asm/byteorder.h>
7  #include <linux/completion.h>
8  #include <linux/crc32.h>
9  #include <linux/delay.h>
10  #include <linux/dma-mapping.h>
11  #include <linux/kref.h>
12  #include <linux/list.h>
13  #include <linux/mhi.h>
14  #include <linux/mm.h>
15  #include <linux/moduleparam.h>
16  #include <linux/mutex.h>
17  #include <linux/overflow.h>
18  #include <linux/pci.h>
19  #include <linux/scatterlist.h>
20  #include <linux/types.h>
21  #include <linux/uaccess.h>
22  #include <linux/workqueue.h>
23  #include <linux/wait.h>
24  #include <drm/drm_device.h>
25  #include <drm/drm_file.h>
26  #include <uapi/drm/qaic_accel.h>
27  
28  #include "qaic.h"
29  
30  #define MANAGE_MAGIC_NUMBER		((__force __le32)0x43494151) /* "QAIC" in little endian */
31  #define QAIC_DBC_Q_GAP			SZ_256
32  #define QAIC_DBC_Q_BUF_ALIGN		SZ_4K
33  #define QAIC_MANAGE_EXT_MSG_LENGTH	SZ_64K /* Max DMA message length */
34  #define QAIC_WRAPPER_MAX_SIZE		SZ_4K
35  #define QAIC_MHI_RETRY_WAIT_MS		100
36  #define QAIC_MHI_RETRY_MAX		20
37  
38  static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
39  module_param(control_resp_timeout_s, uint, 0600);
40  MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
41  
42  struct manage_msg {
43  	u32 len;
44  	u32 count;
45  	u8 data[];
46  };
47  
48  /*
49   * Wire encoding structures for the manage protocol.
50   * All fields are little endian on the wire.
51   */
52  struct wire_msg_hdr {
53  	__le32 crc32; /* crc of everything following this field in the message */
54  	__le32 magic_number;
55  	__le32 sequence_number;
56  	__le32 len; /* length of this message */
57  	__le32 count; /* number of transactions in this message */
58  	__le32 handle; /* unique id to track the resources consumed */
59  	__le32 partition_id; /* partition id for the request (signed) */
60  	__le32 padding; /* must be 0 */
61  } __packed;
62  
63  struct wire_msg {
64  	struct wire_msg_hdr hdr;
65  	u8 data[];
66  } __packed;
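
/*
 * Illustrative note (not part of the wire definition above): a manage
 * message that carries no transactions is just the 32 byte header, so a
 * sender starts from
 *
 *	msg->hdr.len   = cpu_to_le32(sizeof(msg->hdr));
 *	msg->hdr.count = cpu_to_le32(0);
 *
 * Each transaction appended to msg->data then grows hdr.len by that
 * transaction's wire length and increments hdr.count by one.
 */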
67  
68  struct wire_trans_hdr {
69  	__le32 type;
70  	__le32 len;
71  } __packed;
72  
73  /* Each message sent from the driver to the device is organized as a list of wrapper_msg */
74  struct wrapper_msg {
75  	struct list_head list;
76  	struct kref ref_count;
77  	u32 len; /* length of data to transfer */
78  	struct wrapper_list *head;
79  	union {
80  		struct wire_msg msg;
81  		struct wire_trans_hdr trans;
82  	};
83  };
84  
85  struct wrapper_list {
86  	struct list_head list;
87  	spinlock_t lock; /* Protects the list state during additions and removals */
88  };
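
/*
 * Assembly sketch (illustrative; trans_len here is a hypothetical name for
 * a transaction's wire length): the first wrapper_msg in a wrapper_list
 * holds the wire_msg header and each encoded transaction gets its own
 * follow-on wrapper, roughly
 *
 *	wrappers = alloc_wrapper_list();
 *	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
 *	trans_wrapper = add_wrapper(wrappers,
 *				    offsetof(struct wrapper_msg, trans) + trans_len);
 *
 * msg_xfer() later queues each wrapper to MHI as a separate buffer of
 * wrapper->len bytes.
 */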
89  
90  struct wire_trans_passthrough {
91  	struct wire_trans_hdr hdr;
92  	u8 data[];
93  } __packed;
94  
95  struct wire_addr_size_pair {
96  	__le64 addr;
97  	__le64 size;
98  } __packed;
99  
100  struct wire_trans_dma_xfer {
101  	struct wire_trans_hdr hdr;
102  	__le32 tag;
103  	__le32 count;
104  	__le32 dma_chunk_id;
105  	__le32 padding;
106  	struct wire_addr_size_pair data[];
107  } __packed;
108  
109  /* Initiated by device to continue the DMA xfer of a large piece of data */
110  struct wire_trans_dma_xfer_cont {
111  	struct wire_trans_hdr hdr;
112  	__le32 dma_chunk_id;
113  	__le32 padding;
114  	__le64 xferred_size;
115  } __packed;
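
/*
 * Chunked DMA example (illustrative): if the host cannot fit all of the
 * address/size pairs of a DMA_XFER transaction into one
 * QAIC_MANAGE_EXT_MSG_LENGTH message, it sends what fits, tagged with a
 * non-zero dma_chunk_id. The device replies with this DMA_XFER_CONT
 * carrying the same dma_chunk_id and the xferred_size accepted so far;
 * the host then encodes the next chunk starting at that offset and
 * repeats until the whole user buffer has been described.
 */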
116  
117  struct wire_trans_activate_to_dev {
118  	struct wire_trans_hdr hdr;
119  	__le64 req_q_addr;
120  	__le64 rsp_q_addr;
121  	__le32 req_q_size;
122  	__le32 rsp_q_size;
123  	__le32 buf_len;
124  	__le32 options; /* unused, but BIT(16) has meaning to the device */
125  } __packed;
126  
127  struct wire_trans_activate_from_dev {
128  	struct wire_trans_hdr hdr;
129  	__le32 status;
130  	__le32 dbc_id;
131  	__le64 options; /* unused */
132  } __packed;
133  
134  struct wire_trans_deactivate_from_dev {
135  	struct wire_trans_hdr hdr;
136  	__le32 status;
137  	__le32 dbc_id;
138  } __packed;
139  
140  struct wire_trans_terminate_to_dev {
141  	struct wire_trans_hdr hdr;
142  	__le32 handle;
143  	__le32 padding;
144  } __packed;
145  
146  struct wire_trans_terminate_from_dev {
147  	struct wire_trans_hdr hdr;
148  	__le32 status;
149  	__le32 padding;
150  } __packed;
151  
152  struct wire_trans_status_to_dev {
153  	struct wire_trans_hdr hdr;
154  } __packed;
155  
156  struct wire_trans_status_from_dev {
157  	struct wire_trans_hdr hdr;
158  	__le16 major;
159  	__le16 minor;
160  	__le32 status;
161  	__le64 status_flags;
162  } __packed;
163  
164  struct wire_trans_validate_part_to_dev {
165  	struct wire_trans_hdr hdr;
166  	__le32 part_id;
167  	__le32 padding;
168  } __packed;
169  
170  struct wire_trans_validate_part_from_dev {
171  	struct wire_trans_hdr hdr;
172  	__le32 status;
173  	__le32 padding;
174  } __packed;
175  
176  struct xfer_queue_elem {
177  	/*
178  	 * Node in the list of ongoing transfer requests on the control channel.
179  	 * Maintained by the root device struct.
180  	 */
181  	struct list_head list;
182  	/* Sequence number of this transfer request */
183  	u32 seq_num;
184  	/* Waited on until the transfer request completes */
185  	struct completion xfer_done;
186  	/* Received data from device */
187  	void *buf;
188  };
189  
190  struct dma_xfer {
191  	/* Node in list of DMA transfers which is used for cleanup */
192  	struct list_head list;
193  	/* SG table of memory used for DMA */
194  	struct sg_table *sgt;
195  	/* Array of pages used for DMA */
196  	struct page **page_list;
197  	/* Number of pages used for DMA */
198  	unsigned long nr_pages;
199  };
200  
201  struct ioctl_resources {
202  	/* List of all DMA transfers which is used later for cleanup */
203  	struct list_head dma_xfers;
204  	/* Base address of request queue which belongs to a DBC */
205  	void *buf;
206  	/*
207  	 * Base bus address of request queue which belongs to a DBC. Response
208  	 * queue base bus address can be calculated by adding size of request
209  	 * queue to base bus address of request queue.
210  	 */
211  	dma_addr_t dma_addr;
212  	/* Total size of request queue and response queue in bytes */
213  	u32 total_size;
214  	/* Total number of elements that can be queued in each of request and response queue */
215  	u32 nelem;
216  	/* Base address of response queue which belongs to a DBC */
217  	void *rsp_q_base;
218  	/* Status of the NNC message received */
219  	u32 status;
220  	/* DBC id of the DBC received from device */
221  	u32 dbc_id;
222  	/*
223  	 * DMA transfer request messages can be large and it may not be
224  	 * possible to send them in one shot. In such cases the message is
225  	 * broken into chunks; this field stores the ID of such a chunk.
226  	 */
227  	u32 dma_chunk_id;
228  	/* Total number of bytes transferred for a DMA xfer request */
229  	u64 xferred_dma_size;
230  	/* Header of transaction message received from user. Used during DMA xfer request. */
231  	void *trans_hdr;
232  };
233  
234  struct resp_work {
235  	struct work_struct work;
236  	struct qaic_device *qdev;
237  	void *buf;
238  };
239  
240  /*
241   * Since we're working with little endian messages, it's useful to be able to
242   * increment without filling a whole line with conversions back and forth just
243   * to add one to a message count.
244   */
245  static __le32 incr_le32(__le32 val)
246  {
247  	return cpu_to_le32(le32_to_cpu(val) + 1);
248  }
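
/*
 * Usage sketch (illustrative):
 *
 *	msg->hdr.count = incr_le32(msg->hdr.count);
 *
 * which is shorthand for
 *
 *	msg->hdr.count = cpu_to_le32(le32_to_cpu(msg->hdr.count) + 1);
 */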
249  
250  static u32 gen_crc(void *msg)
251  {
252  	struct wrapper_list *wrappers = msg;
253  	struct wrapper_msg *w;
254  	u32 crc = ~0;
255  
256  	list_for_each_entry(w, &wrappers->list, list)
257  		crc = crc32(crc, &w->msg, w->len);
258  
259  	return crc ^ ~0;
260  }
261  
262  static u32 gen_crc_stub(void *msg)
263  {
264  	return 0;
265  }
266  
267  static bool valid_crc(void *msg)
268  {
269  	struct wire_msg_hdr *hdr = msg;
270  	bool ret;
271  	u32 crc;
272  
273  	/*
274  	 * The output of this algorithm is always converted to the native
275  	 * endianness.
276  	 */
277  	crc = le32_to_cpu(hdr->crc32);
278  	hdr->crc32 = 0;
279  	ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
280  	hdr->crc32 = cpu_to_le32(crc);
281  	return ret;
282  }
283  
284  static bool valid_crc_stub(void *msg)
285  {
286  	return true;
287  }
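
/*
 * CRC scheme sketch (illustrative): gen_crc() runs crc32() over every
 * wrapper in the list, seeded with ~0 and finalized with ^ ~0 (standard
 * CRC-32), while hdr.crc32 is still zero from the zeroed allocation. The
 * sender stores the result and the receiver recomputes it with the crc32
 * field temporarily zeroed, e.g.
 *
 *	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
 *	...
 *	if (!qdev->valid_crc(elem.buf))
 *		return ERR_PTR(-EPIPE);
 *
 * The _stub variants are used while device CRC support is unknown or
 * known to be disabled.
 */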
288  
289  static void free_wrapper(struct kref *ref)
290  {
291  	struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);
292  
293  	list_del(&wrapper->list);
294  	kfree(wrapper);
295  }
296  
297  static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
298  			 struct qaic_user *usr)
299  {
300  	u32 dbc_id = resources->dbc_id;
301  
302  	if (resources->buf) {
303  		wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
304  		qdev->dbc[dbc_id].req_q_base = resources->buf;
305  		qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
306  		qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
307  		qdev->dbc[dbc_id].total_size = resources->total_size;
308  		qdev->dbc[dbc_id].nelem = resources->nelem;
309  		enable_dbc(qdev, dbc_id, usr);
310  		qdev->dbc[dbc_id].in_use = true;
311  		resources->buf = NULL;
312  	}
313  }
314  
315  static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
316  {
317  	if (resources->buf)
318  		dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
319  				  resources->dma_addr);
320  	resources->buf = NULL;
321  }
322  
323  static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
324  {
325  	struct dma_xfer *xfer;
326  	struct dma_xfer *x;
327  	int i;
328  
329  	list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
330  		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
331  		sg_free_table(xfer->sgt);
332  		kfree(xfer->sgt);
333  		for (i = 0; i < xfer->nr_pages; ++i)
334  			put_page(xfer->page_list[i]);
335  		kfree(xfer->page_list);
336  		list_del(&xfer->list);
337  		kfree(xfer);
338  	}
339  }
340  
341  static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
342  {
343  	struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);
344  
345  	if (!w)
346  		return NULL;
347  	list_add_tail(&w->list, &wrappers->list);
348  	kref_init(&w->ref_count);
349  	w->head = wrappers;
350  	return w;
351  }
352  
353  static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
354  			      u32 *user_len)
355  {
356  	struct qaic_manage_trans_passthrough *in_trans = trans;
357  	struct wire_trans_passthrough *out_trans;
358  	struct wrapper_msg *trans_wrapper;
359  	struct wrapper_msg *wrapper;
360  	struct wire_msg *msg;
361  	u32 msg_hdr_len;
362  
363  	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
364  	msg = &wrapper->msg;
365  	msg_hdr_len = le32_to_cpu(msg->hdr.len);
366  
367  	if (in_trans->hdr.len % 8 != 0)
368  		return -EINVAL;
369  
370  	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
371  		return -ENOSPC;
372  
373  	trans_wrapper = add_wrapper(wrappers,
374  				    offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
375  	if (!trans_wrapper)
376  		return -ENOMEM;
377  	trans_wrapper->len = in_trans->hdr.len;
378  	out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;
379  
380  	memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
381  	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
382  	msg->hdr.count = incr_le32(msg->hdr.count);
383  	*user_len += in_trans->hdr.len;
384  	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
385  	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
386  
387  	return 0;
388  }
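
/*
 * Worked example (illustrative): a passthrough transaction with
 * in_trans->hdr.len == 24 carries 24 - sizeof(in_trans->hdr) = 16 bytes
 * of payload. Encoding it grows msg->hdr.len by 24, bumps msg->hdr.count
 * by one, and adds 24 to *user_len so encode_message() can verify the
 * whole user message was consumed. Lengths must be a multiple of 8, so
 * 24 is accepted while 20 would be rejected with -EINVAL.
 */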
389  
390  /* Returns a negative error code on failure, 0 if enough pages were pinned, 1 if dma_cont is needed */
391  static int find_and_map_user_pages(struct qaic_device *qdev,
392  				   struct qaic_manage_trans_dma_xfer *in_trans,
393  				   struct ioctl_resources *resources, struct dma_xfer *xfer)
394  {
395  	u64 xfer_start_addr, remaining, end, total;
396  	unsigned long need_pages;
397  	struct page **page_list;
398  	unsigned long nr_pages;
399  	struct sg_table *sgt;
400  	int ret;
401  	int i;
402  
403  	if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
404  		return -EINVAL;
405  
406  	if (in_trans->size < resources->xferred_dma_size)
407  		return -EINVAL;
408  	remaining = in_trans->size - resources->xferred_dma_size;
409  	if (remaining == 0)
410  		return 0;
411  
412  	if (check_add_overflow(xfer_start_addr, remaining, &end))
413  		return -EINVAL;
414  
415  	total = remaining + offset_in_page(xfer_start_addr);
416  	if (total >= SIZE_MAX)
417  		return -EINVAL;
418  
419  	need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
420  
421  	nr_pages = need_pages;
422  
423  	while (1) {
424  		page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
425  		if (!page_list) {
426  			nr_pages = nr_pages / 2;
427  			if (!nr_pages)
428  				return -ENOMEM;
429  		} else {
430  			break;
431  		}
432  	}
433  
434  	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
435  	if (ret < 0)
436  		goto free_page_list;
437  	if (ret != nr_pages) {
438  		nr_pages = ret;
439  		ret = -EFAULT;
440  		goto put_pages;
441  	}
442  
443  	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
444  	if (!sgt) {
445  		ret = -ENOMEM;
446  		goto put_pages;
447  	}
448  
449  	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
450  					offset_in_page(xfer_start_addr),
451  					remaining, GFP_KERNEL);
452  	if (ret) {
453  		ret = -ENOMEM;
454  		goto free_sgt;
455  	}
456  
457  	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
458  	if (ret)
459  		goto free_table;
460  
461  	xfer->sgt = sgt;
462  	xfer->page_list = page_list;
463  	xfer->nr_pages = nr_pages;
464  
465  	return need_pages > nr_pages ? 1 : 0;
466  
467  free_table:
468  	sg_free_table(sgt);
469  free_sgt:
470  	kfree(sgt);
471  put_pages:
472  	for (i = 0; i < nr_pages; ++i)
473  		put_page(page_list[i]);
474  free_page_list:
475  	kfree(page_list);
476  	return ret;
477  }
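
/*
 * Worked example (illustrative): a 1 MiB user buffer that starts 0x100
 * bytes into a page gives total = 0x100000 + 0x100, so with 4K pages
 * need_pages = DIV_ROUND_UP(0x100100, PAGE_SIZE) = 257. If kmalloc_array()
 * cannot hold 257 page pointers, nr_pages is halved until the allocation
 * succeeds; pinning fewer pages than needed is then reported by returning
 * 1, which leads to a DMA continuation for the remainder.
 */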
478  
479  /* Returns a negative error code on failure, 0 if everything was encoded, 1 if dma_cont is needed */
480  static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
481  				  struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
482  				  struct wire_trans_dma_xfer **out_trans)
483  {
484  	struct wrapper_msg *trans_wrapper;
485  	struct sg_table *sgt = xfer->sgt;
486  	struct wire_addr_size_pair *asp;
487  	struct scatterlist *sg;
488  	struct wrapper_msg *w;
489  	unsigned int dma_len;
490  	u64 dma_chunk_len;
491  	void *boundary;
492  	int nents_dma;
493  	int nents;
494  	int i;
495  
496  	nents = sgt->nents;
497  	nents_dma = nents;
498  	*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
499  	for_each_sgtable_dma_sg(sgt, sg, i) {
500  		*size -= sizeof(*asp);
501  		/* Save 1K for possible follow-up transactions. */
502  		if (*size < SZ_1K) {
503  			nents_dma = i;
504  			break;
505  		}
506  	}
507  
508  	trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
509  	if (!trans_wrapper)
510  		return -ENOMEM;
511  	*out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;
512  
513  	asp = (*out_trans)->data;
514  	boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
515  	*size = 0;
516  
517  	dma_len = 0;
518  	w = trans_wrapper;
519  	dma_chunk_len = 0;
520  	for_each_sg(sgt->sgl, sg, nents_dma, i) {
521  		asp->size = cpu_to_le64(dma_len);
522  		dma_chunk_len += dma_len;
523  		if (dma_len) {
524  			asp++;
525  			if ((void *)asp + sizeof(*asp) > boundary) {
526  				w->len = (void *)asp - (void *)&w->msg;
527  				*size += w->len;
528  				w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
529  				if (!w)
530  					return -ENOMEM;
531  				boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
532  				asp = (struct wire_addr_size_pair *)&w->msg;
533  			}
534  		}
535  		asp->addr = cpu_to_le64(sg_dma_address(sg));
536  		dma_len = sg_dma_len(sg);
537  	}
538  	/* finalize the last segment */
539  	asp->size = cpu_to_le64(dma_len);
540  	w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
541  	*size += w->len;
542  	dma_chunk_len += dma_len;
543  	resources->xferred_dma_size += dma_chunk_len;
544  
545  	return nents_dma < nents ? 1 : 0;
546  }
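
/*
 * Sizing sketch (illustrative): each wire_addr_size_pair is 16 bytes and
 * every wrapper is capped at QAIC_WRAPPER_MAX_SIZE (4K), so a follow-on
 * wrapper holds roughly 250 pairs before another wrapper is added. The
 * encoder also keeps SZ_1K of the ext message budget free for follow-up
 * transactions; scatterlist entries that do not fit are left for the next
 * DMA chunk, signalled by the return value of 1.
 */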
547  
548  static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
549  {
550  	int i;
551  
552  	dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
553  	sg_free_table(xfer->sgt);
554  	kfree(xfer->sgt);
555  	for (i = 0; i < xfer->nr_pages; ++i)
556  		put_page(xfer->page_list[i]);
557  	kfree(xfer->page_list);
558  }
559  
560  static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
561  		      u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
562  {
563  	struct qaic_manage_trans_dma_xfer *in_trans = trans;
564  	struct wire_trans_dma_xfer *out_trans;
565  	struct wrapper_msg *wrapper;
566  	struct dma_xfer *xfer;
567  	struct wire_msg *msg;
568  	bool need_cont_dma;
569  	u32 msg_hdr_len;
570  	u32 size;
571  	int ret;
572  
573  	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
574  	msg = &wrapper->msg;
575  	msg_hdr_len = le32_to_cpu(msg->hdr.len);
576  
577  	/* There should be enough space to hold at least one ASP entry. */
578  	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
579  	    QAIC_MANAGE_EXT_MSG_LENGTH)
580  		return -ENOMEM;
581  
582  	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
583  	if (!xfer)
584  		return -ENOMEM;
585  
586  	ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
587  	if (ret < 0)
588  		goto free_xfer;
589  
590  	need_cont_dma = (bool)ret;
591  
592  	ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
593  	if (ret < 0)
594  		goto cleanup_xfer;
595  
596  	need_cont_dma = need_cont_dma || (bool)ret;
597  
598  	msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
599  	msg->hdr.count = incr_le32(msg->hdr.count);
600  
601  	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
602  	out_trans->hdr.len = cpu_to_le32(size);
603  	out_trans->tag = cpu_to_le32(in_trans->tag);
604  	out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
605  								sizeof(struct wire_addr_size_pair));
606  
607  	*user_len += in_trans->hdr.len;
608  
609  	if (resources->dma_chunk_id) {
610  		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
611  	} else if (need_cont_dma) {
612  		while (resources->dma_chunk_id == 0)
613  			resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);
614  
615  		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
616  	}
617  	resources->trans_hdr = trans;
618  
619  	list_add(&xfer->list, &resources->dma_xfers);
620  	return 0;
621  
622  cleanup_xfer:
623  	cleanup_xfer(qdev, xfer);
624  free_xfer:
625  	kfree(xfer);
626  	return ret;
627  }
628  
629  static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
630  			   u32 *user_len, struct ioctl_resources *resources)
631  {
632  	struct qaic_manage_trans_activate_to_dev *in_trans = trans;
633  	struct wire_trans_activate_to_dev *out_trans;
634  	struct wrapper_msg *trans_wrapper;
635  	struct wrapper_msg *wrapper;
636  	struct wire_msg *msg;
637  	dma_addr_t dma_addr;
638  	u32 msg_hdr_len;
639  	void *buf;
640  	u32 nelem;
641  	u32 size;
642  	int ret;
643  
644  	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
645  	msg = &wrapper->msg;
646  	msg_hdr_len = le32_to_cpu(msg->hdr.len);
647  
648  	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
649  		return -ENOSPC;
650  
651  	if (!in_trans->queue_size)
652  		return -EINVAL;
653  
654  	if (in_trans->pad)
655  		return -EINVAL;
656  
657  	nelem = in_trans->queue_size;
658  	size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
659  	if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
660  		return -EINVAL;
661  
662  	if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
663  		return -EINVAL;
664  
665  	size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);
666  
667  	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
668  	if (!buf)
669  		return -ENOMEM;
670  
671  	trans_wrapper = add_wrapper(wrappers,
672  				    offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
673  	if (!trans_wrapper) {
674  		ret = -ENOMEM;
675  		goto free_dma;
676  	}
677  	trans_wrapper->len = sizeof(*out_trans);
678  	out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;
679  
680  	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
681  	out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
682  	out_trans->buf_len = cpu_to_le32(size);
683  	out_trans->req_q_addr = cpu_to_le64(dma_addr);
684  	out_trans->req_q_size = cpu_to_le32(nelem);
685  	out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
686  	out_trans->rsp_q_size = cpu_to_le32(nelem);
687  	out_trans->options = cpu_to_le32(in_trans->options);
688  
689  	*user_len += in_trans->hdr.len;
690  	msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
691  	msg->hdr.count = incr_le32(msg->hdr.count);
692  
693  	resources->buf = buf;
694  	resources->dma_addr = dma_addr;
695  	resources->total_size = size;
696  	resources->nelem = nelem;
697  	resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
698  	return 0;
699  
700  free_dma:
701  	dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
702  	return ret;
703  }
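
/*
 * Queue sizing example (illustrative; the element sizes below are
 * hypothetical, the real values come from get_dbc_req_elem_size() and
 * get_dbc_rsp_elem_size()): with nelem = 100, a 64 byte request element
 * and a 4 byte response element,
 *
 *	size = (64 + 4) * 100 = 6800
 *	size = ALIGN(6800 + QAIC_DBC_Q_GAP, QAIC_DBC_Q_BUF_ALIGN) = 8192
 *
 * so 8192 bytes of coherent memory are allocated and the response queue
 * is placed at the end of it: rsp_q_addr = dma_addr + 8192 - 100 * 4.
 */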
704  
705  static int encode_deactivate(struct qaic_device *qdev, void *trans,
706  			     u32 *user_len, struct qaic_user *usr)
707  {
708  	struct qaic_manage_trans_deactivate *in_trans = trans;
709  
710  	if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
711  		return -EINVAL;
712  
713  	*user_len += in_trans->hdr.len;
714  
715  	return disable_dbc(qdev, in_trans->dbc_id, usr);
716  }
717  
718  static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
719  			 u32 *user_len)
720  {
721  	struct qaic_manage_trans_status_to_dev *in_trans = trans;
722  	struct wire_trans_status_to_dev *out_trans;
723  	struct wrapper_msg *trans_wrapper;
724  	struct wrapper_msg *wrapper;
725  	struct wire_msg *msg;
726  	u32 msg_hdr_len;
727  
728  	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
729  	msg = &wrapper->msg;
730  	msg_hdr_len = le32_to_cpu(msg->hdr.len);
731  
732  	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
733  		return -ENOSPC;
734  
735  	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
736  	if (!trans_wrapper)
737  		return -ENOMEM;
738  
739  	trans_wrapper->len = sizeof(*out_trans);
740  	out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;
741  
742  	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
743  	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
744  	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
745  	msg->hdr.count = incr_le32(msg->hdr.count);
746  	*user_len += in_trans->hdr.len;
747  
748  	return 0;
749  }
750  
751  static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
752  			  struct wrapper_list *wrappers, struct ioctl_resources *resources,
753  			  struct qaic_user *usr)
754  {
755  	struct qaic_manage_trans_hdr *trans_hdr;
756  	struct wrapper_msg *wrapper;
757  	struct wire_msg *msg;
758  	u32 user_len = 0;
759  	int ret;
760  	int i;
761  
762  	if (!user_msg->count ||
763  	    user_msg->len < sizeof(*trans_hdr)) {
764  		ret = -EINVAL;
765  		goto out;
766  	}
767  
768  	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
769  	msg = &wrapper->msg;
770  
771  	msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));
772  
773  	if (resources->dma_chunk_id) {
774  		ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
775  		msg->hdr.count = cpu_to_le32(1);
776  		goto out;
777  	}
778  
779  	for (i = 0; i < user_msg->count; ++i) {
780  		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
781  			ret = -EINVAL;
782  			break;
783  		}
784  		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
785  		if (trans_hdr->len < sizeof(trans_hdr) ||
786  		    size_add(user_len, trans_hdr->len) > user_msg->len) {
787  			ret = -EINVAL;
788  			break;
789  		}
790  
791  		switch (trans_hdr->type) {
792  		case QAIC_TRANS_PASSTHROUGH_FROM_USR:
793  			ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
794  			break;
795  		case QAIC_TRANS_DMA_XFER_FROM_USR:
796  			ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
797  			break;
798  		case QAIC_TRANS_ACTIVATE_FROM_USR:
799  			ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
800  			break;
801  		case QAIC_TRANS_DEACTIVATE_FROM_USR:
802  			ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
803  			break;
804  		case QAIC_TRANS_STATUS_FROM_USR:
805  			ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
806  			break;
807  		default:
808  			ret = -EINVAL;
809  			break;
810  		}
811  
812  		if (ret)
813  			break;
814  	}
815  
816  	if (user_len != user_msg->len)
817  		ret = -EINVAL;
818  out:
819  	if (ret) {
820  		free_dma_xfers(qdev, resources);
821  		free_dbc_buf(qdev, resources);
822  		return ret;
823  	}
824  
825  	return 0;
826  }
827  
828  static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
829  			      u32 *msg_len)
830  {
831  	struct qaic_manage_trans_passthrough *out_trans;
832  	struct wire_trans_passthrough *in_trans = trans;
833  	u32 len;
834  
835  	out_trans = (void *)user_msg->data + user_msg->len;
836  
837  	len = le32_to_cpu(in_trans->hdr.len);
838  	if (len % 8 != 0)
839  		return -EINVAL;
840  
841  	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
842  		return -ENOSPC;
843  
844  	memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
845  	user_msg->len += len;
846  	*msg_len += len;
847  	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
848  	out_trans->hdr.len = len;
849  
850  	return 0;
851  }
852  
853  static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
854  			   u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
855  {
856  	struct qaic_manage_trans_activate_from_dev *out_trans;
857  	struct wire_trans_activate_from_dev *in_trans = trans;
858  	u32 len;
859  
860  	out_trans = (void *)user_msg->data + user_msg->len;
861  
862  	len = le32_to_cpu(in_trans->hdr.len);
863  	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
864  		return -ENOSPC;
865  
866  	user_msg->len += len;
867  	*msg_len += len;
868  	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
869  	out_trans->hdr.len = len;
870  	out_trans->status = le32_to_cpu(in_trans->status);
871  	out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
872  	out_trans->options = le64_to_cpu(in_trans->options);
873  
874  	if (!resources->buf)
875  		/* how did we get an activate response without a request? */
876  		return -EINVAL;
877  
878  	if (out_trans->dbc_id >= qdev->num_dbc)
879  		/*
880  		 * The device assigned an invalid resource, which should never
881  		 * happen. Return an error so the user can try to recover.
882  		 */
883  		return -ENODEV;
884  
885  	if (out_trans->status)
886  		/*
887  		 * Allocating resources failed on the device side. This is not
888  		 * expected behaviour; the user is expected to handle this situation.
889  		 */
890  		return -ECANCELED;
891  
892  	resources->status = out_trans->status;
893  	resources->dbc_id = out_trans->dbc_id;
894  	save_dbc_buf(qdev, resources, usr);
895  
896  	return 0;
897  }
898  
899  static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
900  			     struct qaic_user *usr)
901  {
902  	struct wire_trans_deactivate_from_dev *in_trans = trans;
903  	u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
904  	u32 status = le32_to_cpu(in_trans->status);
905  
906  	if (dbc_id >= qdev->num_dbc)
907  		/*
908  		 * The device assigned an invalid resource, which should never
909  		 * happen. Inject an error so the user can try to recover.
910  		 */
911  		return -ENODEV;
912  
913  	if (status) {
914  		/*
915  		 * Releasing resources failed on the device side, which puts
916  		 * us in a bind since they may still be in use, so enable the
917  		 * dbc. User is expected to retry deactivation.
918  		 */
919  		enable_dbc(qdev, dbc_id, usr);
920  		return -ECANCELED;
921  	}
922  
923  	release_dbc(qdev, dbc_id);
924  	*msg_len += sizeof(*in_trans);
925  
926  	return 0;
927  }
928  
929  static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
930  			 u32 *user_len, struct wire_msg *msg)
931  {
932  	struct qaic_manage_trans_status_from_dev *out_trans;
933  	struct wire_trans_status_from_dev *in_trans = trans;
934  	u32 len;
935  
936  	out_trans = (void *)user_msg->data + user_msg->len;
937  
938  	len = le32_to_cpu(in_trans->hdr.len);
939  	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
940  		return -ENOSPC;
941  
942  	out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
943  	out_trans->hdr.len = len;
944  	out_trans->major = le16_to_cpu(in_trans->major);
945  	out_trans->minor = le16_to_cpu(in_trans->minor);
946  	out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
947  	out_trans->status = le32_to_cpu(in_trans->status);
948  	*user_len += le32_to_cpu(in_trans->hdr.len);
949  	user_msg->len += len;
950  
951  	if (out_trans->status)
952  		return -ECANCELED;
953  	if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
954  		return -EPIPE;
955  
956  	return 0;
957  }
958  
959  static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
960  			  struct wire_msg *msg, struct ioctl_resources *resources,
961  			  struct qaic_user *usr)
962  {
963  	u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
964  	struct wire_trans_hdr *trans_hdr;
965  	u32 msg_len = 0;
966  	int ret;
967  	int i;
968  
969  	if (msg_hdr_len < sizeof(*trans_hdr) ||
970  	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
971  		return -EINVAL;
972  
973  	user_msg->len = 0;
974  	user_msg->count = le32_to_cpu(msg->hdr.count);
975  
976  	for (i = 0; i < user_msg->count; ++i) {
977  		u32 hdr_len;
978  
979  		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
980  			return -EINVAL;
981  
982  		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
983  		hdr_len = le32_to_cpu(trans_hdr->len);
984  		if (hdr_len < sizeof(*trans_hdr) ||
985  		    size_add(msg_len, hdr_len) > msg_hdr_len)
986  			return -EINVAL;
987  
988  		switch (le32_to_cpu(trans_hdr->type)) {
989  		case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
990  			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
991  			break;
992  		case QAIC_TRANS_ACTIVATE_FROM_DEV:
993  			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
994  			break;
995  		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
996  			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
997  			break;
998  		case QAIC_TRANS_STATUS_FROM_DEV:
999  			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
1000  			break;
1001  		default:
1002  			return -EINVAL;
1003  		}
1004  
1005  		if (ret)
1006  			return ret;
1007  	}
1008  
1009  	if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
1010  		return -EINVAL;
1011  
1012  	return 0;
1013  }
1014  
1015  static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
1016  		      bool ignore_signal)
1017  {
1018  	struct xfer_queue_elem elem;
1019  	struct wire_msg *out_buf;
1020  	struct wrapper_msg *w;
1021  	long ret = -EAGAIN;
1022  	int xfer_count = 0;
1023  	int retry_count;
1024  
1025  	if (qdev->in_reset) {
1026  		mutex_unlock(&qdev->cntl_mutex);
1027  		return ERR_PTR(-ENODEV);
1028  	}
1029  
1030  	/* Attempt to avoid a partial commit of a message */
1031  	list_for_each_entry(w, &wrappers->list, list)
1032  		xfer_count++;
1033  
1034  	for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
1035  		if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
1036  			ret = 0;
1037  			break;
1038  		}
1039  		msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
1040  		if (signal_pending(current))
1041  			break;
1042  	}
1043  
1044  	if (ret) {
1045  		mutex_unlock(&qdev->cntl_mutex);
1046  		return ERR_PTR(ret);
1047  	}
1048  
1049  	elem.seq_num = seq_num;
1050  	elem.buf = NULL;
1051  	init_completion(&elem.xfer_done);
1052  	if (likely(!qdev->cntl_lost_buf)) {
1053  		/*
1054  		 * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
1055  		 * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
1056  		 */
1057  		out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
1058  		if (!out_buf) {
1059  			mutex_unlock(&qdev->cntl_mutex);
1060  			return ERR_PTR(-ENOMEM);
1061  		}
1062  
1063  		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
1064  				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
1065  		if (ret) {
1066  			mutex_unlock(&qdev->cntl_mutex);
1067  			return ERR_PTR(ret);
1068  		}
1069  	} else {
1070  		/*
1071  		 * We lost a buffer because we queued a recv buf, but then
1072  		 * queuing the corresponding tx buf failed. To try to avoid
1073  		 * a memory leak, let's reclaim it and use it for this
1074  		 * transaction.
1075  		 */
1076  		qdev->cntl_lost_buf = false;
1077  	}
1078  
1079  	list_for_each_entry(w, &wrappers->list, list) {
1080  		kref_get(&w->ref_count);
1081  		retry_count = 0;
1082  		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
1083  				    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
1084  		if (ret) {
1085  			qdev->cntl_lost_buf = true;
1086  			kref_put(&w->ref_count, free_wrapper);
1087  			mutex_unlock(&qdev->cntl_mutex);
1088  			return ERR_PTR(ret);
1089  		}
1090  	}
1091  
1092  	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
1093  	mutex_unlock(&qdev->cntl_mutex);
1094  
1095  	if (ignore_signal)
1096  		ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
1097  	else
1098  		ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
1099  								control_resp_timeout_s * HZ);
1100  	/*
1101  	 * not using _interruptible because we have to clean up or we'll
1102  	 * likely cause memory corruption
1103  	 */
1104  	mutex_lock(&qdev->cntl_mutex);
1105  	if (!list_empty(&elem.list))
1106  		list_del(&elem.list);
1107  	if (!ret && !elem.buf)
1108  		ret = -ETIMEDOUT;
1109  	else if (ret > 0 && !elem.buf)
1110  		ret = -EIO;
1111  	mutex_unlock(&qdev->cntl_mutex);
1112  
1113  	if (ret < 0) {
1114  		kfree(elem.buf);
1115  		return ERR_PTR(ret);
1116  	} else if (!qdev->valid_crc(elem.buf)) {
1117  		kfree(elem.buf);
1118  		return ERR_PTR(-EPIPE);
1119  	}
1120  
1121  	return elem.buf;
1122  }
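
/*
 * Flow sketch for msg_xfer() (illustrative): the caller holds cntl_mutex
 * and has already filled in the wire header and CRC. msg_xfer() queues
 * one receive buffer of QAIC_MANAGE_MAX_MSG_LENGTH, pushes every wrapper
 * on the TX channel (MHI_CHAIN for all but the last, which uses MHI_EOT),
 * adds an xfer_queue_elem keyed by seq_num to cntl_xfer_list, drops the
 * mutex, and then waits up to control_resp_timeout_s seconds for
 * resp_worker() to match the response by sequence number and complete
 * xfer_done.
 */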
1123  
1124  /* Add a transaction to abort the outstanding DMA continuation */
1125  static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
1126  {
1127  	struct wire_trans_dma_xfer *out_trans;
1128  	u32 size = sizeof(*out_trans);
1129  	struct wrapper_msg *wrapper;
1130  	struct wrapper_msg *w;
1131  	struct wire_msg *msg;
1132  
1133  	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
1134  	msg = &wrapper->msg;
1135  
1136  	/* Remove all but the first wrapper which has the msg header */
1137  	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
1138  		if (!list_is_first(&wrapper->list, &wrappers->list))
1139  			kref_put(&wrapper->ref_count, free_wrapper);
1140  
1141  	wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
1142  
1143  	if (!wrapper)
1144  		return -ENOMEM;
1145  
1146  	out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
1147  	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
1148  	out_trans->hdr.len = cpu_to_le32(size);
1149  	out_trans->tag = cpu_to_le32(0);
1150  	out_trans->count = cpu_to_le32(0);
1151  	out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);
1152  
1153  	msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
1154  	msg->hdr.count = cpu_to_le32(1);
1155  	wrapper->len = size;
1156  
1157  	return 0;
1158  }
1159  
1160  static struct wrapper_list *alloc_wrapper_list(void)
1161  {
1162  	struct wrapper_list *wrappers;
1163  
1164  	wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
1165  	if (!wrappers)
1166  		return NULL;
1167  	INIT_LIST_HEAD(&wrappers->list);
1168  	spin_lock_init(&wrappers->lock);
1169  
1170  	return wrappers;
1171  }
1172  
1173  static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
1174  				struct manage_msg *user_msg, struct ioctl_resources *resources,
1175  				struct wire_msg **rsp)
1176  {
1177  	struct wrapper_list *wrappers;
1178  	struct wrapper_msg *wrapper;
1179  	struct wrapper_msg *w;
1180  	bool all_done = false;
1181  	struct wire_msg *msg;
1182  	int ret;
1183  
1184  	wrappers = alloc_wrapper_list();
1185  	if (!wrappers)
1186  		return -ENOMEM;
1187  
1188  	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
1189  	if (!wrapper) {
1190  		kfree(wrappers);
1191  		return -ENOMEM;
1192  	}
1193  
1194  	msg = &wrapper->msg;
1195  	wrapper->len = sizeof(*msg);
1196  
1197  	ret = encode_message(qdev, user_msg, wrappers, resources, usr);
1198  	if (ret && resources->dma_chunk_id)
1199  		ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
1200  	if (ret)
1201  		goto encode_failed;
1202  
1203  	ret = mutex_lock_interruptible(&qdev->cntl_mutex);
1204  	if (ret)
1205  		goto lock_failed;
1206  
1207  	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
1208  	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
1209  
1210  	if (usr) {
1211  		msg->hdr.handle = cpu_to_le32(usr->handle);
1212  		msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
1213  	} else {
1214  		msg->hdr.handle = 0;
1215  		msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
1216  	}
1217  
1218  	msg->hdr.padding = cpu_to_le32(0);
1219  	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
1220  
1221  	/* msg_xfer releases the mutex */
1222  	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
1223  	if (IS_ERR(*rsp))
1224  		ret = PTR_ERR(*rsp);
1225  
1226  lock_failed:
1227  	free_dma_xfers(qdev, resources);
1228  encode_failed:
1229  	spin_lock(&wrappers->lock);
1230  	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
1231  		kref_put(&wrapper->ref_count, free_wrapper);
1232  	all_done = list_empty(&wrappers->list);
1233  	spin_unlock(&wrappers->lock);
1234  	if (all_done)
1235  		kfree(wrappers);
1236  
1237  	return ret;
1238  }
1239  
1240  static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
1241  {
1242  	struct wire_trans_dma_xfer_cont *dma_cont = NULL;
1243  	struct ioctl_resources resources;
1244  	struct wire_msg *rsp = NULL;
1245  	int ret;
1246  
1247  	memset(&resources, 0, sizeof(struct ioctl_resources));
1248  
1249  	INIT_LIST_HEAD(&resources.dma_xfers);
1250  
1251  	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
1252  	    user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
1253  		return -EINVAL;
1254  
1255  dma_xfer_continue:
1256  	ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
1257  	if (ret)
1258  		return ret;
1259  	/* dma_cont should be the only transaction if present */
1260  	if (le32_to_cpu(rsp->hdr.count) == 1) {
1261  		dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
1262  		if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
1263  			dma_cont = NULL;
1264  	}
1265  	if (dma_cont) {
1266  		if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
1267  		    le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
1268  			kfree(rsp);
1269  			goto dma_xfer_continue;
1270  		}
1271  
1272  		ret = -EINVAL;
1273  		goto dma_cont_failed;
1274  	}
1275  
1276  	ret = decode_message(qdev, user_msg, rsp, &resources, usr);
1277  
1278  dma_cont_failed:
1279  	free_dbc_buf(qdev, &resources);
1280  	kfree(rsp);
1281  	return ret;
1282  }
1283  
1284  int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1285  {
1286  	struct qaic_manage_msg *user_msg = data;
1287  	struct qaic_device *qdev;
1288  	struct manage_msg *msg;
1289  	struct qaic_user *usr;
1290  	u8 __user *user_data;
1291  	int qdev_rcu_id;
1292  	int usr_rcu_id;
1293  	int ret;
1294  
1295  	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
1296  		return -EINVAL;
1297  
1298  	usr = file_priv->driver_priv;
1299  
1300  	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1301  	if (!usr->qddev) {
1302  		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1303  		return -ENODEV;
1304  	}
1305  
1306  	qdev = usr->qddev->qdev;
1307  
1308  	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1309  	if (qdev->in_reset) {
1310  		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1311  		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1312  		return -ENODEV;
1313  	}
1314  
1315  	msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
1316  	if (!msg) {
1317  		ret = -ENOMEM;
1318  		goto out;
1319  	}
1320  
1321  	msg->len = user_msg->len;
1322  	msg->count = user_msg->count;
1323  
1324  	user_data = u64_to_user_ptr(user_msg->data);
1325  
1326  	if (copy_from_user(msg->data, user_data, user_msg->len)) {
1327  		ret = -EFAULT;
1328  		goto free_msg;
1329  	}
1330  
1331  	ret = qaic_manage(qdev, usr, msg);
1332  
1333  	/*
1334  	 * If qaic_manage() is successful then we copy the message back to
1335  	 * userspace memory, but we have an exception for -ECANCELED.
1336  	 * -ECANCELED means that the device has NACKed the message with a
1337  	 * status error code which userspace would like to know.
1338  	 */
1339  	if (ret == -ECANCELED || !ret) {
1340  		if (copy_to_user(user_data, msg->data, msg->len)) {
1341  			ret = -EFAULT;
1342  		} else {
1343  			user_msg->len = msg->len;
1344  			user_msg->count = msg->count;
1345  		}
1346  	}
1347  
1348  free_msg:
1349  	kfree(msg);
1350  out:
1351  	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1352  	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1353  	return ret;
1354  }
1355  
1356  int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
1357  {
1358  	struct qaic_manage_trans_status_from_dev *status_result;
1359  	struct qaic_manage_trans_status_to_dev *status_query;
1360  	struct manage_msg *user_msg;
1361  	int ret;
1362  
1363  	user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
1364  	if (!user_msg) {
1365  		ret = -ENOMEM;
1366  		goto out;
1367  	}
1368  	user_msg->len = sizeof(*status_query);
1369  	user_msg->count = 1;
1370  
1371  	status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
1372  	status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
1373  	status_query->hdr.len = sizeof(status_query->hdr);
1374  
1375  	ret = qaic_manage(qdev, usr, user_msg);
1376  	if (ret)
1377  		goto kfree_user_msg;
1378  	status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
1379  	*major = status_result->major;
1380  	*minor = status_result->minor;
1381  
1382  	if (status_result->status_flags & BIT(0)) { /* device is using CRC */
1383  		/* By default qdev->gen_crc is programmed to generate CRC */
1384  		qdev->valid_crc = valid_crc;
1385  	} else {
1386  		/* By default qdev->valid_crc is programmed to bypass CRC */
1387  		qdev->gen_crc = gen_crc_stub;
1388  	}
1389  
1390  kfree_user_msg:
1391  	kfree(user_msg);
1392  out:
1393  	return ret;
1394  }
1395  
1396  static void resp_worker(struct work_struct *work)
1397  {
1398  	struct resp_work *resp = container_of(work, struct resp_work, work);
1399  	struct qaic_device *qdev = resp->qdev;
1400  	struct wire_msg *msg = resp->buf;
1401  	struct xfer_queue_elem *elem;
1402  	struct xfer_queue_elem *i;
1403  	bool found = false;
1404  
1405  	mutex_lock(&qdev->cntl_mutex);
1406  	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
1407  		if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
1408  			found = true;
1409  			list_del_init(&elem->list);
1410  			elem->buf = msg;
1411  			complete_all(&elem->xfer_done);
1412  			break;
1413  		}
1414  	}
1415  	mutex_unlock(&qdev->cntl_mutex);
1416  
1417  	if (!found)
1418  		/* request must have timed out, drop packet */
1419  		kfree(msg);
1420  
1421  	kfree(resp);
1422  }
1423  
1424  static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
1425  {
1426  	bool all_done = false;
1427  
1428  	spin_lock(&wrappers->lock);
1429  	kref_put(&wrapper->ref_count, free_wrapper);
1430  	all_done = list_empty(&wrappers->list);
1431  	spin_unlock(&wrappers->lock);
1432  
1433  	if (all_done)
1434  		kfree(wrappers);
1435  }
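
/*
 * Reference counting sketch (illustrative): each wrapper starts with one
 * reference from kref_init() in add_wrapper(), and msg_xfer() takes an
 * extra reference per wrapper before handing it to MHI. The send path and
 * qaic_mhi_ul_xfer_cb() each drop one reference, and whichever side
 * removes the last wrapper (list_empty() under wrappers->lock) also frees
 * the wrapper_list itself.
 */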
1436  
1437  void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
1438  {
1439  	struct wire_msg *msg = mhi_result->buf_addr;
1440  	struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);
1441  
1442  	free_wrapper_from_list(wrapper->head, wrapper);
1443  }
1444  
1445  void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
1446  {
1447  	struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
1448  	struct wire_msg *msg = mhi_result->buf_addr;
1449  	struct resp_work *resp;
1450  
1451  	if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
1452  		kfree(msg);
1453  		return;
1454  	}
1455  
1456  	resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
1457  	if (!resp) {
1458  		kfree(msg);
1459  		return;
1460  	}
1461  
1462  	INIT_WORK(&resp->work, resp_worker);
1463  	resp->qdev = qdev;
1464  	resp->buf = msg;
1465  	queue_work(qdev->cntl_wq, &resp->work);
1466  }
1467  
1468  int qaic_control_open(struct qaic_device *qdev)
1469  {
1470  	if (!qdev->cntl_ch)
1471  		return -ENODEV;
1472  
1473  	qdev->cntl_lost_buf = false;
1474  	/*
1475  	 * By default qaic should assume that the device has CRC enabled.
1476  	 * Qaic learns whether the device has CRC enabled or disabled during
1477  	 * the device status transaction, which is the first transaction
1478  	 * performed on the control channel.
1479  	 *
1480  	 * So CRC validation of the first device status transaction response is
1481  	 * skipped (by calling valid_crc_stub) and is done later during decoding
1482  	 * if the device has CRC enabled.
1483  	 * Once qaic knows whether the device has CRC enabled or not, it acts
1484  	 * accordingly. See the note after this function for an example.
1485  	 */
1486  	qdev->gen_crc = gen_crc;
1487  	qdev->valid_crc = valid_crc_stub;
1488  
1489  	return mhi_prepare_for_transfer(qdev->cntl_ch);
1490  }
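
/*
 * Negotiation example (illustrative): right after qaic_control_open(),
 * gen_crc() and valid_crc_stub() are in place, so outgoing messages carry
 * a CRC while responses are not checked in msg_xfer(). The first status
 * response is instead checked in decode_status() when the device reports
 * BIT(0) in status_flags; get_cntl_version() then installs valid_crc()
 * for later responses, or installs gen_crc_stub() if the device does not
 * use CRC.
 */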
1491  
1492  void qaic_control_close(struct qaic_device *qdev)
1493  {
1494  	mhi_unprepare_from_transfer(qdev->cntl_ch);
1495  }
1496  
1497  void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
1498  {
1499  	struct wire_trans_terminate_to_dev *trans;
1500  	struct wrapper_list *wrappers;
1501  	struct wrapper_msg *wrapper;
1502  	struct wire_msg *msg;
1503  	struct wire_msg *rsp;
1504  
1505  	wrappers = alloc_wrapper_list();
1506  	if (!wrappers)
1507  		return;
1508  
1509  	wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
1510  	if (!wrapper)
1511  		return;
1512  
1513  	msg = &wrapper->msg;
1514  
1515  	trans = (struct wire_trans_terminate_to_dev *)msg->data;
1516  
1517  	trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
1518  	trans->hdr.len = cpu_to_le32(sizeof(*trans));
1519  	trans->handle = cpu_to_le32(usr->handle);
1520  
1521  	mutex_lock(&qdev->cntl_mutex);
1522  	wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
1523  	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
1524  	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
1525  	msg->hdr.len = cpu_to_le32(wrapper->len);
1526  	msg->hdr.count = cpu_to_le32(1);
1527  	msg->hdr.handle = cpu_to_le32(usr->handle);
1528  	msg->hdr.padding = cpu_to_le32(0);
1529  	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
1530  
1531  	/*
1532  	 * msg_xfer releases the mutex
1533  	 * We don't care about the return of msg_xfer since we will not do
1534  	 * anything different based on what happens.
1535  	 * We ignore pending signals since one will be set if the user is
1536  	 * killed, and we need to give the device a chance to clean up, otherwise
1537  	 * DMA may still be in progress when we return.
1538  	 */
1539  	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
1540  	if (!IS_ERR(rsp))
1541  		kfree(rsp);
1542  	free_wrapper_from_list(wrappers, wrapper);
1543  }
1544  
1545  void wake_all_cntl(struct qaic_device *qdev)
1546  {
1547  	struct xfer_queue_elem *elem;
1548  	struct xfer_queue_elem *i;
1549  
1550  	mutex_lock(&qdev->cntl_mutex);
1551  	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
1552  		list_del_init(&elem->list);
1553  		complete_all(&elem->xfer_done);
1554  	}
1555  	mutex_unlock(&qdev->cntl_mutex);
1556  }
1557