1 /*
2  * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
3  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/types.h>
19 #include <linux/errno.h>
20 #include <linux/fs.h>
21 #include <linux/seq_file.h>
22 #include "wmi.h"
23 #include "wil6210.h"
24 #include "txrx.h"
25 #include "pmc.h"
26 
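/* Info for one allocated descriptor buffer: @pa is the DMA address written
 * into the ring descriptor, @va the kernel virtual address used to read
 * and free the buffer.
 */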
27 struct desc_alloc_info {
28 	dma_addr_t pa;
29 	void	  *va;
30 };
31 
32 static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
33 {
34 	return !!pmc->pring_va;
35 }
36 
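/* Initialize the PMC context and its lock; call once before any other
 * PMC operation.
 */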
37 void wil_pmc_init(struct wil6210_priv *wil)
38 {
39 	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
40 	mutex_init(&wil->pmc.lock);
41 }
42 
43 /**
44  * Allocate the physical ring (p-ring) and the required
45  * number of descriptors of required size.
46  * Initialize the descriptors as required by pmc dma.
47  * The descriptors' buffers dwords are initialized to hold
48  * dword's serial number in the lsw and reserved value
49  * PCM_DATA_INVALID_DW_VAL in the msw.
50  */
51 void wil_pmc_alloc(struct wil6210_priv *wil,
52 		   int num_descriptors,
53 		   int descriptor_size)
54 {
55 	u32 i;
56 	struct pmc_ctx *pmc = &wil->pmc;
57 	struct device *dev = wil_to_dev(wil);
58 	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
59 	struct wmi_pmc_cmd pmc_cmd = {0};
60 	int last_cmd_err = -ENOMEM;
61 
62 	mutex_lock(&pmc->lock);
63 
64 	if (wil_is_pmc_allocated(pmc)) {
65 		/* sanity check */
66 		wil_err(wil, "ERROR pmc is already allocated\n");
67 		goto no_release_err;
68 	}
69 	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
70 		wil_err(wil,
71 			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
72 			num_descriptors, descriptor_size);
73 		last_cmd_err = -EINVAL;
74 		goto no_release_err;
75 	}
76 
77 	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
78 		wil_err(wil,
79 			"num_descriptors(%d) exceeds max ring size %d\n",
80 			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
81 		last_cmd_err = -EINVAL;
82 		goto no_release_err;
83 	}
84 
85 	if (num_descriptors > INT_MAX / descriptor_size) {
86 		wil_err(wil,
87 			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
88 			num_descriptors, descriptor_size);
89 		last_cmd_err = -EINVAL;
90 		goto no_release_err;
91 	}
92 
93 	pmc->num_descriptors = num_descriptors;
94 	pmc->descriptor_size = descriptor_size;
95 
96 	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
97 		     num_descriptors, descriptor_size);
98 
	/* allocate the descriptors info list in the pmc context */
100 	pmc->descriptors = kcalloc(num_descriptors,
101 				  sizeof(struct desc_alloc_info),
102 				  GFP_KERNEL);
103 	if (!pmc->descriptors) {
104 		wil_err(wil, "ERROR allocating pmc skb list\n");
105 		goto no_release_err;
106 	}
107 
108 	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
109 		     pmc->descriptors);
110 
	/* Allocate pring buffer and descriptors.
	 * vring->va must be aligned on its size, rounded up to a power of 2.
	 * This is guaranteed by dma_alloc_coherent.
	 *
	 * HW limitation: all vring addresses must share the same upper
	 * 16 bits of the 48-bit address. To work around this, if addresses
	 * wider than 32 bits are in use, switch to a 32-bit DMA mask before
	 * allocating the vring memory.
	 *
	 * The return value of dma_set_mask_and_coherent is not checked:
	 * if the mask could be set during initialization on this system,
	 * setting it again is assumed not to fail.
	 */
124 	if (wil->dma_addr_size > 32)
125 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
126 
127 	pmc->pring_va = dma_alloc_coherent(dev,
128 			sizeof(struct vring_tx_desc) * num_descriptors,
129 			&pmc->pring_pa,
130 			GFP_KERNEL);
131 
132 	if (wil->dma_addr_size > 32)
133 		dma_set_mask_and_coherent(dev,
134 					  DMA_BIT_MASK(wil->dma_addr_size));
135 
136 	wil_dbg_misc(wil,
137 		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
138 		     pmc->pring_va, &pmc->pring_pa,
139 		     sizeof(struct vring_tx_desc),
140 		     num_descriptors,
141 		     sizeof(struct vring_tx_desc) * num_descriptors);
142 
143 	if (!pmc->pring_va) {
144 		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_desc_list;
146 	}
147 
	/* initially, all descriptors are HW owned (status DU bit is 0), so
	 * HW may fill them with captured data. For Tx, Rx and PMC the
	 * ownership bit is at the same location, thus the Tx descriptor
	 * layout can be used for all of them.
	 */
152 	for (i = 0; i < num_descriptors; i++) {
153 		struct vring_tx_desc *_d = &pmc->pring_va[i];
154 		struct vring_tx_desc dd = {}, *d = &dd;
155 		int j = 0;
156 
157 		pmc->descriptors[i].va = dma_alloc_coherent(dev,
158 			descriptor_size,
159 			&pmc->descriptors[i].pa,
160 			GFP_KERNEL);
161 
162 		if (unlikely(!pmc->descriptors[i].va)) {
163 			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
164 			goto release_pmc_skbs;
165 		}
166 
167 		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
168 			u32 *p = (u32 *)pmc->descriptors[i].va + j;
169 			*p = PCM_DATA_INVALID_DW_VAL | j;
170 		}
171 
172 		/* configure dma descriptor */
173 		d->dma.addr.addr_low =
174 			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
175 		d->dma.addr.addr_high =
176 			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
177 		d->dma.status = 0; /* 0 = HW_OWNED */
178 		d->dma.length = cpu_to_le16(descriptor_size);
		/* BIT(9) is a HW-specific PMC flag with no named define here */
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
180 		*_d = *d;
181 	}
182 
183 	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
184 
185 	pmc_cmd.op = WMI_PMC_ALLOCATE;
186 	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
187 	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
188 
189 	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
190 	pmc->last_cmd_status = wmi_send(wil,
191 					WMI_PMC_CMDID,
192 					vif->mid,
193 					&pmc_cmd,
194 					sizeof(pmc_cmd));
195 	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d\n",
			pmc->last_cmd_status);
		goto release_pmc_descriptors;
200 	}
201 
202 	mutex_unlock(&pmc->lock);
203 
204 	return;
205 
release_pmc_descriptors:
	wil_err(wil, "exit on error: Releasing descriptor buffers...\n");
208 	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
209 		dma_free_coherent(dev,
210 				  descriptor_size,
211 				  pmc->descriptors[i].va,
212 				  pmc->descriptors[i].pa);
213 
214 		pmc->descriptors[i].va = NULL;
215 	}
216 	wil_err(wil, "exit on error: Releasing pring...\n");
217 
218 	dma_free_coherent(dev,
219 			  sizeof(struct vring_tx_desc) * num_descriptors,
220 			  pmc->pring_va,
221 			  pmc->pring_pa);
222 
223 	pmc->pring_va = NULL;
224 
release_pmc_desc_list:
226 	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
227 	kfree(pmc->descriptors);
228 	pmc->descriptors = NULL;
229 
230 no_release_err:
231 	pmc->last_cmd_status = last_cmd_err;
232 	mutex_unlock(&pmc->lock);
233 }
234 
235 /**
236  * Traverse the p-ring and release all buffers.
237  * At the end release the p-ring memory
238  */
239 void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
240 {
241 	struct pmc_ctx *pmc = &wil->pmc;
242 	struct device *dev = wil_to_dev(wil);
243 	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
244 	struct wmi_pmc_cmd pmc_cmd = {0};
245 
246 	mutex_lock(&pmc->lock);
247 
248 	pmc->last_cmd_status = 0;
249 
250 	if (!wil_is_pmc_allocated(pmc)) {
251 		wil_dbg_misc(wil,
252 			     "pmc_free: Error, can't free - not allocated\n");
253 		pmc->last_cmd_status = -EPERM;
254 		mutex_unlock(&pmc->lock);
255 		return;
256 	}
257 
258 	if (send_pmc_cmd) {
259 		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
260 		pmc_cmd.op = WMI_PMC_RELEASE;
261 		pmc->last_cmd_status =
262 				wmi_send(wil, WMI_PMC_CMDID, vif->mid,
263 					 &pmc_cmd, sizeof(pmc_cmd));
264 		if (pmc->last_cmd_status) {
265 			wil_err(wil,
266 				"WMI_PMC_CMD with RELEASE op failed, status %d",
267 				pmc->last_cmd_status);
			/* There's nothing we can do about this error.
			 * Normally, it should never occur.
			 * Continue freeing all memory allocated for pmc.
			 */
272 		}
273 	}
274 
275 	if (pmc->pring_va) {
276 		size_t buf_size = sizeof(struct vring_tx_desc) *
277 				  pmc->num_descriptors;
278 
279 		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
280 			     pmc->pring_va);
281 		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
282 
283 		pmc->pring_va = NULL;
284 	} else {
285 		pmc->last_cmd_status = -ENOENT;
286 	}
287 
288 	if (pmc->descriptors) {
289 		int i;
290 
291 		for (i = 0;
292 		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
293 			dma_free_coherent(dev,
294 					  pmc->descriptor_size,
295 					  pmc->descriptors[i].va,
296 					  pmc->descriptors[i].pa);
297 			pmc->descriptors[i].va = NULL;
298 		}
299 		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
300 			     pmc->num_descriptors);
301 		wil_dbg_misc(wil,
302 			     "pmc_free: free pmc descriptors info list %p\n",
303 			     pmc->descriptors);
304 		kfree(pmc->descriptors);
305 		pmc->descriptors = NULL;
306 	} else {
307 		pmc->last_cmd_status = -ENOENT;
308 	}
309 
310 	mutex_unlock(&pmc->lock);
311 }
312 
313 /**
314  * Status of the last operation requested via debugfs: alloc/free/read.
315  * 0 - success or negative errno
316  */
317 int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
318 {
319 	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
320 		     wil->pmc.last_cmd_status);
321 
322 	return wil->pmc.last_cmd_status;
323 }
324 
325 /**
326  * Read from required position up to the end of current descriptor,
327  * depends on descriptor size configured during alloc request.
328  */
329 ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
330 		     loff_t *f_pos)
331 {
332 	struct wil6210_priv *wil = filp->private_data;
333 	struct pmc_ctx *pmc = &wil->pmc;
	ssize_t retval = 0;
335 	unsigned long long idx;
336 	loff_t offset;
337 	size_t pmc_size;
338 
339 	mutex_lock(&pmc->lock);
340 
341 	if (!wil_is_pmc_allocated(pmc)) {
342 		wil_err(wil, "error, pmc is not allocated!\n");
343 		pmc->last_cmd_status = -EPERM;
344 		mutex_unlock(&pmc->lock);
345 		return -EPERM;
346 	}
347 
348 	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
349 
	wil_dbg_misc(wil,
		     "pmc_read: size %zu, pos %lld\n",
		     count, *f_pos);
353 
354 	pmc->last_cmd_status = 0;
355 
356 	idx = *f_pos;
357 	do_div(idx, pmc->descriptor_size);
358 	offset = *f_pos - (idx * pmc->descriptor_size);
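
	/* worked example (illustrative sizes): with descriptor_size = 4096
	 * and *f_pos = 10000, idx = 2 and offset = 1808, i.e. the read
	 * starts 1808 bytes into the third descriptor buffer
	 */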
359 
360 	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %zu\n",
			     *f_pos, pmc_size);
364 		pmc->last_cmd_status = -ERANGE;
365 		goto out;
366 	}
367 
	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %lld) %zu bytes\n",
		     *f_pos, idx, offset, count);
371 
	/* on success, simple_read_from_buffer returns the copied byte
	 * count; advance the file position only then, so an -EFAULT
	 * return cannot corrupt *f_pos
	 */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	if (retval > 0)
		*f_pos += retval;
379 out:
380 	mutex_unlock(&pmc->lock);
381 
382 	return retval;
383 }
384 
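/**
 * wil_pmc_llseek() - seek within the captured PMC data
 * @filp: file, with wil6210_priv in private_data
 * @off: offset, interpreted according to @whence
 * @whence: SEEK_SET, SEEK_CUR or SEEK_END
 *
 * Return: the new position, clamped to the PMC data size, or -EINVAL /
 * -EPERM on error.
 */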
385 loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
386 {
387 	loff_t newpos;
388 	struct wil6210_priv *wil = filp->private_data;
389 	struct pmc_ctx *pmc = &wil->pmc;
390 	size_t pmc_size;
391 
392 	mutex_lock(&pmc->lock);
393 
394 	if (!wil_is_pmc_allocated(pmc)) {
395 		wil_err(wil, "error, pmc is not allocated!\n");
396 		pmc->last_cmd_status = -EPERM;
397 		mutex_unlock(&pmc->lock);
398 		return -EPERM;
399 	}
400 
401 	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
402 
	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		/* standard lseek semantics: offset is relative to the end */
		newpos = pmc_size + off;
		break;

	default: /* invalid whence from user space */
		newpos = -EINVAL;
		goto out;
	}
420 
421 	if (newpos < 0) {
422 		newpos = -EINVAL;
423 		goto out;
424 	}
425 	if (newpos > pmc_size)
426 		newpos = pmc_size;
427 
428 	filp->f_pos = newpos;
429 
430 out:
431 	mutex_unlock(&pmc->lock);
432 
433 	return newpos;
434 }
435 
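/**
 * wil_pmcring_read() - dump the raw PMC descriptor ring via seq_file
 * @s: seq file, with wil6210_priv in private
 * @data: unused
 *
 * Return: 0 on success, -EPERM if PMC is not allocated.
 */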
436 int wil_pmcring_read(struct seq_file *s, void *data)
437 {
438 	struct wil6210_priv *wil = s->private;
439 	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_ring_size;
442 
443 	mutex_lock(&pmc->lock);
444 
445 	if (!wil_is_pmc_allocated(pmc)) {
446 		wil_err(wil, "error, pmc is not allocated!\n");
447 		pmc->last_cmd_status = -EPERM;
448 		mutex_unlock(&pmc->lock);
449 		return -EPERM;
450 	}
451 
452 	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);
453 
454 	seq_write(s, pmc->pring_va, pmc_ring_size);
455 
456 	mutex_unlock(&pmc->lock);
457 
458 	return 0;
459 }
460