/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
	dma_addr_t pa;
	void *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}

/**
 * Allocate the physical ring (p-ring) and the required number of
 * descriptors of the required size.
 * Initialize the descriptors as required by the PMC DMA.
 * Each dword of a descriptor's buffer is initialized to hold the
 * dword's serial number in the lsw and the reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);
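
	/* Note: kcalloc() zero-fills the list, so the error-unwind loop
	 * under release_pmc_skbs below can safely stop at the first
	 * descriptor whose .va is still NULL.
	 */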

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has a limitation that all vring addresses must share the same
	 * upper 16 msb bits of the 48-bit address. To work around that,
	 * if we are using more than 32 bit addresses, switch to a 32 bit
	 * allocation before allocating the vring memory.
	 *
	 * There's no check of the return value of dma_set_mask_and_coherent,
	 * since we assume that if we were able to set the mask during
	 * initialization on this system, it will not fail if we set it again.
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d\n", i);
			goto release_pmc_skbs;
		}

		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d\n",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
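
/* A minimal usage sketch (the values are illustrative, not taken from
 * this driver): allocate a PMC ring of 256 descriptors of 2048 bytes
 * each, then pick up the outcome of the allocation and of the WMI
 * exchange via wil_pmc_last_cmd_status() below. In this driver both
 * calls are driven from debugfs.
 *
 *	wil_pmc_alloc(wil, 256, 2048);
 *	if (wil_pmc_last_cmd_status(wil))
 *		pr_err("pmc allocation failed\n");
 */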

/**
 * Traverse the p-ring and release all buffers.
 * At the end, release the p-ring memory.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
				 sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d\n",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}

/**
 * Report the status of the last operation requested via debugfs
 * (alloc/free/read): 0 on success or a negative errno.
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}
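
/* Worked example of the position math in wil_pmc_read() below, with
 * hypothetical sizes: for descriptor_size = 2048 and *f_pos = 5000,
 * do_div() leaves the quotient idx = 5000 / 2048 = 2 in place, and the
 * code then derives offset = 5000 - 2 * 2048 = 904, i.e. the read
 * starts 904 bytes into the third descriptor and is clamped to that
 * descriptor's end.
 */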

/**
 * Read from the requested position up to the end of the current
 * descriptor; the descriptor size is the one configured by the
 * allocation request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* if no errors, return the copied byte count */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}

loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}
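
/* A minimal sketch, assuming the handlers above are exposed through
 * debugfs the way the rest of this driver's hooks are (the real wiring
 * lives in debugfs.c; the "pmcdata" file name, the dbg_dir dentry and
 * the wil_pmc_debugfs_init() helper are illustrative only):
 */
#if 0	/* illustrative sketch, not compiled */
static const struct file_operations fops_pmcdata = {
	.open	= simple_open,	/* publishes inode->i_private (the wil
				 * pointer below) as filp->private_data
				 */
	.read	= wil_pmc_read,
	.llseek	= wil_pmc_llseek,
};

static void wil_pmc_debugfs_init(struct wil6210_priv *wil,
				 struct dentry *dbg_dir)
{
	debugfs_create_file("pmcdata", 0444, dbg_dir, wil, &fops_pmcdata);
}
#endif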