/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
	dma_addr_t pa;
	void *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}
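/* Typical lifecycle, as a hedged sketch (the real callers live in the
 * debugfs layer and the sizes below are hypothetical):
 *
 *	wil_pmc_init(wil);		// once, at device init
 *	wil_pmc_alloc(wil, 256, 2048);	// 256 descriptors x 2048 bytes
 *	...				// sample data via wil_pmc_read()
 *	wil_pmc_free(wil, 1);		// tell FW, then reclaim host memory
 */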
/**
 * Allocate the physical ring (p-ring) and the requested
 * number of descriptors of the requested size.
 * Initialize the descriptors as required by PMC DMA.
 * Each dword of every descriptor buffer is initialized to hold
 * the dword's serial number in the LSW and the reserved value
 * PCM_DATA_INVALID_DW_VAL in the MSW.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc descriptors info list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned to its size, rounded up to a power
	 * of 2. This is guaranteed by dma_alloc_coherent.
	 *
	 * HW has a limitation that all vring addresses must share the same
	 * upper 16 MSBs of the 48-bit address. To work around that, if we
	 * are using 48-bit addresses, switch to 32-bit allocation before
	 * allocating the vring memory.
	 *
	 * The return value of dma_set_mask_and_coherent is not checked,
	 * since we assume that if the mask could be set during
	 * initialization on this system, setting it again will not fail.
	 */
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* Initially, all descriptors are HW owned (status == 0), so the HW
	 * may DMA captured data into them. For Tx, Rx, and PMC, the
	 * ownership bit is at the same location, thus we can use any
	 * descriptor type.
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
				descriptor_size,
				&pmc->descriptors[i].pa,
				GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d\n", i);
			goto release_pmc_skbs;
		}

		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d\n",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing descriptor buffers...\n");
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
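/* Hedged sketch of what the fill pattern above implies (assuming
 * PCM_DATA_INVALID_DW_VAL occupies the MSW): until the HW overwrites a
 * buffer, dword j of any descriptor reads back as the pattern value,
 * which a debug check could assert:
 *
 *	u32 *p = (u32 *)pmc->descriptors[0].va;
 *
 *	WARN_ON(p[1] != (PCM_DATA_INVALID_DW_VAL | 1));
 */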
/**
 * Traverse the p-ring and release all buffers.
 * At the end, release the p-ring memory itself.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
				 sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d\n",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to free all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}

/**
 * Report the status of the last PMC operation requested via debugfs
 * (alloc/free/read): 0 on success or a negative errno.
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}
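/* Hedged sketch of how a debugfs handler might surface the status above
 * (the real plumbing lives in the debugfs code and may differ; "s" is a
 * hypothetical seq_file):
 *
 *	seq_printf(s, "Last command status: %d\n",
 *		   wil_pmc_last_cmd_status(wil));
 */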
/**
 * Read from the requested position up to the end of the current
 * descriptor; the descriptor size is the one configured in the
 * alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	ssize_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* if no errors, return the copied byte count */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	if (retval > 0)
		*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}

loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}
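/* Minimal sketch of how debugfs might wire these handlers up; the struct
 * name is hypothetical and the real hookup lives in the debugfs code:
 *
 *	static const struct file_operations fops_pmcdata = {
 *		.open	= simple_open,
 *		.read	= wil_pmc_read,
 *		.llseek	= wil_pmc_llseek,
 *	};
 */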