// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

/* Bookkeeping for one DMA-coherent payload buffer referenced by a
 * p-ring descriptor.
 */
struct desc_alloc_info {
	dma_addr_t pa;	/* bus address programmed into the ring descriptor */
	void *va;	/* kernel virtual address for CPU access */
};

/* Non-zero when the p-ring is allocated; pring_va doubles as the
 * "PMC is active" flag throughout this file.
 */
static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

/* Reset the PMC context to an empty state and initialize its lock.
 * Must be called once before any other wil_pmc_* function.
 */
void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}

/**
 * wil_pmc_alloc - allocate the PMC physical ring and its data buffers
 * @wil: driver context
 * @num_descriptors: number of ring descriptors (and payload buffers)
 * @descriptor_size: size in bytes of each payload buffer
 *
 * Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 *
 * Returns nothing; success/failure is recorded in pmc->last_cmd_status
 * (retrievable via wil_pmc_last_cmd_status()).
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	/* reject totals that would overflow num_descriptors * descriptor_size
	 * (used later for the debugfs read size computation)
	 */
	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context*/
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	/* restore the wide DMA mask for the rest of the driver */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

		/* stamp each dword: serial number in lsw, reserved
		 * PCM_DATA_INVALID_DW_VAL pattern in msw
		 */
		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor; build in a local copy first,
		 * then publish to the DMA-coherent ring in one struct copy
		 */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	/* hand the ring to FW */
	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					vif->mid,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

	/* error unwind: free in reverse order of allocation */
release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	/* descriptors[] was kcalloc'ed, so the first NULL va marks the
	 * end of the successfully allocated prefix
	 */
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}

/**
 * wil_pmc_free - release all PMC memory and optionally notify FW
 * @wil: driver context
 * @send_pmc_cmd: when non-zero, send WMI_PMC_RELEASE to FW first
 *
 * Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
				wmi_send(wil, WMI_PMC_CMDID, vif->mid,
					 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		/* free the allocated prefix; first NULL va marks its end */
		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}

/**
 * wil_pmc_last_cmd_status - status of the last PMC operation
 * @wil: driver context
 *
 * Status of the last operation requested via debugfs: alloc/free/read.
 * Returns 0 - success or negative errno.
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}

/**
 * Read from required position up to the end of current descriptor,
 * depends on descriptor size configured during alloc request.
317 */ 318 ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, 319 loff_t *f_pos) 320 { 321 struct wil6210_priv *wil = filp->private_data; 322 struct pmc_ctx *pmc = &wil->pmc; 323 size_t retval = 0; 324 unsigned long long idx; 325 loff_t offset; 326 size_t pmc_size; 327 328 mutex_lock(&pmc->lock); 329 330 if (!wil_is_pmc_allocated(pmc)) { 331 wil_err(wil, "error, pmc is not allocated!\n"); 332 pmc->last_cmd_status = -EPERM; 333 mutex_unlock(&pmc->lock); 334 return -EPERM; 335 } 336 337 pmc_size = pmc->descriptor_size * pmc->num_descriptors; 338 339 wil_dbg_misc(wil, 340 "pmc_read: size %u, pos %lld\n", 341 (u32)count, *f_pos); 342 343 pmc->last_cmd_status = 0; 344 345 idx = *f_pos; 346 do_div(idx, pmc->descriptor_size); 347 offset = *f_pos - (idx * pmc->descriptor_size); 348 349 if (*f_pos >= pmc_size) { 350 wil_dbg_misc(wil, 351 "pmc_read: reached end of pmc buf: %lld >= %u\n", 352 *f_pos, (u32)pmc_size); 353 pmc->last_cmd_status = -ERANGE; 354 goto out; 355 } 356 357 wil_dbg_misc(wil, 358 "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", 359 *f_pos, idx, offset, count); 360 361 /* if no errors, return the copied byte count */ 362 retval = simple_read_from_buffer(buf, 363 count, 364 &offset, 365 pmc->descriptors[idx].va, 366 pmc->descriptor_size); 367 *f_pos += retval; 368 out: 369 mutex_unlock(&pmc->lock); 370 371 return retval; 372 } 373 374 loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence) 375 { 376 loff_t newpos; 377 struct wil6210_priv *wil = filp->private_data; 378 struct pmc_ctx *pmc = &wil->pmc; 379 size_t pmc_size; 380 381 mutex_lock(&pmc->lock); 382 383 if (!wil_is_pmc_allocated(pmc)) { 384 wil_err(wil, "error, pmc is not allocated!\n"); 385 pmc->last_cmd_status = -EPERM; 386 mutex_unlock(&pmc->lock); 387 return -EPERM; 388 } 389 390 pmc_size = pmc->descriptor_size * pmc->num_descriptors; 391 392 switch (whence) { 393 case 0: /* SEEK_SET */ 394 newpos = off; 395 break; 396 397 case 1: /* 
SEEK_CUR */ 398 newpos = filp->f_pos + off; 399 break; 400 401 case 2: /* SEEK_END */ 402 newpos = pmc_size; 403 break; 404 405 default: /* can't happen */ 406 newpos = -EINVAL; 407 goto out; 408 } 409 410 if (newpos < 0) { 411 newpos = -EINVAL; 412 goto out; 413 } 414 if (newpos > pmc_size) 415 newpos = pmc_size; 416 417 filp->f_pos = newpos; 418 419 out: 420 mutex_unlock(&pmc->lock); 421 422 return newpos; 423 } 424 425 int wil_pmcring_read(struct seq_file *s, void *data) 426 { 427 struct wil6210_priv *wil = s->private; 428 struct pmc_ctx *pmc = &wil->pmc; 429 size_t pmc_ring_size = 430 sizeof(struct vring_rx_desc) * pmc->num_descriptors; 431 432 mutex_lock(&pmc->lock); 433 434 if (!wil_is_pmc_allocated(pmc)) { 435 wil_err(wil, "error, pmc is not allocated!\n"); 436 pmc->last_cmd_status = -EPERM; 437 mutex_unlock(&pmc->lock); 438 return -EPERM; 439 } 440 441 wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size); 442 443 seq_write(s, pmc->pring_va, pmc_ring_size); 444 445 mutex_unlock(&pmc->lock); 446 447 return 0; 448 } 449