/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */

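/*
 * Example (illustrative sketch only, not part of the driver): the
 * "per-transfer context" is an opaque cookie that is simply echoed back
 * on completion. A HIF-style caller would typically pass an skb pointer
 * when posting a receive buffer and get the same pointer back from the
 * completion path. rx_pipe, paddr and nbytes below are hypothetical.
 *
 *	ret = ath10k_ce_rx_post_buf(rx_pipe, skb, paddr);
 *
 *	// later, from the recv callback registered via struct ce_attr:
 *	while (ath10k_ce_completed_recv_next(rx_pipe, &ctx, &nbytes) == 0) {
 *		struct sk_buff *skb = ctx;	// the pointer posted above
 *		// hand nbytes of payload up the stack
 *	}
 */
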
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset << addr_map->lsb) & addr_map->mask);
}

static inline unsigned int
ath10k_get_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset & addr_map->mask) >> (addr_map->lsb));
}

static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->current_srri_addr);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->current_drri_addr);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr | misc_regs->err_mask);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}

/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: send size exceeds max (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}

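/*
 * Note on the ring-full check in ath10k_ce_send_nolock() above
 * (illustrative; assumes CE_RING_DELTA() evaluates to
 * ((toidx - fromidx) & nentries_mask), which is what its use here
 * implies): one slot is always left unused so that
 * write_index == sw_index unambiguously means "empty".
 *
 *	With nentries = 8 (mask = 7) and sw_index = 2:
 *	  write_index = 2: CE_RING_DELTA(7, 2, 1) = 7 -> 7 entries free
 *	  write_index = 1: CE_RING_DELTA(7, 1, 1) = 0 -> full, -ENOSR
 */
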
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int delta;

	spin_lock_bh(&ce->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ce->ce_lock);

	return delta;
}

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* Prevent the CE ring stuck issue that can occur when the ring is
	 * full: make sure the write index always stays at least one entry
	 * behind the read index.
	 */
	if ((cur_write_idx + nentries) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

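/*
 * Example (illustrative sketch only): a bus/HIF layer keeps the
 * destination ring topped up by mapping fresh buffers and posting them
 * until no free entries remain. rx_pipe and buf_len are hypothetical;
 * only the CE calls are from this file.
 *
 *	spin_lock_bh(&ce->ce_lock);
 *	while (__ath10k_ce_rx_num_free_bufs(rx_pipe) > 0) {
 *		struct sk_buff *skb = dev_alloc_skb(buf_len);
 *		dma_addr_t paddr;
 *
 *		if (!skb)
 *			break;
 *
 *		paddr = dma_map_single(ar->dev, skb->data, buf_len,
 *				       DMA_FROM_DEVICE);
 *		if (dma_mapping_error(ar->dev, paddr)) {
 *			dev_kfree_skb_any(skb);
 *			break;
 *		}
 *
 *		if (__ath10k_ce_rx_post_buf(rx_pipe, skb, paddr)) {
 *			dma_unmap_single(ar->dev, paddr, buf_len,
 *					 DMA_FROM_DEVICE);
 *			dev_kfree_skb_any(skb);
 *			break;
 *		}
 *	}
 *	spin_unlock_bh(&ce->ce_lock);
 */
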
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   nbytesp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

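/*
 * Example (illustrative sketch only): on pipe teardown a caller can drain
 * the still-posted receive buffers with ath10k_ce_revoke_recv_next() so
 * they can be unmapped and freed; -EIO indicates the ring is empty. The
 * skb handling and buffer length below are hypothetical.
 *
 *	void *ctx;
 *	u32 paddr;
 *
 *	while (ath10k_ce_revoke_recv_next(ce_pipe, &ctx, &paddr) == 0) {
 *		struct sk_buff *skb = ctx;
 *
 *		dma_unmap_single(ar->dev, paddr, buf_len, DMA_FROM_DEVICE);
 *		dev_kfree_skb_any(skb);
 *	}
 */
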
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

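/*
 * Example (illustrative sketch only): a send-completion handler (typically
 * installed as ce_attr->send_cb) reaps finished descriptors until -EIO
 * reports that the source ring has been fully caught up. Freeing the skb
 * here is hypothetical; only the CE call is from this file.
 *
 *	static void example_tx_cb(struct ath10k_ce_pipe *ce_state)
 *	{
 *		void *ctx;
 *
 *		while (ath10k_ce_completed_send_next(ce_state, &ctx) == 0) {
 *			struct sk_buff *skb = ctx;
 *
 *			if (skb)
 *				dev_kfree_skb_any(skb);
 *		}
 *	}
 */
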
/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ce->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask);

	spin_unlock_bh(&ce->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ce->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);

	spin_unlock_bh(&ce->ce_lock);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = ath10k_ce_interrupt_summary(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ce_id;
	struct ath10k_ce_pipe *ce_state;

	/* Skip the last copy engine, CE7, the diagnostic window, as it
	 * uses polling and isn't initialized for interrupts.
	 */
	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
		ce_state = &ce->ce_states[ce_id];
		ath10k_ce_per_engine_handler_adjust(ce_state);
	}
}

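/*
 * Example (illustrative sketch only): a bus backend that shares one
 * interrupt line across all engines can let this layer fan the summary
 * register out to the individual CEs. The handler name and any
 * bus-specific acknowledge step are hypothetical.
 *
 *	static irqreturn_t example_ce_interrupt_handler(int irq, void *arg)
 *	{
 *		struct ath10k *ar = arg;
 *
 *		// bus-specific: ack/mask the shared line here if needed
 *		ath10k_ce_per_engine_service_any(ar);
 *
 *		return IRQ_HANDLED;
 *	}
 */
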
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_zalloc_coherent(ar->dev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}

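/*
 * Note (illustrative): both allocators above size the DMA block as
 * nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN so that the
 * descriptor array can be aligned up on both the CPU-visible and the
 * CE-visible address without overrunning the allocation: if the ring
 * itself needs N bytes and the alignment is A, N + A bytes are
 * allocated and at most A - 1 leading bytes are skipped by
 * PTR_ALIGN()/ALIGN().
 */
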
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}

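/*
 * Example (illustrative sketch only): the bus layer describes each pipe
 * with a ce_attr, allocates the rings once at probe time and then
 * (re)initializes the hardware side on every power-up. The attribute
 * values and the callback name below are hypothetical.
 *
 *	static const struct ce_attr example_attr = {
 *		.flags = 0,
 *		.src_nentries = 16,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 0,
 *		.send_cb = example_tx_cb,
 *	};
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, &example_attr);	// probe
 *	...
 *	ret = ath10k_ce_init_pipe(ar, ce_id, &example_attr);	// power-up
 *	...
 *	ath10k_ce_deinit_pipe(ar, ce_id);			// power-down
 *	ath10k_ce_free_pipe(ar, ce_id);				// remove
 */
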
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
	 * additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_crash_data ce_data;
	u32 addr, id;

	lockdep_assert_held(&ar->data_lock);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ce->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce_data.base_addr = cpu_to_le32(addr);

		ce_data.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce_data.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce_data.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce_data.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce_data;

		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
			   le32_to_cpu(ce_data.base_addr),
			   le32_to_cpu(ce_data.src_wr_idx),
			   le32_to_cpu(ce_data.src_r_idx),
			   le32_to_cpu(ce_data.dst_wr_idx),
			   le32_to_cpu(ce_data.dst_r_idx));
	}

	spin_unlock_bh(&ce->ce_lock);
}