/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
			(struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
			(struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
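	/* Only up to HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the frame are
	 * copied inline into the command below; the full frame remains
	 * DMA-mapped at msdu_paddr set above.
	 */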
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = 0;
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * Reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
					&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}

static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
					&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}

static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}