/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

/* Encode a queue depth into the compact exponent/factor byte format used by
 * the firmware-visible tx queue state table.
 */
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

/* Update the shared per-peer/TID queue depth entry for a single txq. Caller
 * must hold htt.tx_lock; the change is committed to the device by
 * __ath10k_htt_tx_txq_sync().
 */
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta)
		peer_id = arsta->peer_id;
	else
		peer_id = arvif->peer_id;

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr,
			  htt->frag_desc.paddr);
}

static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_KERNEL);
	if (!htt->frag_desc.vaddr) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		return ar->scan.vdev_id;
	else if (cb->vif)
		return arvif->vdev_id;
	else if (ar->monitor_started)
		return ar->monitor_vdev_id;
	else
		return 0;
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is ignored
	 * by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}