/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
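
/* A note on the ring geometry (illustrative values, not from this file): the
 * ring is a power-of-two array of DMA addresses indexed modulo its size. If
 * HTT_RX_RING_SIZE_MAX is, say, 2048 (the exact value lives in htt.h), then
 * HTT_RX_RING_FILL_LEVEL works out to 2048 / 2 - 1 = 1023, i.e. the ring is
 * deliberately kept no more than about half full; see the BUILD_BUG_ON and
 * the comment in __ath10k_htt_rx_ring_fill_n() below.
 */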

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
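
/* Worked example of the batched refill below (values assumed for
 * illustration; ATH10K_HTT_MAX_NUM_REFILL is defined in htt.h): with a
 * deficit of 1023 buffers and a batch limit of 100, one pass posts 100
 * buffers and arms the retry timer to fire after
 * HTT_RX_RING_REFILL_RESCHED_MS (5 ms), so the ring converges to fill_level
 * over roughly 11 passes instead of stalling the CPU in one long refill.
 */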

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW will
	 * not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/* Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
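
/* The two pop paths differ: ath10k_htt_rx_netbuf_pop() above consumes
 * buffers sequentially via sw_rd_idx and serves the plain rx indication
 * path, while ath10k_htt_rx_pop_paddr() below looks buffers up by their DMA
 * address in skb_table and serves the in-order (full rx reorder) path,
 * where firmware reports completions by physical address rather than by
 * ring position.
 */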
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
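
/* Setup below allocates three things: the host-side netbufs_ring array of
 * skb pointers (kzalloc), the device-visible ring of DMA addresses
 * (dma_alloc_coherent), and a single shared alloc index word
 * (dma_alloc_coherent) through which the host publishes its write index to
 * the firmware.
 */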
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
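
/* Example of how the two length helpers pair up, assuming the usual 802.11
 * cipher layouts: for CCMP (HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) the head
 * length is the 8-byte PN/IV header (IEEE80211_CCMP_HDR_LEN) and the tail
 * length is the 8-byte MIC (IEEE80211_CCMP_MIC_LEN); for GCMP the MIC grows
 * to 16 bytes (IEEE80211_GCMP_MIC_LEN). The undecap code trims these from
 * the head and tail respectively once hardware has decrypted the frame.
 */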
#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->bw = RATE_INFO_BW_40;
			break;
		/* 80MHZ */
		case 2:
			status->bw = RATE_INFO_BW_80;
			break;
		case 3:
			status->bw = RATE_INFO_BW_160;
			break;
		}

		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
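
/* Channel resolution for rx status works through a chain of fallbacks; the
 * helpers below implement the individual steps and ath10k_htt_rx_h_channel()
 * tries them in order: scan channel, current rx channel, the peer's vdev
 * channel (derived from the rx descriptor), the vdev matching the given id,
 * any active channel context, and finally the target operating channel.
 */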
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
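
/* Per-PPDU status is sticky across the MPDUs of one PPDU: the first MPDU
 * (RX_ATTENTION_FLAGS_FIRST_MPDU) resets and fills signal, channel and rate,
 * every following MPDU inherits them, and only the last MPDU
 * (RX_ATTENTION_FLAGS_LAST_MPDU) carries a valid TSF for mactime.
 */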
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
"sgi " : "", 981 status->rate_idx, 982 status->nss, 983 status->freq, 984 status->band, status->flag, 985 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 986 !!(status->flag & RX_FLAG_MMIC_ERROR), 987 !!(status->flag & RX_FLAG_AMSDU_MORE)); 988 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 989 skb->data, skb->len); 990 trace_ath10k_rx_hdr(ar, skb->data, skb->len); 991 trace_ath10k_rx_payload(ar, skb->data, skb->len); 992 993 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 994 } 995 996 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, 997 struct ieee80211_hdr *hdr) 998 { 999 int len = ieee80211_hdrlen(hdr->frame_control); 1000 1001 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, 1002 ar->running_fw->fw_file.fw_features)) 1003 len = round_up(len, 4); 1004 1005 return len; 1006 } 1007 1008 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, 1009 struct sk_buff *msdu, 1010 struct ieee80211_rx_status *status, 1011 enum htt_rx_mpdu_encrypt_type enctype, 1012 bool is_decrypted) 1013 { 1014 struct ieee80211_hdr *hdr; 1015 struct htt_rx_desc *rxd; 1016 size_t hdr_len; 1017 size_t crypto_len; 1018 bool is_first; 1019 bool is_last; 1020 1021 rxd = (void *)msdu->data - sizeof(*rxd); 1022 is_first = !!(rxd->msdu_end.common.info0 & 1023 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1024 is_last = !!(rxd->msdu_end.common.info0 & 1025 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1026 1027 /* Delivered decapped frame: 1028 * [802.11 header] 1029 * [crypto param] <-- can be trimmed if !fcs_err && 1030 * !decrypt_err && !peer_idx_invalid 1031 * [amsdu header] <-- only if A-MSDU 1032 * [rfc1042/llc] 1033 * [payload] 1034 * [FCS] <-- at end, needs to be trimmed 1035 */ 1036 1037 /* This probably shouldn't happen but warn just in case */ 1038 if (unlikely(WARN_ON_ONCE(!is_first))) 1039 return; 1040 1041 /* This probably shouldn't happen but warn just in case */ 1042 if (unlikely(WARN_ON_ONCE(!(is_first && is_last)))) 1043 return; 1044 1045 skb_trim(msdu, msdu->len - FCS_LEN); 1046 1047 /* In most cases this will be true for sniffed frames. It makes sense 1048 * to deliver them as-is without stripping the crypto param. This is 1049 * necessary for software based decryption. 1050 * 1051 * If there's no error then the frame is decrypted. At least that is 1052 * the case for frames that come in via fragmented rx indication. 1053 */ 1054 if (!is_decrypted) 1055 return; 1056 1057 /* The payload is decrypted so strip crypto params. Start from tail 1058 * since hdr is used to compute some stuff. 
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					   struct sk_buff *msdu,
					   struct ieee80211_rx_status *status,
					   const u8 first_hdr[64],
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
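
/* The dispatcher below selects one of the four undecap helpers above based
 * on the decap format the hardware recorded in the rx descriptor: raw (a
 * full 802.11 frame including FCS), native wifi (a stripped-down 3addr
 * 802.11 header without QoS Control), ethernet, and 802.3/SNAP-LLC.
 */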
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
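
/* Checksum example: a TCP-over-IPv4 MSDU whose attention flags have neither
 * RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL nor RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL
 * set is marked CHECKSUM_UNNECESSARY, letting the network stack skip software
 * verification; anything the hardware didn't positively verify falls back to
 * CHECKSUM_NONE.
 */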
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data, it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_process_rx(ar, status, msdu);
	}
}
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: Might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}
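
/* Unchaining sketch: a chained MSDU arrives as 1 + ring2_more_count buffers.
 * ath10k_unchain_msdu() grows the first skb's tailroom with
 * pskb_expand_head() and copies each continuation buffer into it, so the
 * rest of the rx path only ever sees linear, single-buffer MSDUs.
 */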
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret, num_msdus;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* ret of 1 indicates chained msdus that need unchaining */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	return num_msdus;
}

static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);
}
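
/* Tx completion folds the firmware status down to three host states:
 * HTT_DATA_TX_STATUS_OK -> ACK, NO_ACK -> NOACK, and everything else
 * (discard, postpone, download fail, unknown) -> DISCARD.
 */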
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret, num_msdus = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			num_msdus += skb_queue_len(&amsdu);
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return num_msdus;
}
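
/* A minimal sketch of the MS()/SM() bitfield helpers used throughout this
 * file, assuming the usual ath10k convention of paired _MASK/_LSB defines
 * (the example field below is made up for illustration):
 *
 *	#define EXAMPLE_INFO_TID_MASK	0x000f
 *	#define EXAMPLE_INFO_TID_LSB	0
 *
 *	#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 *	#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
 *
 *	u16 info = 0x0025;
 *	u8 tid = MS(info, EXAMPLE_INFO_TID);	yields 0x5
 *
 * MS() extracts a field from a packed word; SM() packs a value into one.
 */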

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
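
/* Illustrative recap of the pull-mode contract implemented above: each fetch
 * record grants a per-(peer, tid) budget of max_num_msdus and max_num_bytes;
 * the host pushes frames until either limit is reached, then reports back
 * what it actually consumed. For example, a hypothetical record with
 * num_msdus = 4 and num_bytes = 6000 against a queue holding three 1500-byte
 * frames would be confirmed with num_msdus = 3 and num_bytes = 4500.
 */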

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar,
			    "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
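
/* Illustrative note: in push/pull mode the threshold parsed above becomes
 * the global num_push_allowed, while each record caps a single (peer, tid)
 * queue via num_max_msdus. E.g. a hypothetical record for peer 3, tid 0
 * with num_max_msdus = 8 lets the driver push at most 8 frames for that txq
 * before it has to wait for the next tx fetch indication.
 */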
"received corrupted tx_mode_switch_mode_ind event: too many records\n"); 2226 return; 2227 } 2228 2229 switch (mode) { 2230 case HTT_TX_MODE_SWITCH_PUSH: 2231 case HTT_TX_MODE_SWITCH_PUSH_PULL: 2232 break; 2233 default: 2234 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 2235 mode); 2236 return; 2237 } 2238 2239 if (!enable) 2240 return; 2241 2242 ar->htt.tx_q_state.enabled = enable; 2243 ar->htt.tx_q_state.mode = mode; 2244 ar->htt.tx_q_state.num_push_allowed = threshold; 2245 2246 rcu_read_lock(); 2247 2248 for (i = 0; i < num_records; i++) { 2249 record = &resp->tx_mode_switch_ind.records[i]; 2250 info0 = le16_to_cpu(record->info0); 2251 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 2252 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 2253 2254 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2255 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2256 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2257 peer_id, tid); 2258 continue; 2259 } 2260 2261 spin_lock_bh(&ar->data_lock); 2262 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2263 spin_unlock_bh(&ar->data_lock); 2264 2265 /* It is okay to release the lock and use txq because RCU read 2266 * lock is held. 2267 */ 2268 2269 if (unlikely(!txq)) { 2270 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", 2271 peer_id, tid); 2272 continue; 2273 } 2274 2275 spin_lock_bh(&ar->htt.tx_lock); 2276 artxq = (void *)txq->drv_priv; 2277 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 2278 spin_unlock_bh(&ar->htt.tx_lock); 2279 } 2280 2281 rcu_read_unlock(); 2282 2283 ath10k_mac_tx_push_pending(ar); 2284 } 2285 2286 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2287 { 2288 bool release; 2289 2290 release = ath10k_htt_t2h_msg_handler(ar, skb); 2291 2292 /* Free the indication buffer */ 2293 if (release) 2294 dev_kfree_skb_any(skb); 2295 } 2296 2297 static inline bool is_valid_legacy_rate(u8 rate) 2298 { 2299 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 2300 18, 24, 36, 48, 54}; 2301 int i; 2302 2303 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { 2304 if (rate == legacy_rates[i]) 2305 return true; 2306 } 2307 2308 return false; 2309 } 2310 2311 static void 2312 ath10k_update_per_peer_tx_stats(struct ath10k *ar, 2313 struct ieee80211_sta *sta, 2314 struct ath10k_per_peer_tx_stats *peer_stats) 2315 { 2316 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 2317 u8 rate = 0, sgi; 2318 struct rate_info txrate; 2319 2320 lockdep_assert_held(&ar->data_lock); 2321 2322 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 2323 txrate.bw = ATH10K_HW_BW(peer_stats->flags); 2324 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); 2325 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 2326 sgi = ATH10K_HW_GI(peer_stats->flags); 2327 2328 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 2329 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs); 2330 return; 2331 } 2332 2333 if (txrate.flags == WMI_RATE_PREAMBLE_HT && 2334 (txrate.mcs > 7 || txrate.nss < 1)) { 2335 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats", 2336 txrate.mcs, txrate.nss); 2337 return; 2338 } 2339 2340 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 2341 2342 if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 2343 txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 2344 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 2345 2346 if (!is_valid_legacy_rate(rate)) { 2347 ath10k_warn(ar, "Invalid legacy rate 
%hhd peer stats", 2348 rate); 2349 return; 2350 } 2351 2352 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 2353 rate *= 10; 2354 if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 2355 rate = rate - 5; 2356 arsta->txrate.legacy = rate; 2357 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 2358 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 2359 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 2360 } else { 2361 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 2362 arsta->txrate.mcs = txrate.mcs; 2363 } 2364 2365 if (sgi) 2366 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 2367 2368 arsta->txrate.nss = txrate.nss; 2369 arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20; 2370 } 2371 2372 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, 2373 struct sk_buff *skb) 2374 { 2375 struct htt_resp *resp = (struct htt_resp *)skb->data; 2376 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 2377 struct htt_per_peer_tx_stats_ind *tx_stats; 2378 struct ieee80211_sta *sta; 2379 struct ath10k_peer *peer; 2380 int peer_id, i; 2381 u8 ppdu_len, num_ppdu; 2382 2383 num_ppdu = resp->peer_tx_stats.num_ppdu; 2384 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); 2385 2386 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { 2387 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); 2388 return; 2389 } 2390 2391 tx_stats = (struct htt_per_peer_tx_stats_ind *) 2392 (resp->peer_tx_stats.payload); 2393 peer_id = __le16_to_cpu(tx_stats->peer_id); 2394 2395 rcu_read_lock(); 2396 spin_lock_bh(&ar->data_lock); 2397 peer = ath10k_peer_find_by_id(ar, peer_id); 2398 if (!peer) { 2399 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", 2400 peer_id); 2401 goto out; 2402 } 2403 2404 sta = peer->sta; 2405 for (i = 0; i < num_ppdu; i++) { 2406 tx_stats = (struct htt_per_peer_tx_stats_ind *) 2407 (resp->peer_tx_stats.payload + i * ppdu_len); 2408 2409 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); 2410 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); 2411 p_tx_stats->failed_bytes = 2412 __le32_to_cpu(tx_stats->failed_bytes); 2413 p_tx_stats->ratecode = tx_stats->ratecode; 2414 p_tx_stats->flags = tx_stats->flags; 2415 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); 2416 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); 2417 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); 2418 2419 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 2420 } 2421 2422 out: 2423 spin_unlock_bh(&ar->data_lock); 2424 rcu_read_unlock(); 2425 } 2426 2427 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2428 { 2429 struct ath10k_htt *htt = &ar->htt; 2430 struct htt_resp *resp = (struct htt_resp *)skb->data; 2431 enum htt_t2h_msg_type type; 2432 2433 /* confirm alignment */ 2434 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 2435 ath10k_warn(ar, "unaligned htt message, expect trouble\n"); 2436 2437 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 2438 resp->hdr.msg_type); 2439 2440 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { 2441 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", 2442 resp->hdr.msg_type, ar->htt.t2h_msg_types_max); 2443 return true; 2444 } 2445 type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; 2446 2447 switch (type) { 2448 case HTT_T2H_MSG_TYPE_VERSION_CONF: { 2449 htt->target_version_major = resp->ver_resp.major; 2450 htt->target_version_minor = resp->ver_resp.minor; 2451 complete(&htt->target_version_received); 

bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
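
/* Illustrative sketch of the indirection used in the handler above: the raw
 * msg_type in the HTT header is firmware-generation specific, so each
 * generation provides a translation table that maps the wire value to the
 * abstract enum the switch statement consumes (values hypothetical):
 *
 *	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
 *	e.g. t2h_msg_types[0x04] == HTT_T2H_MSG_TYPE_RX_IND
 */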

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
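
/* Background note (illustrative): the txdone_fifo drained in the completion
 * task below is used in a strict single-producer/single-consumer fashion,
 * which is what makes the lockless kfifo accessors safe here. The pairing
 * looks like:
 *
 *	producer (tx completion):	kfifo_put(&htt->txdone_fifo, tx_done);
 *	consumer (NAPI poll):		while (kfifo_get(&htt->txdone_fifo, &tx_done))
 *						ath10k_txrx_tx_unref(htt, &tx_done);
 */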

int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, num_rx_msdus;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
	 * process it first to utilize full available quota.
	 */
	while (quota < budget) {
		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
			break;

		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
		if (!skb) {
			resched_napi = true;
			goto exit;
		}

		spin_lock_bh(&htt->rx_ring.lock);
		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		dev_kfree_skb_any(skb);
		if (num_rx_msdus > 0)
			quota += num_rx_msdus;

		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
			resched_napi = true;
			goto exit;
		}
	}

	while (quota < budget) {
		/* no more data to receive */
		if (!atomic_read(&htt->num_mpdus_ready))
			break;

		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		quota += num_rx_msdus;
		atomic_dec(&htt->num_mpdus_ready);
		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    atomic_read(&htt->num_mpdus_ready)) {
			resched_napi = true;
			goto exit;
		}
	}

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
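
/* Caller-side usage sketch (illustrative, with hypothetical names): a NAPI
 * poll() implementation returns how much of its budget it consumed, and
 * returning the full budget asks to be polled again:
 *
 *	static int ath10k_example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct ath10k *ar = container_of(napi, struct ath10k, napi);
 *		int done = ath10k_htt_txrx_compl_task(ar, budget);
 *
 *		if (done < budget)
 *			napi_complete(napi);
 *
 *		return done;
 *	}
 */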