/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

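	/* 64-bit counterpart of ath10k_htt_rx_pop_paddr32_list(): the walk is
	 * identical, but the in-order indication carries 64-bit MSDU DMA
	 * addresses for targets with an extended address range.
	 */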
	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
"160" : "", 1314 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 1315 status->rate_idx, 1316 status->nss, 1317 status->freq, 1318 status->band, status->flag, 1319 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 1320 !!(status->flag & RX_FLAG_MMIC_ERROR), 1321 !!(status->flag & RX_FLAG_AMSDU_MORE)); 1322 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 1323 skb->data, skb->len); 1324 trace_ath10k_rx_hdr(ar, skb->data, skb->len); 1325 trace_ath10k_rx_payload(ar, skb->data, skb->len); 1326 1327 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 1328 } 1329 1330 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, 1331 struct ieee80211_hdr *hdr) 1332 { 1333 int len = ieee80211_hdrlen(hdr->frame_control); 1334 1335 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, 1336 ar->running_fw->fw_file.fw_features)) 1337 len = round_up(len, 4); 1338 1339 return len; 1340 } 1341 1342 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, 1343 struct sk_buff *msdu, 1344 struct ieee80211_rx_status *status, 1345 enum htt_rx_mpdu_encrypt_type enctype, 1346 bool is_decrypted, 1347 const u8 first_hdr[64]) 1348 { 1349 struct ieee80211_hdr *hdr; 1350 struct htt_rx_desc *rxd; 1351 size_t hdr_len; 1352 size_t crypto_len; 1353 bool is_first; 1354 bool is_last; 1355 bool msdu_limit_err; 1356 int bytes_aligned = ar->hw_params.decap_align_bytes; 1357 u8 *qos; 1358 1359 rxd = (void *)msdu->data - sizeof(*rxd); 1360 is_first = !!(rxd->msdu_end.common.info0 & 1361 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1362 is_last = !!(rxd->msdu_end.common.info0 & 1363 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1364 1365 /* Delivered decapped frame: 1366 * [802.11 header] 1367 * [crypto param] <-- can be trimmed if !fcs_err && 1368 * !decrypt_err && !peer_idx_invalid 1369 * [amsdu header] <-- only if A-MSDU 1370 * [rfc1042/llc] 1371 * [payload] 1372 * [FCS] <-- at end, needs to be trimmed 1373 */ 1374 1375 /* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when 1376 * deaggregate, so that unwanted MSDU-deaggregation is avoided for 1377 * error packets. If limit exceeds, hw sends all remaining MSDUs as 1378 * a single last MSDU with this msdu limit error set. 1379 */ 1380 msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd); 1381 1382 /* If MSDU limit error happens, then don't warn on, the partial raw MSDU 1383 * without first MSDU is expected in that case, and handled later here. 1384 */ 1385 /* This probably shouldn't happen but warn just in case */ 1386 if (WARN_ON_ONCE(!is_first && !msdu_limit_err)) 1387 return; 1388 1389 /* This probably shouldn't happen but warn just in case */ 1390 if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err)) 1391 return; 1392 1393 skb_trim(msdu, msdu->len - FCS_LEN); 1394 1395 /* Push original 80211 header */ 1396 if (unlikely(msdu_limit_err)) { 1397 hdr = (struct ieee80211_hdr *)first_hdr; 1398 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1399 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1400 1401 if (ieee80211_is_data_qos(hdr->frame_control)) { 1402 qos = ieee80211_get_qos_ctl(hdr); 1403 qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1404 } 1405 1406 if (crypto_len) 1407 memcpy(skb_push(msdu, crypto_len), 1408 (void *)hdr + round_up(hdr_len, bytes_aligned), 1409 crypto_len); 1410 1411 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1412 } 1413 1414 /* In most cases this will be true for sniffed frames. It makes sense 1415 * to deliver them as-is without stripping the crypto param. 
	 * This is necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));
	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..)
	 * msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long int *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long int *drop_cnt,
				    unsigned long int *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
1990 */ 1991 1992 if (!rx_status->freq) { 1993 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 1994 return false; 1995 } 1996 1997 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 1998 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 1999 return false; 2000 } 2001 2002 return true; 2003 } 2004 2005 static void ath10k_htt_rx_h_filter(struct ath10k *ar, 2006 struct sk_buff_head *amsdu, 2007 struct ieee80211_rx_status *rx_status, 2008 unsigned long int *drop_cnt) 2009 { 2010 if (skb_queue_empty(amsdu)) 2011 return; 2012 2013 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 2014 return; 2015 2016 if (drop_cnt) 2017 *drop_cnt += skb_queue_len(amsdu); 2018 2019 __skb_queue_purge(amsdu); 2020 } 2021 2022 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 2023 { 2024 struct ath10k *ar = htt->ar; 2025 struct ieee80211_rx_status *rx_status = &htt->rx_status; 2026 struct sk_buff_head amsdu; 2027 int ret; 2028 unsigned long int drop_cnt = 0; 2029 unsigned long int unchain_cnt = 0; 2030 unsigned long int drop_cnt_filter = 0; 2031 unsigned long int msdus_to_queue, num_msdus; 2032 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 2033 u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 2034 2035 __skb_queue_head_init(&amsdu); 2036 2037 spin_lock_bh(&htt->rx_ring.lock); 2038 if (htt->rx_confused) { 2039 spin_unlock_bh(&htt->rx_ring.lock); 2040 return -EIO; 2041 } 2042 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 2043 spin_unlock_bh(&htt->rx_ring.lock); 2044 2045 if (ret < 0) { 2046 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 2047 __skb_queue_purge(&amsdu); 2048 /* FIXME: It's probably a good idea to reboot the 2049 * device instead of leaving it inoperable. 2050 */ 2051 htt->rx_confused = true; 2052 return ret; 2053 } 2054 2055 num_msdus = skb_queue_len(&amsdu); 2056 2057 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 2058 2059 /* only for ret = 1 indicates chained msdus */ 2060 if (ret > 0) 2061 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 2062 2063 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 2064 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err); 2065 msdus_to_queue = skb_queue_len(&amsdu); 2066 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 2067 2068 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 2069 unchain_cnt, drop_cnt, drop_cnt_filter, 2070 msdus_to_queue); 2071 2072 return 0; 2073 } 2074 2075 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 2076 struct htt_rx_indication_hl *rx, 2077 struct sk_buff *skb) 2078 { 2079 struct ath10k *ar = htt->ar; 2080 struct ath10k_peer *peer; 2081 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2082 struct fw_rx_desc_hl *fw_desc; 2083 struct ieee80211_hdr *hdr; 2084 struct ieee80211_rx_status *rx_status; 2085 u16 peer_id; 2086 u8 rx_desc_len; 2087 int num_mpdu_ranges; 2088 size_t tot_hdr_len; 2089 struct ieee80211_channel *ch; 2090 2091 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2092 2093 spin_lock_bh(&ar->data_lock); 2094 peer = ath10k_peer_find_by_id(ar, peer_id); 2095 spin_unlock_bh(&ar->data_lock); 2096 if (!peer) 2097 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 2098 2099 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2100 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2101 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 2102 fw_desc = &rx->fw_desc; 2103 rx_desc_len = fw_desc->len; 2104 2105 /* I have not yet seen any case where num_mpdu_ranges > 1. 
2106 * qcacld does not seem to handle that case either, so we introduce the 2107 * same limitation here as well. 2108 */ 2109 if (num_mpdu_ranges > 1) 2110 ath10k_warn(ar, 2111 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 2112 num_mpdu_ranges); 2113 2114 if (mpdu_ranges->mpdu_range_status != 2115 HTT_RX_IND_MPDU_STATUS_OK) { 2116 ath10k_warn(ar, "MPDU range status: %d\n", 2117 mpdu_ranges->mpdu_range_status); 2118 goto err; 2119 } 2120 2121 /* Strip off all headers before the MAC header before delivery to 2122 * mac80211 2123 */ 2124 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 2125 sizeof(rx->ppdu) + sizeof(rx->prefix) + 2126 sizeof(rx->fw_desc) + 2127 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 2128 skb_pull(skb, tot_hdr_len); 2129 2130 hdr = (struct ieee80211_hdr *)skb->data; 2131 rx_status = IEEE80211_SKB_RXCB(skb); 2132 rx_status->chains |= BIT(0); 2133 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2134 rx->ppdu.combined_rssi; 2135 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2136 2137 spin_lock_bh(&ar->data_lock); 2138 ch = ar->scan_channel; 2139 if (!ch) 2140 ch = ar->rx_channel; 2141 if (!ch) 2142 ch = ath10k_htt_rx_h_any_channel(ar); 2143 if (!ch) 2144 ch = ar->tgt_oper_chan; 2145 spin_unlock_bh(&ar->data_lock); 2146 2147 if (ch) { 2148 rx_status->band = ch->band; 2149 rx_status->freq = ch->center_freq; 2150 } 2151 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) 2152 rx_status->flag &= ~RX_FLAG_AMSDU_MORE; 2153 else 2154 rx_status->flag |= RX_FLAG_AMSDU_MORE; 2155 2156 /* Not entirely sure about this, but all frames from the chipset have 2157 * the protected flag set even though they have already been decrypted. 2158 * Unmasking this flag is necessary in order for mac80211 not to drop 2159 * the frame. 2160 * TODO: Verify this is always the case or find out a way to check 2161 * if there has been hw decryption. 2162 */ 2163 if (ieee80211_has_protected(hdr->frame_control)) { 2164 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2165 rx_status->flag |= RX_FLAG_DECRYPTED | 2166 RX_FLAG_IV_STRIPPED | 2167 RX_FLAG_MMIC_STRIPPED; 2168 } 2169 2170 ieee80211_rx_ni(ar->hw, skb); 2171 2172 /* We have delivered the skb to the upper layers (mac80211) so we 2173 * must not free it.
2174 */ 2175 return false; 2176 err: 2177 /* Tell the caller that it must free the skb since we have not 2178 * consumed it 2179 */ 2180 return true; 2181 } 2182 2183 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 2184 struct htt_rx_indication *rx) 2185 { 2186 struct ath10k *ar = htt->ar; 2187 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2188 int num_mpdu_ranges; 2189 int i, mpdu_count = 0; 2190 u16 peer_id; 2191 u8 tid; 2192 2193 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2194 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2195 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2196 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2197 2198 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 2199 2200 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 2201 rx, sizeof(*rx) + 2202 (sizeof(struct htt_rx_indication_mpdu_range) * 2203 num_mpdu_ranges)); 2204 2205 for (i = 0; i < num_mpdu_ranges; i++) 2206 mpdu_count += mpdu_ranges[i].mpdu_count; 2207 2208 atomic_add(mpdu_count, &htt->num_mpdus_ready); 2209 2210 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, 2211 num_mpdu_ranges); 2212 } 2213 2214 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 2215 struct sk_buff *skb) 2216 { 2217 struct ath10k_htt *htt = &ar->htt; 2218 struct htt_resp *resp = (struct htt_resp *)skb->data; 2219 struct htt_tx_done tx_done = {}; 2220 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 2221 __le16 msdu_id, *msdus; 2222 bool rssi_enabled = false; 2223 u8 msdu_count = 0; 2224 int i; 2225 2226 switch (status) { 2227 case HTT_DATA_TX_STATUS_NO_ACK: 2228 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 2229 break; 2230 case HTT_DATA_TX_STATUS_OK: 2231 tx_done.status = HTT_TX_COMPL_STATE_ACK; 2232 break; 2233 case HTT_DATA_TX_STATUS_DISCARD: 2234 case HTT_DATA_TX_STATUS_POSTPONE: 2235 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: 2236 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 2237 break; 2238 default: 2239 ath10k_warn(ar, "unhandled tx completion status %d\n", status); 2240 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 2241 break; 2242 } 2243 2244 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 2245 resp->data_tx_completion.num_msdus); 2246 2247 msdu_count = resp->data_tx_completion.num_msdus; 2248 2249 if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) 2250 rssi_enabled = true; 2251 2252 for (i = 0; i < msdu_count; i++) { 2253 msdus = resp->data_tx_completion.msdus; 2254 msdu_id = msdus[i]; 2255 tx_done.msdu_id = __le16_to_cpu(msdu_id); 2256 2257 if (rssi_enabled) { 2258 /* Total no of MSDUs should be even, 2259 * if odd MSDUs are sent firmware fills 2260 * last msdu id with 0xffff 2261 */ 2262 if (msdu_count & 0x01) { 2263 msdu_id = msdus[msdu_count + i + 1]; 2264 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2265 } else { 2266 msdu_id = msdus[msdu_count + i]; 2267 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2268 } 2269 } 2270 2271 /* kfifo_put: In practice firmware shouldn't fire off per-CE 2272 * interrupt and main interrupt (MSI/-X range case) for the same 2273 * HTC service so it should be safe to use kfifo_put w/o lock. 2274 * 2275 * From kfifo_put() documentation: 2276 * Note that with only one concurrent reader and one concurrent 2277 * writer, you don't need extra locking to use these macro. 
2278 */ 2279 if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 2280 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 2281 tx_done.msdu_id, tx_done.status); 2282 ath10k_txrx_tx_unref(htt, &tx_done); 2283 } 2284 } 2285 } 2286 2287 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 2288 { 2289 struct htt_rx_addba *ev = &resp->rx_addba; 2290 struct ath10k_peer *peer; 2291 struct ath10k_vif *arvif; 2292 u16 info0, tid, peer_id; 2293 2294 info0 = __le16_to_cpu(ev->info0); 2295 tid = MS(info0, HTT_RX_BA_INFO0_TID); 2296 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 2297 2298 ath10k_dbg(ar, ATH10K_DBG_HTT, 2299 "htt rx addba tid %hu peer_id %hu size %hhu\n", 2300 tid, peer_id, ev->window_size); 2301 2302 spin_lock_bh(&ar->data_lock); 2303 peer = ath10k_peer_find_by_id(ar, peer_id); 2304 if (!peer) { 2305 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", 2306 peer_id); 2307 spin_unlock_bh(&ar->data_lock); 2308 return; 2309 } 2310 2311 arvif = ath10k_get_arvif(ar, peer->vdev_id); 2312 if (!arvif) { 2313 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 2314 peer->vdev_id); 2315 spin_unlock_bh(&ar->data_lock); 2316 return; 2317 } 2318 2319 ath10k_dbg(ar, ATH10K_DBG_HTT, 2320 "htt rx start rx ba session sta %pM tid %hu size %hhu\n", 2321 peer->addr, tid, ev->window_size); 2322 2323 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 2324 spin_unlock_bh(&ar->data_lock); 2325 } 2326 2327 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 2328 { 2329 struct htt_rx_delba *ev = &resp->rx_delba; 2330 struct ath10k_peer *peer; 2331 struct ath10k_vif *arvif; 2332 u16 info0, tid, peer_id; 2333 2334 info0 = __le16_to_cpu(ev->info0); 2335 tid = MS(info0, HTT_RX_BA_INFO0_TID); 2336 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 2337 2338 ath10k_dbg(ar, ATH10K_DBG_HTT, 2339 "htt rx delba tid %hu peer_id %hu\n", 2340 tid, peer_id); 2341 2342 spin_lock_bh(&ar->data_lock); 2343 peer = ath10k_peer_find_by_id(ar, peer_id); 2344 if (!peer) { 2345 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n", 2346 peer_id); 2347 spin_unlock_bh(&ar->data_lock); 2348 return; 2349 } 2350 2351 arvif = ath10k_get_arvif(ar, peer->vdev_id); 2352 if (!arvif) { 2353 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n", 2354 peer->vdev_id); 2355 spin_unlock_bh(&ar->data_lock); 2356 return; 2357 } 2358 2359 ath10k_dbg(ar, ATH10K_DBG_HTT, 2360 "htt rx stop rx ba session sta %pM tid %hu\n", 2361 peer->addr, tid); 2362 2363 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); 2364 spin_unlock_bh(&ar->data_lock); 2365 } 2366 2367 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, 2368 struct sk_buff_head *amsdu) 2369 { 2370 struct sk_buff *msdu; 2371 struct htt_rx_desc *rxd; 2372 2373 if (skb_queue_empty(list)) 2374 return -ENOBUFS; 2375 2376 if (WARN_ON(!skb_queue_empty(amsdu))) 2377 return -EINVAL; 2378 2379 while ((msdu = __skb_dequeue(list))) { 2380 __skb_queue_tail(amsdu, msdu); 2381 2382 rxd = (void *)msdu->data - sizeof(*rxd); 2383 if (rxd->msdu_end.common.info0 & 2384 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) 2385 break; 2386 } 2387 2388 msdu = skb_peek_tail(amsdu); 2389 rxd = (void *)msdu->data - sizeof(*rxd); 2390 if (!(rxd->msdu_end.common.info0 & 2391 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { 2392 skb_queue_splice_init(amsdu, list); 2393 return -EAGAIN; 2394 } 2395 2396 return 0; 2397 } 2398 2399 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
2400 struct sk_buff *skb) 2401 { 2402 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2403 2404 if (!ieee80211_has_protected(hdr->frame_control)) 2405 return; 2406 2407 /* Offloaded frames are already decrypted but firmware insists they are 2408 * protected in the 802.11 header. Strip the flag. Otherwise mac80211 2409 * will drop the frame. 2410 */ 2411 2412 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2413 status->flag |= RX_FLAG_DECRYPTED | 2414 RX_FLAG_IV_STRIPPED | 2415 RX_FLAG_MMIC_STRIPPED; 2416 } 2417 2418 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, 2419 struct sk_buff_head *list) 2420 { 2421 struct ath10k_htt *htt = &ar->htt; 2422 struct ieee80211_rx_status *status = &htt->rx_status; 2423 struct htt_rx_offload_msdu *rx; 2424 struct sk_buff *msdu; 2425 size_t offset; 2426 2427 while ((msdu = __skb_dequeue(list))) { 2428 /* Offloaded frames don't have Rx descriptor. Instead they have 2429 * a short meta information header. 2430 */ 2431 2432 rx = (void *)msdu->data; 2433 2434 skb_put(msdu, sizeof(*rx)); 2435 skb_pull(msdu, sizeof(*rx)); 2436 2437 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { 2438 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); 2439 dev_kfree_skb_any(msdu); 2440 continue; 2441 } 2442 2443 skb_put(msdu, __le16_to_cpu(rx->msdu_len)); 2444 2445 /* Offloaded rx header length isn't multiple of 2 nor 4 so the 2446 * actual payload is unaligned. Align the frame. Otherwise 2447 * mac80211 complains. This shouldn't reduce performance much 2448 * because these offloaded frames are rare. 2449 */ 2450 offset = 4 - ((unsigned long)msdu->data & 3); 2451 skb_put(msdu, offset); 2452 memmove(msdu->data + offset, msdu->data, msdu->len); 2453 skb_pull(msdu, offset); 2454 2455 /* FIXME: The frame is NWifi. Re-construct QoS Control 2456 * if possible later. 
2457 */ 2458 2459 memset(status, 0, sizeof(*status)); 2460 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2461 2462 ath10k_htt_rx_h_rx_offload_prot(status, msdu); 2463 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 2464 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2465 } 2466 } 2467 2468 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 2469 { 2470 struct ath10k_htt *htt = &ar->htt; 2471 struct htt_resp *resp = (void *)skb->data; 2472 struct ieee80211_rx_status *status = &htt->rx_status; 2473 struct sk_buff_head list; 2474 struct sk_buff_head amsdu; 2475 u16 peer_id; 2476 u16 msdu_count; 2477 u8 vdev_id; 2478 u8 tid; 2479 bool offload; 2480 bool frag; 2481 int ret; 2482 2483 lockdep_assert_held(&htt->rx_ring.lock); 2484 2485 if (htt->rx_confused) 2486 return -EIO; 2487 2488 skb_pull(skb, sizeof(resp->hdr)); 2489 skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 2490 2491 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 2492 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 2493 vdev_id = resp->rx_in_ord_ind.vdev_id; 2494 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 2495 offload = !!(resp->rx_in_ord_ind.info & 2496 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 2497 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 2498 2499 ath10k_dbg(ar, ATH10K_DBG_HTT, 2500 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 2501 vdev_id, peer_id, tid, offload, frag, msdu_count); 2502 2503 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 2504 ath10k_warn(ar, "dropping invalid in order rx indication\n"); 2505 return -EINVAL; 2506 } 2507 2508 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 2509 * extracted and processed. 2510 */ 2511 __skb_queue_head_init(&list); 2512 if (ar->hw_params.target_64bit) 2513 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 2514 &list); 2515 else 2516 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 2517 &list); 2518 2519 if (ret < 0) { 2520 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 2521 htt->rx_confused = true; 2522 return -EIO; 2523 } 2524 2525 /* Offloaded frames are very different and need to be handled 2526 * separately. 2527 */ 2528 if (offload) 2529 ath10k_htt_rx_h_rx_offload(ar, &list); 2530 2531 while (!skb_queue_empty(&list)) { 2532 __skb_queue_head_init(&amsdu); 2533 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); 2534 switch (ret) { 2535 case 0: 2536 /* Note: The in-order indication may report interleaved 2537 * frames from different PPDUs meaning reported rx rate 2538 * to mac80211 isn't accurate/reliable. It's still 2539 * better to report something than nothing though. This 2540 * should still give an idea about rx rate to the user. 2541 */ 2542 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 2543 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 2544 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 2545 NULL); 2546 ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 2547 break; 2548 case -EAGAIN: 2549 /* fall through */ 2550 default: 2551 /* Should not happen. 
*/ 2552 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); 2553 htt->rx_confused = true; 2554 __skb_queue_purge(&list); 2555 return -EIO; 2556 } 2557 } 2558 return ret; 2559 } 2560 2561 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 2562 const __le32 *resp_ids, 2563 int num_resp_ids) 2564 { 2565 int i; 2566 u32 resp_id; 2567 2568 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", 2569 num_resp_ids); 2570 2571 for (i = 0; i < num_resp_ids; i++) { 2572 resp_id = le32_to_cpu(resp_ids[i]); 2573 2574 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", 2575 resp_id); 2576 2577 /* TODO: free resp_id */ 2578 } 2579 } 2580 2581 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) 2582 { 2583 struct ieee80211_hw *hw = ar->hw; 2584 struct ieee80211_txq *txq; 2585 struct htt_resp *resp = (struct htt_resp *)skb->data; 2586 struct htt_tx_fetch_record *record; 2587 size_t len; 2588 size_t max_num_bytes; 2589 size_t max_num_msdus; 2590 size_t num_bytes; 2591 size_t num_msdus; 2592 const __le32 *resp_ids; 2593 u16 num_records; 2594 u16 num_resp_ids; 2595 u16 peer_id; 2596 u8 tid; 2597 int ret; 2598 int i; 2599 2600 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); 2601 2602 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); 2603 if (unlikely(skb->len < len)) { 2604 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); 2605 return; 2606 } 2607 2608 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); 2609 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); 2610 2611 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; 2612 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; 2613 2614 if (unlikely(skb->len < len)) { 2615 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); 2616 return; 2617 } 2618 2619 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n", 2620 num_records, num_resp_ids, 2621 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); 2622 2623 if (!ar->htt.tx_q_state.enabled) { 2624 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); 2625 return; 2626 } 2627 2628 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { 2629 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); 2630 return; 2631 } 2632 2633 rcu_read_lock(); 2634 2635 for (i = 0; i < num_records; i++) { 2636 record = &resp->tx_fetch_ind.records[i]; 2637 peer_id = MS(le16_to_cpu(record->info), 2638 HTT_TX_FETCH_RECORD_INFO_PEER_ID); 2639 tid = MS(le16_to_cpu(record->info), 2640 HTT_TX_FETCH_RECORD_INFO_TID); 2641 max_num_msdus = le16_to_cpu(record->num_msdus); 2642 max_num_bytes = le32_to_cpu(record->num_bytes); 2643 2644 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n", 2645 i, peer_id, tid, max_num_msdus, max_num_bytes); 2646 2647 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2648 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2649 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2650 peer_id, tid); 2651 continue; 2652 } 2653 2654 spin_lock_bh(&ar->data_lock); 2655 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2656 spin_unlock_bh(&ar->data_lock); 2657 2658 /* It is okay to release the lock and use txq because RCU read 2659 * lock is held. 
2660 */ 2661 2662 if (unlikely(!txq)) { 2663 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", 2664 peer_id, tid); 2665 continue; 2666 } 2667 2668 num_msdus = 0; 2669 num_bytes = 0; 2670 2671 while (num_msdus < max_num_msdus && 2672 num_bytes < max_num_bytes) { 2673 ret = ath10k_mac_tx_push_txq(hw, txq); 2674 if (ret < 0) 2675 break; 2676 2677 num_msdus++; 2678 num_bytes += ret; 2679 } 2680 2681 record->num_msdus = cpu_to_le16(num_msdus); 2682 record->num_bytes = cpu_to_le32(num_bytes); 2683 2684 ath10k_htt_tx_txq_recalc(hw, txq); 2685 } 2686 2687 rcu_read_unlock(); 2688 2689 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); 2690 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); 2691 2692 ret = ath10k_htt_tx_fetch_resp(ar, 2693 resp->tx_fetch_ind.token, 2694 resp->tx_fetch_ind.fetch_seq_num, 2695 resp->tx_fetch_ind.records, 2696 num_records); 2697 if (unlikely(ret)) { 2698 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", 2699 le32_to_cpu(resp->tx_fetch_ind.token), ret); 2700 /* FIXME: request fw restart */ 2701 } 2702 2703 ath10k_htt_tx_txq_sync(ar); 2704 } 2705 2706 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, 2707 struct sk_buff *skb) 2708 { 2709 const struct htt_resp *resp = (void *)skb->data; 2710 size_t len; 2711 int num_resp_ids; 2712 2713 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); 2714 2715 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); 2716 if (unlikely(skb->len < len)) { 2717 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); 2718 return; 2719 } 2720 2721 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); 2722 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; 2723 2724 if (unlikely(skb->len < len)) { 2725 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); 2726 return; 2727 } 2728 2729 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, 2730 resp->tx_fetch_confirm.resp_ids, 2731 num_resp_ids); 2732 } 2733 2734 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, 2735 struct sk_buff *skb) 2736 { 2737 const struct htt_resp *resp = (void *)skb->data; 2738 const struct htt_tx_mode_switch_record *record; 2739 struct ieee80211_txq *txq; 2740 struct ath10k_txq *artxq; 2741 size_t len; 2742 size_t num_records; 2743 enum htt_tx_mode_switch_mode mode; 2744 bool enable; 2745 u16 info0; 2746 u16 info1; 2747 u16 threshold; 2748 u16 peer_id; 2749 u8 tid; 2750 int i; 2751 2752 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); 2753 2754 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); 2755 if (unlikely(skb->len < len)) { 2756 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); 2757 return; 2758 } 2759 2760 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); 2761 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); 2762 2763 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); 2764 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS); 2765 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); 2766 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 2767 2768 ath10k_dbg(ar, ATH10K_DBG_HTT, 2769 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n", 2770 info0, info1, enable, num_records, mode, threshold); 2771 2772 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; 2773 2774 if (unlikely(skb->len < len)) { 2775 ath10k_warn(ar,
"received corrupted tx_mode_switch_mode_ind event: too many records\n"); 2776 return; 2777 } 2778 2779 switch (mode) { 2780 case HTT_TX_MODE_SWITCH_PUSH: 2781 case HTT_TX_MODE_SWITCH_PUSH_PULL: 2782 break; 2783 default: 2784 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 2785 mode); 2786 return; 2787 } 2788 2789 if (!enable) 2790 return; 2791 2792 ar->htt.tx_q_state.enabled = enable; 2793 ar->htt.tx_q_state.mode = mode; 2794 ar->htt.tx_q_state.num_push_allowed = threshold; 2795 2796 rcu_read_lock(); 2797 2798 for (i = 0; i < num_records; i++) { 2799 record = &resp->tx_mode_switch_ind.records[i]; 2800 info0 = le16_to_cpu(record->info0); 2801 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 2802 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 2803 2804 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2805 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2806 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2807 peer_id, tid); 2808 continue; 2809 } 2810 2811 spin_lock_bh(&ar->data_lock); 2812 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2813 spin_unlock_bh(&ar->data_lock); 2814 2815 /* It is okay to release the lock and use txq because RCU read 2816 * lock is held. 2817 */ 2818 2819 if (unlikely(!txq)) { 2820 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", 2821 peer_id, tid); 2822 continue; 2823 } 2824 2825 spin_lock_bh(&ar->htt.tx_lock); 2826 artxq = (void *)txq->drv_priv; 2827 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 2828 spin_unlock_bh(&ar->htt.tx_lock); 2829 } 2830 2831 rcu_read_unlock(); 2832 2833 ath10k_mac_tx_push_pending(ar); 2834 } 2835 2836 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2837 { 2838 bool release; 2839 2840 release = ath10k_htt_t2h_msg_handler(ar, skb); 2841 2842 /* Free the indication buffer */ 2843 if (release) 2844 dev_kfree_skb_any(skb); 2845 } 2846 2847 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) 2848 { 2849 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 2850 18, 24, 36, 48, 54}; 2851 int i; 2852 2853 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { 2854 if (rate == legacy_rates[i]) 2855 return i; 2856 } 2857 2858 ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate); 2859 return -EINVAL; 2860 } 2861 2862 static void 2863 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, 2864 struct ath10k_sta *arsta, 2865 struct ath10k_per_peer_tx_stats *pstats, 2866 s8 legacy_rate_idx) 2867 { 2868 struct rate_info *txrate = &arsta->txrate; 2869 struct ath10k_htt_tx_stats *tx_stats; 2870 int idx, ht_idx, gi, mcs, bw, nss; 2871 2872 if (!arsta->tx_stats) 2873 return; 2874 2875 tx_stats = arsta->tx_stats; 2876 gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI); 2877 ht_idx = txrate->mcs + txrate->nss * 8; 2878 mcs = txrate->mcs; 2879 bw = txrate->bw; 2880 nss = txrate->nss; 2881 idx = mcs * 8 + 8 * 10 * nss; 2882 idx += bw * 2 + gi; 2883 2884 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] 2885 2886 if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) { 2887 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; 2888 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; 2889 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; 2890 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; 2891 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; 2892 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; 2893 } else if (txrate->flags == RATE_INFO_FLAGS_MCS) { 2894 
STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; 2895 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; 2896 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; 2897 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; 2898 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; 2899 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; 2900 } else { 2901 mcs = legacy_rate_idx; 2902 2903 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; 2904 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; 2905 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; 2906 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; 2907 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; 2908 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; 2909 } 2910 2911 if (ATH10K_HW_AMPDU(pstats->flags)) { 2912 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); 2913 2914 if (txrate->flags == RATE_INFO_FLAGS_MCS) { 2915 STATS_OP_FMT(AMPDU).ht[0][ht_idx] += 2916 pstats->succ_bytes + pstats->retry_bytes; 2917 STATS_OP_FMT(AMPDU).ht[1][ht_idx] += 2918 pstats->succ_pkts + pstats->retry_pkts; 2919 } else { 2920 STATS_OP_FMT(AMPDU).vht[0][mcs] += 2921 pstats->succ_bytes + pstats->retry_bytes; 2922 STATS_OP_FMT(AMPDU).vht[1][mcs] += 2923 pstats->succ_pkts + pstats->retry_pkts; 2924 } 2925 STATS_OP_FMT(AMPDU).bw[0][bw] += 2926 pstats->succ_bytes + pstats->retry_bytes; 2927 STATS_OP_FMT(AMPDU).nss[0][nss] += 2928 pstats->succ_bytes + pstats->retry_bytes; 2929 STATS_OP_FMT(AMPDU).gi[0][gi] += 2930 pstats->succ_bytes + pstats->retry_bytes; 2931 STATS_OP_FMT(AMPDU).rate_table[0][idx] += 2932 pstats->succ_bytes + pstats->retry_bytes; 2933 STATS_OP_FMT(AMPDU).bw[1][bw] += 2934 pstats->succ_pkts + pstats->retry_pkts; 2935 STATS_OP_FMT(AMPDU).nss[1][nss] += 2936 pstats->succ_pkts + pstats->retry_pkts; 2937 STATS_OP_FMT(AMPDU).gi[1][gi] += 2938 pstats->succ_pkts + pstats->retry_pkts; 2939 STATS_OP_FMT(AMPDU).rate_table[1][idx] += 2940 pstats->succ_pkts + pstats->retry_pkts; 2941 } else { 2942 tx_stats->ack_fails += 2943 ATH10K_HW_BA_FAIL(pstats->flags); 2944 } 2945 2946 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; 2947 STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes; 2948 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; 2949 2950 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; 2951 STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts; 2952 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; 2953 2954 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 2955 STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; 2956 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 2957 2958 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 2959 STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; 2960 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 2961 2962 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 2963 STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; 2964 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 2965 2966 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 2967 STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; 2968 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 2969 2970 if (txrate->flags >= RATE_INFO_FLAGS_MCS) { 2971 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; 2972 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; 2973 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; 2974 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; 2975 
STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; 2976 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; 2977 } 2978 } 2979 2980 static void 2981 ath10k_update_per_peer_tx_stats(struct ath10k *ar, 2982 struct ieee80211_sta *sta, 2983 struct ath10k_per_peer_tx_stats *peer_stats) 2984 { 2985 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 2986 struct ieee80211_chanctx_conf *conf = NULL; 2987 u8 rate = 0, sgi; 2988 s8 rate_idx = 0; 2989 bool skip_auto_rate; 2990 struct rate_info txrate; 2991 2992 lockdep_assert_held(&ar->data_lock); 2993 2994 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 2995 txrate.bw = ATH10K_HW_BW(peer_stats->flags); 2996 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); 2997 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 2998 sgi = ATH10K_HW_GI(peer_stats->flags); 2999 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); 3000 3001 /* Firmware's rate control skips broadcast/management frames, 3002 * if the host has configured fixed rates and in some other special cases. 3003 */ 3004 if (skip_auto_rate) 3005 return; 3006 3007 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 3008 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs); 3009 return; 3010 } 3011 3012 if (txrate.flags == WMI_RATE_PREAMBLE_HT && 3013 (txrate.mcs > 7 || txrate.nss < 1)) { 3014 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats", 3015 txrate.mcs, txrate.nss); 3016 return; 3017 } 3018 3019 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 3020 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 3021 if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 3022 txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 3023 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 3024 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 3025 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 3026 rate = 5; 3027 rate_idx = ath10k_get_legacy_rate_idx(ar, rate); 3028 if (rate_idx < 0) 3029 return; 3030 arsta->txrate.legacy = rate; 3031 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 3032 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 3033 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 3034 } else { 3035 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 3036 arsta->txrate.mcs = txrate.mcs; 3037 } 3038 3039 switch (txrate.flags) { 3040 case WMI_RATE_PREAMBLE_OFDM: 3041 if (arsta->arvif && arsta->arvif->vif) 3042 conf = rcu_dereference(arsta->arvif->vif->chanctx_conf); 3043 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 3044 arsta->tx_info.status.rates[0].idx = rate_idx - 4; 3045 break; 3046 case WMI_RATE_PREAMBLE_CCK: 3047 arsta->tx_info.status.rates[0].idx = rate_idx; 3048 if (sgi) 3049 arsta->tx_info.status.rates[0].flags |= 3050 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | 3051 IEEE80211_TX_RC_SHORT_GI); 3052 break; 3053 case WMI_RATE_PREAMBLE_HT: 3054 arsta->tx_info.status.rates[0].idx = 3055 txrate.mcs + ((txrate.nss - 1) * 8); 3056 if (sgi) 3057 arsta->tx_info.status.rates[0].flags |= 3058 IEEE80211_TX_RC_SHORT_GI; 3059 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 3060 break; 3061 case WMI_RATE_PREAMBLE_VHT: 3062 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], 3063 txrate.mcs, txrate.nss); 3064 if (sgi) 3065 arsta->tx_info.status.rates[0].flags |= 3066 IEEE80211_TX_RC_SHORT_GI; 3067 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 3068 break; 3069 } 3070 3071 arsta->txrate.nss = txrate.nss; 3072 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); 3073 if (sgi) 3074
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 3075 3076 switch (arsta->txrate.bw) { 3077 case RATE_INFO_BW_40: 3078 arsta->tx_info.status.rates[0].flags |= 3079 IEEE80211_TX_RC_40_MHZ_WIDTH; 3080 break; 3081 case RATE_INFO_BW_80: 3082 arsta->tx_info.status.rates[0].flags |= 3083 IEEE80211_TX_RC_80_MHZ_WIDTH; 3084 break; 3085 } 3086 3087 if (peer_stats->succ_pkts) { 3088 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 3089 arsta->tx_info.status.rates[0].count = 1; 3090 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 3091 } 3092 3093 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) 3094 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, 3095 rate_idx); 3096 } 3097 3098 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, 3099 struct sk_buff *skb) 3100 { 3101 struct htt_resp *resp = (struct htt_resp *)skb->data; 3102 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3103 struct htt_per_peer_tx_stats_ind *tx_stats; 3104 struct ieee80211_sta *sta; 3105 struct ath10k_peer *peer; 3106 int peer_id, i; 3107 u8 ppdu_len, num_ppdu; 3108 3109 num_ppdu = resp->peer_tx_stats.num_ppdu; 3110 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); 3111 3112 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { 3113 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); 3114 return; 3115 } 3116 3117 tx_stats = (struct htt_per_peer_tx_stats_ind *) 3118 (resp->peer_tx_stats.payload); 3119 peer_id = __le16_to_cpu(tx_stats->peer_id); 3120 3121 rcu_read_lock(); 3122 spin_lock_bh(&ar->data_lock); 3123 peer = ath10k_peer_find_by_id(ar, peer_id); 3124 if (!peer || !peer->sta) { 3125 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", 3126 peer_id); 3127 goto out; 3128 } 3129 3130 sta = peer->sta; 3131 for (i = 0; i < num_ppdu; i++) { 3132 tx_stats = (struct htt_per_peer_tx_stats_ind *) 3133 (resp->peer_tx_stats.payload + i * ppdu_len); 3134 3135 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); 3136 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); 3137 p_tx_stats->failed_bytes = 3138 __le32_to_cpu(tx_stats->failed_bytes); 3139 p_tx_stats->ratecode = tx_stats->ratecode; 3140 p_tx_stats->flags = tx_stats->flags; 3141 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); 3142 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); 3143 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); 3144 3145 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 3146 } 3147 3148 out: 3149 spin_unlock_bh(&ar->data_lock); 3150 rcu_read_unlock(); 3151 } 3152 3153 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) 3154 { 3155 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; 3156 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3157 struct ath10k_10_2_peer_tx_stats *tx_stats; 3158 struct ieee80211_sta *sta; 3159 struct ath10k_peer *peer; 3160 u16 log_type = __le16_to_cpu(hdr->log_type); 3161 u32 peer_id = 0, i; 3162 3163 if (log_type != ATH_PKTLOG_TYPE_TX_STAT) 3164 return; 3165 3166 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + 3167 ATH10K_10_2_TX_STATS_OFFSET); 3168 3169 if (!tx_stats->tx_ppdu_cnt) 3170 return; 3171 3172 peer_id = tx_stats->peer_id; 3173 3174 rcu_read_lock(); 3175 spin_lock_bh(&ar->data_lock); 3176 peer = ath10k_peer_find_by_id(ar, peer_id); 3177 if (!peer || !peer->sta) { 3178 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", 3179 peer_id); 3180 goto out; 3181 } 3182 3183 sta = peer->sta; 3184 for (i = 0; i < 
tx_stats->tx_ppdu_cnt; i++) { 3185 p_tx_stats->succ_bytes = 3186 __le16_to_cpu(tx_stats->success_bytes[i]); 3187 p_tx_stats->retry_bytes = 3188 __le16_to_cpu(tx_stats->retry_bytes[i]); 3189 p_tx_stats->failed_bytes = 3190 __le16_to_cpu(tx_stats->failed_bytes[i]); 3191 p_tx_stats->ratecode = tx_stats->ratecode[i]; 3192 p_tx_stats->flags = tx_stats->flags[i]; 3193 p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; 3194 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; 3195 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; 3196 3197 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 3198 } 3199 spin_unlock_bh(&ar->data_lock); 3200 rcu_read_unlock(); 3201 3202 return; 3203 3204 out: 3205 spin_unlock_bh(&ar->data_lock); 3206 rcu_read_unlock(); 3207 } 3208 3209 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 3210 { 3211 struct ath10k_htt *htt = &ar->htt; 3212 struct htt_resp *resp = (struct htt_resp *)skb->data; 3213 enum htt_t2h_msg_type type; 3214 3215 /* confirm alignment */ 3216 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 3217 ath10k_warn(ar, "unaligned htt message, expect trouble\n"); 3218 3219 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 3220 resp->hdr.msg_type); 3221 3222 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { 3223 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", 3224 resp->hdr.msg_type, ar->htt.t2h_msg_types_max); 3225 return true; 3226 } 3227 type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; 3228 3229 switch (type) { 3230 case HTT_T2H_MSG_TYPE_VERSION_CONF: { 3231 htt->target_version_major = resp->ver_resp.major; 3232 htt->target_version_minor = resp->ver_resp.minor; 3233 complete(&htt->target_version_received); 3234 break; 3235 } 3236 case HTT_T2H_MSG_TYPE_RX_IND: 3237 if (ar->dev_type == ATH10K_DEV_TYPE_HL) 3238 return ath10k_htt_rx_proc_rx_ind_hl(htt, 3239 &resp->rx_ind_hl, 3240 skb); 3241 else 3242 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); 3243 break; 3244 case HTT_T2H_MSG_TYPE_PEER_MAP: { 3245 struct htt_peer_map_event ev = { 3246 .vdev_id = resp->peer_map.vdev_id, 3247 .peer_id = __le16_to_cpu(resp->peer_map.peer_id), 3248 }; 3249 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); 3250 ath10k_peer_map_event(htt, &ev); 3251 break; 3252 } 3253 case HTT_T2H_MSG_TYPE_PEER_UNMAP: { 3254 struct htt_peer_unmap_event ev = { 3255 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), 3256 }; 3257 ath10k_peer_unmap_event(htt, &ev); 3258 break; 3259 } 3260 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { 3261 struct htt_tx_done tx_done = {}; 3262 int status = __le32_to_cpu(resp->mgmt_tx_completion.status); 3263 int info = __le32_to_cpu(resp->mgmt_tx_completion.info); 3264 3265 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); 3266 3267 switch (status) { 3268 case HTT_MGMT_TX_STATUS_OK: 3269 tx_done.status = HTT_TX_COMPL_STATE_ACK; 3270 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, 3271 ar->wmi.svc_map) && 3272 (resp->mgmt_tx_completion.flags & 3273 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { 3274 tx_done.ack_rssi = 3275 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, 3276 info); 3277 } 3278 break; 3279 case HTT_MGMT_TX_STATUS_RETRY: 3280 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 3281 break; 3282 case HTT_MGMT_TX_STATUS_DROP: 3283 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 3284 break; 3285 } 3286 3287 status = ath10k_txrx_tx_unref(htt, &tx_done); 3288 if (!status) { 3289 spin_lock_bh(&htt->tx_lock); 3290 ath10k_htt_tx_mgmt_dec_pending(htt); 3291 spin_unlock_bh(&htt->tx_lock); 
3292 } 3293 break; 3294 } 3295 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: 3296 ath10k_htt_rx_tx_compl_ind(htt->ar, skb); 3297 break; 3298 case HTT_T2H_MSG_TYPE_SEC_IND: { 3299 struct ath10k *ar = htt->ar; 3300 struct htt_security_indication *ev = &resp->security_indication; 3301 3302 ath10k_dbg(ar, ATH10K_DBG_HTT, 3303 "sec ind peer_id %d unicast %d type %d\n", 3304 __le16_to_cpu(ev->peer_id), 3305 !!(ev->flags & HTT_SECURITY_IS_UNICAST), 3306 MS(ev->flags, HTT_SECURITY_TYPE)); 3307 complete(&ar->install_key_done); 3308 break; 3309 } 3310 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { 3311 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 3312 skb->data, skb->len); 3313 atomic_inc(&htt->num_mpdus_ready); 3314 break; 3315 } 3316 case HTT_T2H_MSG_TYPE_TEST: 3317 break; 3318 case HTT_T2H_MSG_TYPE_STATS_CONF: 3319 trace_ath10k_htt_stats(ar, skb->data, skb->len); 3320 break; 3321 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: 3322 /* Firmware can return tx frames if it's unable to fully 3323 * process them and suspects host may be able to fix it. ath10k 3324 * sends all tx frames as already inspected so this shouldn't 3325 * happen unless fw has a bug. 3326 */ 3327 ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); 3328 break; 3329 case HTT_T2H_MSG_TYPE_RX_ADDBA: 3330 ath10k_htt_rx_addba(ar, resp); 3331 break; 3332 case HTT_T2H_MSG_TYPE_RX_DELBA: 3333 ath10k_htt_rx_delba(ar, resp); 3334 break; 3335 case HTT_T2H_MSG_TYPE_PKTLOG: { 3336 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, 3337 skb->len - 3338 offsetof(struct htt_resp, 3339 pktlog_msg.payload)); 3340 3341 if (ath10k_peer_stats_enabled(ar)) 3342 ath10k_fetch_10_2_tx_stats(ar, 3343 resp->pktlog_msg.payload); 3344 break; 3345 } 3346 case HTT_T2H_MSG_TYPE_RX_FLUSH: { 3347 /* Ignore this event because mac80211 takes care of Rx 3348 * aggregation reordering. 
3349 */ 3350 break; 3351 } 3352 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 3353 skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 3354 return false; 3355 } 3356 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: 3357 break; 3358 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { 3359 u32 phymode = __le32_to_cpu(resp->chan_change.phymode); 3360 u32 freq = __le32_to_cpu(resp->chan_change.freq); 3361 3362 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); 3363 ath10k_dbg(ar, ATH10K_DBG_HTT, 3364 "htt chan change freq %u phymode %s\n", 3365 freq, ath10k_wmi_phymode_str(phymode)); 3366 break; 3367 } 3368 case HTT_T2H_MSG_TYPE_AGGR_CONF: 3369 break; 3370 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { 3371 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); 3372 3373 if (!tx_fetch_ind) { 3374 ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); 3375 break; 3376 } 3377 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); 3378 break; 3379 } 3380 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: 3381 ath10k_htt_rx_tx_fetch_confirm(ar, skb); 3382 break; 3383 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: 3384 ath10k_htt_rx_tx_mode_switch_ind(ar, skb); 3385 break; 3386 case HTT_T2H_MSG_TYPE_PEER_STATS: 3387 ath10k_htt_fetch_peer_stats(ar, skb); 3388 break; 3389 case HTT_T2H_MSG_TYPE_EN_STATS: 3390 default: 3391 ath10k_warn(ar, "htt event (%d) not handled\n", 3392 resp->hdr.msg_type); 3393 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 3394 skb->data, skb->len); 3395 break; 3396 } 3397 return true; 3398 } 3399 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 3400 3401 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 3402 struct sk_buff *skb) 3403 { 3404 trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 3405 dev_kfree_skb_any(skb); 3406 } 3407 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); 3408 3409 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) 3410 { 3411 struct sk_buff *skb; 3412 3413 while (quota < budget) { 3414 if (skb_queue_empty(&ar->htt.rx_msdus_q)) 3415 break; 3416 3417 skb = skb_dequeue(&ar->htt.rx_msdus_q); 3418 if (!skb) 3419 break; 3420 ath10k_process_rx(ar, skb); 3421 quota++; 3422 } 3423 3424 return quota; 3425 } 3426 3427 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) 3428 { 3429 struct ath10k_htt *htt = &ar->htt; 3430 struct htt_tx_done tx_done = {}; 3431 struct sk_buff_head tx_ind_q; 3432 struct sk_buff *skb; 3433 unsigned long flags; 3434 int quota = 0, done, ret; 3435 bool resched_napi = false; 3436 3437 __skb_queue_head_init(&tx_ind_q); 3438 3439 /* Process pending frames before dequeuing more data 3440 * from hardware. 
3441 */ 3442 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 3443 if (quota == budget) { 3444 resched_napi = true; 3445 goto exit; 3446 } 3447 3448 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { 3449 spin_lock_bh(&htt->rx_ring.lock); 3450 ret = ath10k_htt_rx_in_ord_ind(ar, skb); 3451 spin_unlock_bh(&htt->rx_ring.lock); 3452 3453 dev_kfree_skb_any(skb); 3454 if (ret == -EIO) { 3455 resched_napi = true; 3456 goto exit; 3457 } 3458 } 3459 3460 while (atomic_read(&htt->num_mpdus_ready)) { 3461 ret = ath10k_htt_rx_handle_amsdu(htt); 3462 if (ret == -EIO) { 3463 resched_napi = true; 3464 goto exit; 3465 } 3466 atomic_dec(&htt->num_mpdus_ready); 3467 } 3468 3469 /* Deliver received data after processing data from hardware */ 3470 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 3471 3472 /* From NAPI documentation: 3473 * The napi poll() function may also process TX completions, in which 3474 * case if it processes the entire TX ring then it should count that 3475 * work as the rest of the budget. 3476 */ 3477 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) 3478 quota = budget; 3479 3480 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. 3481 * From kfifo_get() documentation: 3482 * Note that with only one concurrent reader and one concurrent writer, 3483 * you don't need extra locking to use these macro. 3484 */ 3485 while (kfifo_get(&htt->txdone_fifo, &tx_done)) 3486 ath10k_txrx_tx_unref(htt, &tx_done); 3487 3488 ath10k_mac_tx_push_pending(ar); 3489 3490 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); 3491 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); 3492 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); 3493 3494 while ((skb = __skb_dequeue(&tx_ind_q))) { 3495 ath10k_htt_rx_tx_fetch_ind(ar, skb); 3496 dev_kfree_skb_any(skb); 3497 } 3498 3499 exit: 3500 ath10k_htt_rx_msdu_buff_replenish(htt); 3501 /* In case of rx failure or more data to read, report budget 3502 * to reschedule NAPI poll 3503 */ 3504 done = resched_napi ? budget : quota; 3505 3506 return done; 3507 } 3508 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); 3509 3510 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { 3511 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, 3512 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, 3513 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, 3514 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, 3515 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, 3516 }; 3517 3518 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { 3519 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, 3520 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, 3521 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, 3522 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, 3523 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, 3524 }; 3525 3526 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { 3527 }; 3528 3529 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) 3530 { 3531 struct ath10k *ar = htt->ar; 3532 3533 if (ar->dev_type == ATH10K_DEV_TYPE_HL) 3534 htt->rx_ops = &htt_rx_ops_hl; 3535 else if (ar->hw_params.target_64bit) 3536 htt->rx_ops = &htt_rx_ops_64; 3537 else 3538 htt->rx_ops = &htt_rx_ops_32; 3539 } 3540
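The rx_ops tables above select between the 32-bit and 64-bit ring accessors once at setup time; every later access then goes through the chosen table. As a minimal, self-contained illustration of that dispatch pattern (a userspace sketch, not part of the driver; all names below are invented for the example):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-target ring accessors. */
struct demo_rx_ops {
	size_t (*ring_elem_size)(void);
};

static size_t demo_elem_size_32(void)
{
	return sizeof(unsigned int);		/* 32-bit paddr slot */
}

static size_t demo_elem_size_64(void)
{
	return sizeof(unsigned long long);	/* 64-bit paddr slot */
}

static const struct demo_rx_ops demo_rx_ops_32 = {
	.ring_elem_size = demo_elem_size_32,
};

static const struct demo_rx_ops demo_rx_ops_64 = {
	.ring_elem_size = demo_elem_size_64,
};

/* Pick the ops table once, based on target capabilities. */
static const struct demo_rx_ops *demo_set_rx_ops(bool target_64bit)
{
	return target_64bit ? &demo_rx_ops_64 : &demo_rx_ops_32;
}

int main(void)
{
	const struct demo_rx_ops *ops = demo_set_rx_ops(true);

	printf("ring element size: %zu bytes\n", ops->ring_elem_size());
	return 0;
}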