/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50


static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);


static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
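	 *
	 * As an illustration (assuming max_throughput_mbps = 800), the
	 * formula above yields 800 * 1000 / (8 * 1000) * 10 = 1000 buffers
	 * before this cap is applied.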
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with the ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets
	 * are processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there are not enough buffers on the RX
	 * ring the FW will not report RX until the ring is refilled with
	 * enough buffers. This automatically balances load with respect to
	 * CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
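		 * The retry fires HTT_RX_RING_REFILL_RETRY_MS (50 ms)
		 * from now.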
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frames will still be
			 * delivered to the upper stack if there is no CRC
			 * error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
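		/* Each chained buffer contributes up to HTT_RX_BUF_SIZE
		 * bytes of the remaining msdu payload. */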
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest a FW bug? */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
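	 * (The initial value is computed by ath10k_htt_rx_ring_fill_level()
	 * from HTT_RX_HOST_LATENCY_WORST_LIKELY_MS above.)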
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
		   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

/* Applies for first msdu in chain, before altering it. */
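/* For the RAW decap format the 802.11 header sits at the start of the msdu
 * payload; for the other decap formats it is read back from the
 * rx_hdr_status area that precedes the payload. */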
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *first;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	/* FIXME: Hopefully this is a temporary measure.
	 *
	 * Reporting individual A-MSDU subframes means each reported frame
	 * shares the same sequence number.
	 *
	 * mac80211 drops frames it recognizes as duplicates, i.e. the
	 * retransmission flag is set and the sequence number matches the
	 * sequence number from a previous frame (as per IEEE 802.11-2012:
	 * 9.3.2.10 "Duplicate detection and recovery").
	 *
	 * To avoid frames being dropped, clear the retransmission flag for
	 * all received A-MSDUs.
	 *
	 * Worst case: actual duplicate frames will be reported but this should
	 * still be handled gracefully by other OSI/ISO layers. */
	hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
				RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
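		/* Skip the original 802.11 header and the crypto parameter
		 * (each rounded up to a multiple of 4) to reach the decap
		 * header of the first subframe. */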
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
					4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA */
			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a single
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		info->skb = skb;
		info->encrypt_type = enctype;
		skb = skb->next;
		info->skb->next = NULL;

		ath10k_process_rx(htt->ar, info);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
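	/* Only the head msdu is processed here; an unexpected chain tail is
	 * freed below. */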
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	ath10k_process_rx(htt->ar, info);
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
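			 * (a chained msdu spans more than one rx buffer)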
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, &info);
			else
				ath10k_htt_rx_msdu(htt, &info);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
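	/* (the Michael MIC adds another 8 bytes to strip) */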
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}