/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
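/*
 * Illustrative arithmetic for the two sizing helpers above, assuming a
 * hypothetical max_throughput_mbps of 800:
 *
 *   ring size:  800 * 1000 / (8 * 1000) = 100 frames per ms of latency,
 *               times 20 ms worst-case latency = 2000 buffers,
 *               roundup_pow_of_two(2000) = 2048 (HTT_RX_RING_SIZE_MAX)
 *
 *   fill level: 100 frames per ms * 10 ms worst *likely* latency = 1000
 *               buffers, which is below size - 1 and is used as-is.
 */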
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
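/*
 * Illustrative batching behaviour of the replenish path above, assuming
 * ATH10K_HTT_MAX_NUM_REFILL is on the order of 100 buffers (see htt.h for
 * the actual value): with a fill level of 1000 and a fully drained ring,
 * roughly ten tasklet invocations are needed, and since tasklets run in FIFO
 * order the RX processing tasklet gets to run in between them instead of
 * being starved by one long refill burst.
 */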
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}
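/*
 * A rough summary of what ath10k_htt_rx_amsdu_pop() below does, as read from
 * its implementation: it pops every rx buffer belonging to one MPDU off the
 * ring, links them through skb->next, returns the chain via *head_msdu and
 * *tail_msdu, consumes one FW rx descriptor byte per MSDU from
 * *fw_desc/*fw_desc_len, and returns non-zero only if some MSDU spilled over
 * into chained ring buffers.
 */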
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW loses
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
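		/*
		 * Length accounting in the loop below, summarized for
		 * clarity: the head buffer already holds up to
		 * HTT_RX_MSDU_SIZE payload bytes, each chained buffer adds
		 * up to HTT_RX_BUF_SIZE more, and msdu_len is decremented as
		 * each buffer is consumed; any positive remainder afterwards
		 * triggers the "msdu len not consumed" warning.
		 */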
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest a FW bug */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   (htt->rx_ring.size *
				    sizeof(htt->rx_ring.paddrs_ring)),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
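/*
 * Worked example for the two helpers above, following the standard cipher
 * layouts: for CCMP (AES_CCM_WPA2) the 8-byte "param" is the PN/ExtIV header
 * that sits right after the 802.11 header and the 8-byte "tail" is the MIC
 * in front of the FCS; for TKIP the param is the 8-byte IV/ExtIV header and
 * the tail is the 4-byte ICV, while the 8-byte Michael MIC is handled
 * separately (see the fragmented rx path below).
 */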
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
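/*
 * Layout reminder for the two headers above: each A-MSDU subframe carries an
 * amsdu_subframe_hdr (DA, SA, 16-bit length) immediately followed by an
 * rfc1042_hdr (LLC/SNAP) and then the payload, with subframes padded to a
 * 4-byte boundary. The decap cases below push or strip these headers
 * depending on what the HW has already removed.
 */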
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *first;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
				RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
					4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA */
			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a single
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		info->skb = skb;
		info->encrypt_type = enctype;
		skb = skb->next;
		info->skb->next = NULL;

		if (skb)
			info->amsdu_more = true;

		ath10k_process_rx(htt->ar, info);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	ath10k_process_rx(htt->ar, info);
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
		return true;

	return false;
}
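/*
 * A note on the mapping below: CHECKSUM_UNNECESSARY tells the network stack
 * that the device has already validated the IP and TCP/UDP checksums, while
 * CHECKSUM_NONE makes the stack verify them in software. Only TCP or UDP
 * over IPv4/IPv6 with both HW checks passing is reported as already
 * verified.
 */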
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, &info);
			else
				ath10k_htt_rx_msdu(htt, &info);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;
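	/*
	 * Worked example of the trimming above: a final (no more-fragments)
	 * TKIP fragment loses 4 (FCS) + 4 (ICV) + 8 (Michael MIC) = 16 tail
	 * bytes, while a CCMP fragment loses 4 (FCS) + 8 (MIC) = 12.
	 */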
	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}
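/*
 * Rough call flow for this file, as implied by the functions above:
 * ath10k_htt_rx_attach() sizes and fills the rx ring at setup,
 * ath10k_htt_t2h_msg_handler() is invoked for every target-to-host HTT
 * message and dispatches RX and fragment indications to the handlers above,
 * and ath10k_htt_rx_detach() unmaps and frees everything at teardown.
 */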