/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

/* wil_ring_wmark_low - low watermark for available descriptor space */
static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
	return ring->size / 8;
}

/* wil_ring_wmark_high - high watermark for available descriptor space */
static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
	return ring->size / 4;
}

/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_ring_avail_low(struct wil_ring *ring)
{
	return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}

/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_ring_avail_high(struct wil_ring *ring)
{
	return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}

/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;

	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *vring = &wil->ring_tx[i];
		int vring_index = vring - wil->ring_tx;
		struct wil_ring_tx_data *txdata =
			&wil->ring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
					WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_ring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
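				/* the lock was dropped around napi_synchronize();
				 * re-acquire it and re-validate the ring before
				 * testing emptiness again
				 */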
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}

/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d =
			&vring->va[i].tx.legacy;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->ring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_ring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx.legacy;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_ring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_ring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx.legacy;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_ring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}

/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wil->monitor_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
357 */ 358 int len = min_t(int, 8 + sizeof(phy_data), 359 wil_rxdesc_phy_length(d)); 360 361 if (len > 8) { 362 void *p = skb_tail_pointer(skb); 363 void *pa = PTR_ALIGN(p, 8); 364 365 if (skb_tailroom(skb) >= len + (pa - p)) { 366 phy_length = len - 8; 367 memcpy(phy_data, pa, phy_length); 368 } 369 } 370 } 371 rtap_len += phy_length; 372 } 373 374 if (skb_headroom(skb) < rtap_len && 375 pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) { 376 wil_err(wil, "Unable to expand headroom to %d\n", rtap_len); 377 return; 378 } 379 380 rtap_vendor = skb_push(skb, rtap_len); 381 memset(rtap_vendor, 0, rtap_len); 382 383 rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION; 384 rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len); 385 rtap_vendor->rtap.rthdr.it_present = cpu_to_le32( 386 (1 << IEEE80211_RADIOTAP_FLAGS) | 387 (1 << IEEE80211_RADIOTAP_CHANNEL) | 388 (1 << IEEE80211_RADIOTAP_MCS)); 389 if (d->dma.status & RX_DMA_STATUS_ERROR) 390 rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS; 391 392 rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320); 393 rtap_vendor->rtap.chnl_flags = cpu_to_le16(0); 394 395 rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS; 396 rtap_vendor->rtap.mcs_flags = 0; 397 rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d); 398 399 if (rtap_include_phy_info) { 400 rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 << 401 IEEE80211_RADIOTAP_VENDOR_NAMESPACE); 402 /* OUI for Wilocity 04:ce:14 */ 403 rtap_vendor->vendor_oui[0] = 0x04; 404 rtap_vendor->vendor_oui[1] = 0xce; 405 rtap_vendor->vendor_oui[2] = 0x14; 406 rtap_vendor->vendor_ns = 1; 407 /* Rx descriptor + PHY data */ 408 rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) + 409 phy_length); 410 memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d)); 411 memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data, 412 phy_length); 413 } 414 } 415 416 /* similar to ieee80211_ version, but FC contain only 1-st byte */ 417 static inline int wil_is_back_req(u8 fc) 418 { 419 return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == 420 (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); 421 } 422 423 bool wil_is_rx_idle(struct wil6210_priv *wil) 424 { 425 struct vring_rx_desc *_d; 426 struct wil_ring *ring = &wil->ring_rx; 427 428 _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy; 429 if (_d->dma.status & RX_DMA_STATUS_DU) 430 return false; 431 432 return true; 433 } 434 435 /** 436 * reap 1 frame from @swhead 437 * 438 * Rx descriptor copied to skb->cb 439 * 440 * Safe to call from IRQ 441 */ 442 static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, 443 struct wil_ring *vring) 444 { 445 struct device *dev = wil_to_dev(wil); 446 struct wil6210_vif *vif; 447 struct net_device *ndev; 448 volatile struct vring_rx_desc *_d; 449 struct vring_rx_desc *d; 450 struct sk_buff *skb; 451 dma_addr_t pa; 452 unsigned int snaplen = wil_rx_snaplen(); 453 unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen; 454 u16 dmalen; 455 u8 ftype; 456 int cid, mid; 457 int i; 458 struct wil_net_stats *stats; 459 460 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); 461 462 again: 463 if (unlikely(wil_ring_is_empty(vring))) 464 return NULL; 465 466 i = (int)vring->swhead; 467 _d = &vring->va[i].rx.legacy; 468 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { 469 /* it is not error, we just reached end of Rx done area */ 470 return NULL; 471 } 472 473 skb = vring->ctx[i].skb; 474 vring->ctx[i].skb = NULL; 475 wil_ring_advance_head(vring, 1); 476 if (!skb) { 477 
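		/* the descriptor was reported done but no skb is attached to
		 * this slot; log it and reap the next descriptor
		 */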
wil_err(wil, "No Rx skb at [%d]\n", i); 478 goto again; 479 } 480 d = wil_skb_rxdesc(skb); 481 *d = *_d; 482 pa = wil_desc_addr(&d->dma.addr); 483 484 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); 485 dmalen = le16_to_cpu(d->dma.length); 486 487 trace_wil6210_rx(i, d); 488 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen); 489 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4, 490 (const void *)d, sizeof(*d), false); 491 492 cid = wil_rxdesc_cid(d); 493 mid = wil_rxdesc_mid(d); 494 vif = wil->vifs[mid]; 495 496 if (unlikely(!vif)) { 497 wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d", 498 mid); 499 kfree_skb(skb); 500 goto again; 501 } 502 ndev = vif_to_ndev(vif); 503 stats = &wil->sta[cid].stats; 504 505 if (unlikely(dmalen > sz)) { 506 wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); 507 stats->rx_large_frame++; 508 kfree_skb(skb); 509 goto again; 510 } 511 skb_trim(skb, dmalen); 512 513 prefetch(skb->data); 514 515 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, 516 skb->data, skb_headlen(skb), false); 517 518 stats->last_mcs_rx = wil_rxdesc_mcs(d); 519 if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) 520 stats->rx_per_mcs[stats->last_mcs_rx]++; 521 522 /* use radiotap header only if required */ 523 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) 524 wil_rx_add_radiotap_header(wil, skb); 525 526 /* no extra checks if in sniffer mode */ 527 if (ndev->type != ARPHRD_ETHER) 528 return skb; 529 /* Non-data frames may be delivered through Rx DMA channel (ex: BAR) 530 * Driver should recognize it by frame type, that is found 531 * in Rx descriptor. If type is not data, it is 802.11 frame as is 532 */ 533 ftype = wil_rxdesc_ftype(d) << 2; 534 if (unlikely(ftype != IEEE80211_FTYPE_DATA)) { 535 u8 fc1 = wil_rxdesc_fc1(d); 536 int tid = wil_rxdesc_tid(d); 537 u16 seq = wil_rxdesc_seq(d); 538 539 wil_dbg_txrx(wil, 540 "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", 541 fc1, mid, cid, tid, seq); 542 stats->rx_non_data_frame++; 543 if (wil_is_back_req(fc1)) { 544 wil_dbg_txrx(wil, 545 "BAR: MID %d CID %d TID %d Seq 0x%03x\n", 546 mid, cid, tid, seq); 547 wil_rx_bar(wil, vif, cid, tid, seq); 548 } else { 549 /* print again all info. 
One can enable only this 550 * without overhead for printing every Rx frame 551 */ 552 wil_dbg_txrx(wil, 553 "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", 554 fc1, mid, cid, tid, seq); 555 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4, 556 (const void *)d, sizeof(*d), false); 557 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, 558 skb->data, skb_headlen(skb), false); 559 } 560 kfree_skb(skb); 561 goto again; 562 } 563 564 if (unlikely(skb->len < ETH_HLEN + snaplen)) { 565 wil_err(wil, "Short frame, len = %d\n", skb->len); 566 stats->rx_short_frame++; 567 kfree_skb(skb); 568 goto again; 569 } 570 571 /* L4 IDENT is on when HW calculated checksum, check status 572 * and in case of error drop the packet 573 * higher stack layers will handle retransmission (if required) 574 */ 575 if (likely(d->dma.status & RX_DMA_STATUS_L4I)) { 576 /* L4 protocol identified, csum calculated */ 577 if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)) 578 skb->ip_summed = CHECKSUM_UNNECESSARY; 579 /* If HW reports bad checksum, let IP stack re-check it 580 * For example, HW don't understand Microsoft IP stack that 581 * mis-calculates TCP checksum - if it should be 0x0, 582 * it writes 0xffff in violation of RFC 1624 583 */ 584 } 585 586 if (snaplen) { 587 /* Packet layout 588 * +-------+-------+---------+------------+------+ 589 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA | 590 * +-------+-------+---------+------------+------+ 591 * Need to remove SNAP, shifting SA and DA forward 592 */ 593 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN); 594 skb_pull(skb, snaplen); 595 } 596 597 return skb; 598 } 599 600 /** 601 * allocate and fill up to @count buffers in rx ring 602 * buffers posted at @swtail 603 * Note: we have a single RX queue for servicing all VIFs, but we 604 * allocate skbs with headroom according to main interface only. This 605 * means it will not work with monitor interface together with other VIFs. 606 * Currently we only support monitor interface on its own without other VIFs, 607 * and we will need to fix this code once we add support. 608 */ 609 static int wil_rx_refill(struct wil6210_priv *wil, int count) 610 { 611 struct net_device *ndev = wil->main_ndev; 612 struct wil_ring *v = &wil->ring_rx; 613 u32 next_tail; 614 int rc = 0; 615 int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ? 616 WIL6210_RTAP_SIZE : 0; 617 618 for (; next_tail = wil_ring_next_tail(v), 619 (next_tail != v->swhead) && (count-- > 0); 620 v->swtail = next_tail) { 621 rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); 622 if (unlikely(rc)) { 623 wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", 624 rc, v->swtail); 625 break; 626 } 627 } 628 629 /* make sure all writes to descriptors (shared memory) are done before 630 * committing them to HW 631 */ 632 wmb(); 633 634 wil_w(wil, v->hwtail, v->swtail); 635 636 return rc; 637 } 638 639 /** 640 * reverse_memcmp - Compare two areas of memory, in reverse order 641 * @cs: One area of memory 642 * @ct: Another area of memory 643 * @count: The size of the area. 
644 * 645 * Cut'n'paste from original memcmp (see lib/string.c) 646 * with minimal modifications 647 */ 648 static int reverse_memcmp(const void *cs, const void *ct, size_t count) 649 { 650 const unsigned char *su1, *su2; 651 int res = 0; 652 653 for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0; 654 --su1, --su2, count--) { 655 res = *su1 - *su2; 656 if (res) 657 break; 658 } 659 return res; 660 } 661 662 static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) 663 { 664 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 665 int cid = wil_rxdesc_cid(d); 666 int tid = wil_rxdesc_tid(d); 667 int key_id = wil_rxdesc_key_id(d); 668 int mc = wil_rxdesc_mcast(d); 669 struct wil_sta_info *s = &wil->sta[cid]; 670 struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx : 671 &s->tid_crypto_rx[tid]; 672 struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; 673 const u8 *pn = (u8 *)&d->mac.pn_15_0; 674 675 if (!cc->key_set) { 676 wil_err_ratelimited(wil, 677 "Key missing. CID %d TID %d MCast %d KEY_ID %d\n", 678 cid, tid, mc, key_id); 679 return -EINVAL; 680 } 681 682 if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) { 683 wil_err_ratelimited(wil, 684 "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n", 685 cid, tid, mc, key_id, pn, cc->pn); 686 return -EINVAL; 687 } 688 memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN); 689 690 return 0; 691 } 692 693 /* 694 * Pass Rx packet to the netif. Update statistics. 695 * Called in softirq context (NAPI poll). 696 */ 697 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) 698 { 699 gro_result_t rc = GRO_NORMAL; 700 struct wil6210_vif *vif = ndev_to_vif(ndev); 701 struct wil6210_priv *wil = ndev_to_wil(ndev); 702 struct wireless_dev *wdev = vif_to_wdev(vif); 703 unsigned int len = skb->len; 704 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 705 int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ 706 int security = wil_rxdesc_security(d); 707 struct ethhdr *eth = (void *)skb->data; 708 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication 709 * is not suitable, need to look at data 710 */ 711 int mcast = is_multicast_ether_addr(eth->h_dest); 712 struct wil_net_stats *stats = &wil->sta[cid].stats; 713 struct sk_buff *xmit_skb = NULL; 714 static const char * const gro_res_str[] = { 715 [GRO_MERGED] = "GRO_MERGED", 716 [GRO_MERGED_FREE] = "GRO_MERGED_FREE", 717 [GRO_HELD] = "GRO_HELD", 718 [GRO_NORMAL] = "GRO_NORMAL", 719 [GRO_DROP] = "GRO_DROP", 720 }; 721 722 if (ndev->features & NETIF_F_RXHASH) 723 /* fake L4 to ensure it won't be re-calculated later 724 * set hash to any non-zero value to activate rps 725 * mechanism, core will be chosen according 726 * to user-level rps configuration. 727 */ 728 skb_set_hash(skb, 1, PKT_HASH_TYPE_L4); 729 730 skb_orphan(skb); 731 732 if (security && (wil_rx_crypto_check(wil, skb) != 0)) { 733 rc = GRO_DROP; 734 dev_kfree_skb(skb); 735 stats->rx_replay++; 736 goto stats; 737 } 738 739 if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { 740 if (mcast) { 741 /* send multicast frames both to higher layers in 742 * local net stack and back to the wireless medium 743 */ 744 xmit_skb = skb_copy(skb, GFP_ATOMIC); 745 } else { 746 int xmit_cid = wil_find_cid(wil, vif->mid, 747 eth->h_dest); 748 749 if (xmit_cid >= 0) { 750 /* The destination station is associated to 751 * this AP (in this VLAN), so send the frame 752 * directly to it and do not pass it to local 753 * net stack. 
754 */ 755 xmit_skb = skb; 756 skb = NULL; 757 } 758 } 759 } 760 if (xmit_skb) { 761 /* Send to wireless media and increase priority by 256 to 762 * keep the received priority instead of reclassifying 763 * the frame (see cfg80211_classify8021d). 764 */ 765 xmit_skb->dev = ndev; 766 xmit_skb->priority += 256; 767 xmit_skb->protocol = htons(ETH_P_802_3); 768 skb_reset_network_header(xmit_skb); 769 skb_reset_mac_header(xmit_skb); 770 wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len); 771 dev_queue_xmit(xmit_skb); 772 } 773 774 if (skb) { /* deliver to local stack */ 775 skb->protocol = eth_type_trans(skb, ndev); 776 skb->dev = ndev; 777 rc = napi_gro_receive(&wil->napi_rx, skb); 778 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", 779 len, gro_res_str[rc]); 780 } 781 stats: 782 /* statistics. rc set to GRO_NORMAL for AP bridging */ 783 if (unlikely(rc == GRO_DROP)) { 784 ndev->stats.rx_dropped++; 785 stats->rx_dropped++; 786 wil_dbg_txrx(wil, "Rx drop %d bytes\n", len); 787 } else { 788 ndev->stats.rx_packets++; 789 stats->rx_packets++; 790 ndev->stats.rx_bytes += len; 791 stats->rx_bytes += len; 792 if (mcast) 793 ndev->stats.multicast++; 794 } 795 } 796 797 /** 798 * Proceed all completed skb's from Rx VRING 799 * 800 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled 801 */ 802 void wil_rx_handle(struct wil6210_priv *wil, int *quota) 803 { 804 struct net_device *ndev = wil->main_ndev; 805 struct wireless_dev *wdev = ndev->ieee80211_ptr; 806 struct wil_ring *v = &wil->ring_rx; 807 struct sk_buff *skb; 808 809 if (unlikely(!v->va)) { 810 wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); 811 return; 812 } 813 wil_dbg_txrx(wil, "rx_handle\n"); 814 while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { 815 (*quota)--; 816 817 /* monitor is currently supported on main interface only */ 818 if (wdev->iftype == NL80211_IFTYPE_MONITOR) { 819 skb->dev = ndev; 820 skb_reset_mac_header(skb); 821 skb->ip_summed = CHECKSUM_UNNECESSARY; 822 skb->pkt_type = PACKET_OTHERHOST; 823 skb->protocol = htons(ETH_P_802_2); 824 wil_netif_rx_any(skb, ndev); 825 } else { 826 wil_rx_reorder(wil, skb); 827 } 828 } 829 wil_rx_refill(wil, v->size); 830 } 831 832 static void wil_rx_buf_len_init(struct wil6210_priv *wil) 833 { 834 wil->rx_buf_len = rx_large_buf ? 
835 WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD; 836 if (mtu_max > wil->rx_buf_len) { 837 /* do not allow RX buffers to be smaller than mtu_max, for 838 * backward compatibility (mtu_max parameter was also used 839 * to support receiving large packets) 840 */ 841 wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max); 842 wil->rx_buf_len = mtu_max; 843 } 844 } 845 846 int wil_rx_init(struct wil6210_priv *wil, u16 size) 847 { 848 struct wil_ring *vring = &wil->ring_rx; 849 int rc; 850 851 wil_dbg_misc(wil, "rx_init\n"); 852 853 if (vring->va) { 854 wil_err(wil, "Rx ring already allocated\n"); 855 return -EINVAL; 856 } 857 858 wil_rx_buf_len_init(wil); 859 860 vring->size = size; 861 rc = wil_vring_alloc(wil, vring); 862 if (rc) 863 return rc; 864 865 rc = wmi_rx_chain_add(wil, vring); 866 if (rc) 867 goto err_free; 868 869 rc = wil_rx_refill(wil, vring->size); 870 if (rc) 871 goto err_free; 872 873 return 0; 874 err_free: 875 wil_vring_free(wil, vring, 0); 876 877 return rc; 878 } 879 880 void wil_rx_fini(struct wil6210_priv *wil) 881 { 882 struct wil_ring *vring = &wil->ring_rx; 883 884 wil_dbg_misc(wil, "rx_fini\n"); 885 886 if (vring->va) 887 wil_vring_free(wil, vring, 0); 888 } 889 890 static inline void wil_tx_data_init(struct wil_ring_tx_data *txdata) 891 { 892 spin_lock_bh(&txdata->lock); 893 txdata->dot1x_open = 0; 894 txdata->enabled = 0; 895 txdata->idle = 0; 896 txdata->last_idle = 0; 897 txdata->begin = 0; 898 txdata->agg_wsize = 0; 899 txdata->agg_timeout = 0; 900 txdata->agg_amsdu = 0; 901 txdata->addba_in_progress = false; 902 txdata->mid = U8_MAX; 903 spin_unlock_bh(&txdata->lock); 904 } 905 906 int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, 907 int cid, int tid) 908 { 909 struct wil6210_priv *wil = vif_to_wil(vif); 910 int rc; 911 struct wmi_vring_cfg_cmd cmd = { 912 .action = cpu_to_le32(WMI_VRING_CMD_ADD), 913 .vring_cfg = { 914 .tx_sw_ring = { 915 .max_mpdu_size = 916 cpu_to_le16(wil_mtu2macbuf(mtu_max)), 917 .ring_size = cpu_to_le16(size), 918 }, 919 .ringid = id, 920 .cidxtid = mk_cidxtid(cid, tid), 921 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, 922 .mac_ctrl = 0, 923 .to_resolution = 0, 924 .agg_max_wsize = 0, 925 .schd_params = { 926 .priority = cpu_to_le16(0), 927 .timeslot_us = cpu_to_le16(0xfff), 928 }, 929 }, 930 }; 931 struct { 932 struct wmi_cmd_hdr wmi; 933 struct wmi_vring_cfg_done_event cmd; 934 } __packed reply = { 935 .cmd = {.status = WMI_FW_STATUS_FAILURE}, 936 }; 937 struct wil_ring *vring = &wil->ring_tx[id]; 938 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; 939 940 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", 941 cmd.vring_cfg.tx_sw_ring.max_mpdu_size); 942 lockdep_assert_held(&wil->mutex); 943 944 if (vring->va) { 945 wil_err(wil, "Tx ring [%d] already allocated\n", id); 946 rc = -EINVAL; 947 goto out; 948 } 949 950 wil_tx_data_init(txdata); 951 vring->size = size; 952 rc = wil_vring_alloc(wil, vring); 953 if (rc) 954 goto out; 955 956 wil->ring2cid_tid[id][0] = cid; 957 wil->ring2cid_tid[id][1] = tid; 958 959 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 960 961 if (!vif->privacy) 962 txdata->dot1x_open = true; 963 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), 964 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); 965 if (rc) 966 goto out_free; 967 968 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { 969 wil_err(wil, "Tx config failed, status 0x%02x\n", 970 reply.cmd.status); 971 rc = -EINVAL; 972 goto out_free; 973 } 974 975 
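	/* WMI ring configuration succeeded: publish the HW tail pointer and
	 * enable Tx for this ring under txdata->lock
	 */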
spin_lock_bh(&txdata->lock); 976 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); 977 txdata->mid = vif->mid; 978 txdata->enabled = 1; 979 spin_unlock_bh(&txdata->lock); 980 981 if (txdata->dot1x_open && (agg_wsize >= 0)) 982 wil_addba_tx_request(wil, id, agg_wsize); 983 984 return 0; 985 out_free: 986 spin_lock_bh(&txdata->lock); 987 txdata->dot1x_open = false; 988 txdata->enabled = 0; 989 spin_unlock_bh(&txdata->lock); 990 wil_vring_free(wil, vring, 1); 991 wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; 992 wil->ring2cid_tid[id][1] = 0; 993 994 out: 995 996 return rc; 997 } 998 999 int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) 1000 { 1001 struct wil6210_priv *wil = vif_to_wil(vif); 1002 int rc; 1003 struct wmi_bcast_vring_cfg_cmd cmd = { 1004 .action = cpu_to_le32(WMI_VRING_CMD_ADD), 1005 .vring_cfg = { 1006 .tx_sw_ring = { 1007 .max_mpdu_size = 1008 cpu_to_le16(wil_mtu2macbuf(mtu_max)), 1009 .ring_size = cpu_to_le16(size), 1010 }, 1011 .ringid = id, 1012 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, 1013 }, 1014 }; 1015 struct { 1016 struct wmi_cmd_hdr wmi; 1017 struct wmi_vring_cfg_done_event cmd; 1018 } __packed reply = { 1019 .cmd = {.status = WMI_FW_STATUS_FAILURE}, 1020 }; 1021 struct wil_ring *vring = &wil->ring_tx[id]; 1022 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; 1023 1024 wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n", 1025 cmd.vring_cfg.tx_sw_ring.max_mpdu_size); 1026 lockdep_assert_held(&wil->mutex); 1027 1028 if (vring->va) { 1029 wil_err(wil, "Tx ring [%d] already allocated\n", id); 1030 rc = -EINVAL; 1031 goto out; 1032 } 1033 1034 wil_tx_data_init(txdata); 1035 vring->size = size; 1036 rc = wil_vring_alloc(wil, vring); 1037 if (rc) 1038 goto out; 1039 1040 wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ 1041 wil->ring2cid_tid[id][1] = 0; /* TID */ 1042 1043 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 1044 1045 if (!vif->privacy) 1046 txdata->dot1x_open = true; 1047 rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid, 1048 &cmd, sizeof(cmd), 1049 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); 1050 if (rc) 1051 goto out_free; 1052 1053 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { 1054 wil_err(wil, "Tx config failed, status 0x%02x\n", 1055 reply.cmd.status); 1056 rc = -EINVAL; 1057 goto out_free; 1058 } 1059 1060 spin_lock_bh(&txdata->lock); 1061 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); 1062 txdata->mid = vif->mid; 1063 txdata->enabled = 1; 1064 spin_unlock_bh(&txdata->lock); 1065 1066 return 0; 1067 out_free: 1068 spin_lock_bh(&txdata->lock); 1069 txdata->enabled = 0; 1070 txdata->dot1x_open = false; 1071 spin_unlock_bh(&txdata->lock); 1072 wil_vring_free(wil, vring, 1); 1073 out: 1074 1075 return rc; 1076 } 1077 1078 void wil_ring_fini_tx(struct wil6210_priv *wil, int id) 1079 { 1080 struct wil_ring *vring = &wil->ring_tx[id]; 1081 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; 1082 1083 lockdep_assert_held(&wil->mutex); 1084 1085 if (!vring->va) 1086 return; 1087 1088 wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id); 1089 1090 spin_lock_bh(&txdata->lock); 1091 txdata->dot1x_open = false; 1092 txdata->mid = U8_MAX; 1093 txdata->enabled = 0; /* no Tx can be in progress or start anew */ 1094 spin_unlock_bh(&txdata->lock); 1095 /* napi_synchronize waits for completion of the current NAPI but will 1096 * not prevent the next NAPI run. 
1097 * Add a memory barrier to guarantee that txdata->enabled is zeroed 1098 * before napi_synchronize so that the next scheduled NAPI will not 1099 * handle this vring 1100 */ 1101 wmb(); 1102 /* make sure NAPI won't touch this vring */ 1103 if (test_bit(wil_status_napi_en, wil->status)) 1104 napi_synchronize(&wil->napi_tx); 1105 1106 wil_vring_free(wil, vring, 1); 1107 } 1108 1109 static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, 1110 struct wil6210_vif *vif, 1111 struct sk_buff *skb) 1112 { 1113 int i; 1114 struct ethhdr *eth = (void *)skb->data; 1115 int cid = wil_find_cid(wil, vif->mid, eth->h_dest); 1116 1117 if (cid < 0) 1118 return NULL; 1119 1120 /* TODO: fix for multiple TID */ 1121 for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) { 1122 if (!wil->ring_tx_data[i].dot1x_open && 1123 skb->protocol != cpu_to_be16(ETH_P_PAE)) 1124 continue; 1125 if (wil->ring2cid_tid[i][0] == cid) { 1126 struct wil_ring *v = &wil->ring_tx[i]; 1127 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; 1128 1129 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", 1130 eth->h_dest, i); 1131 if (v->va && txdata->enabled) { 1132 return v; 1133 } else { 1134 wil_dbg_txrx(wil, 1135 "find_tx_ucast: vring[%d] not valid\n", 1136 i); 1137 return NULL; 1138 } 1139 } 1140 } 1141 1142 return NULL; 1143 } 1144 1145 static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, 1146 struct wil_ring *vring, struct sk_buff *skb); 1147 1148 static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil, 1149 struct wil6210_vif *vif, 1150 struct sk_buff *skb) 1151 { 1152 struct wil_ring *ring; 1153 int i; 1154 u8 cid; 1155 struct wil_ring_tx_data *txdata; 1156 1157 /* In the STA mode, it is expected to have only 1 VRING 1158 * for the AP we connected to. 1159 * find 1-st vring eligible for this skb and use it. 1160 */ 1161 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 1162 ring = &wil->ring_tx[i]; 1163 txdata = &wil->ring_tx_data[i]; 1164 if (!ring->va || !txdata->enabled || txdata->mid != vif->mid) 1165 continue; 1166 1167 cid = wil->ring2cid_tid[i][0]; 1168 if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1169 continue; 1170 1171 if (!wil->ring_tx_data[i].dot1x_open && 1172 skb->protocol != cpu_to_be16(ETH_P_PAE)) 1173 continue; 1174 1175 wil_dbg_txrx(wil, "Tx -> ring %d\n", i); 1176 1177 return ring; 1178 } 1179 1180 wil_dbg_txrx(wil, "Tx while no rings active?\n"); 1181 1182 return NULL; 1183 } 1184 1185 /* Use one of 2 strategies: 1186 * 1187 * 1. New (real broadcast): 1188 * use dedicated broadcast vring 1189 * 2. 
Old (pseudo-DMS): 1190 * Find 1-st vring and return it; 1191 * duplicate skb and send it to other active vrings; 1192 * in all cases override dest address to unicast peer's address 1193 * Use old strategy when new is not supported yet: 1194 * - for PBSS 1195 */ 1196 static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil, 1197 struct wil6210_vif *vif, 1198 struct sk_buff *skb) 1199 { 1200 struct wil_ring *v; 1201 struct wil_ring_tx_data *txdata; 1202 int i = vif->bcast_ring; 1203 1204 if (i < 0) 1205 return NULL; 1206 v = &wil->ring_tx[i]; 1207 txdata = &wil->ring_tx_data[i]; 1208 if (!v->va || !txdata->enabled) 1209 return NULL; 1210 if (!wil->ring_tx_data[i].dot1x_open && 1211 skb->protocol != cpu_to_be16(ETH_P_PAE)) 1212 return NULL; 1213 1214 return v; 1215 } 1216 1217 static void wil_set_da_for_vring(struct wil6210_priv *wil, 1218 struct sk_buff *skb, int vring_index) 1219 { 1220 struct ethhdr *eth = (void *)skb->data; 1221 int cid = wil->ring2cid_tid[vring_index][0]; 1222 1223 ether_addr_copy(eth->h_dest, wil->sta[cid].addr); 1224 } 1225 1226 static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, 1227 struct wil6210_vif *vif, 1228 struct sk_buff *skb) 1229 { 1230 struct wil_ring *v, *v2; 1231 struct sk_buff *skb2; 1232 int i; 1233 u8 cid; 1234 struct ethhdr *eth = (void *)skb->data; 1235 char *src = eth->h_source; 1236 struct wil_ring_tx_data *txdata, *txdata2; 1237 1238 /* find 1-st vring eligible for data */ 1239 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 1240 v = &wil->ring_tx[i]; 1241 txdata = &wil->ring_tx_data[i]; 1242 if (!v->va || !txdata->enabled || txdata->mid != vif->mid) 1243 continue; 1244 1245 cid = wil->ring2cid_tid[i][0]; 1246 if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1247 continue; 1248 if (!wil->ring_tx_data[i].dot1x_open && 1249 skb->protocol != cpu_to_be16(ETH_P_PAE)) 1250 continue; 1251 1252 /* don't Tx back to source when re-routing Rx->Tx at the AP */ 1253 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) 1254 continue; 1255 1256 goto found; 1257 } 1258 1259 wil_dbg_txrx(wil, "Tx while no vrings active?\n"); 1260 1261 return NULL; 1262 1263 found: 1264 wil_dbg_txrx(wil, "BCAST -> ring %d\n", i); 1265 wil_set_da_for_vring(wil, skb, i); 1266 1267 /* find other active vrings and duplicate skb for each */ 1268 for (i++; i < WIL6210_MAX_TX_RINGS; i++) { 1269 v2 = &wil->ring_tx[i]; 1270 txdata2 = &wil->ring_tx_data[i]; 1271 if (!v2->va || txdata2->mid != vif->mid) 1272 continue; 1273 cid = wil->ring2cid_tid[i][0]; 1274 if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1275 continue; 1276 if (!wil->ring_tx_data[i].dot1x_open && 1277 skb->protocol != cpu_to_be16(ETH_P_PAE)) 1278 continue; 1279 1280 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) 1281 continue; 1282 1283 skb2 = skb_copy(skb, GFP_ATOMIC); 1284 if (skb2) { 1285 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); 1286 wil_set_da_for_vring(wil, skb2, i); 1287 wil_tx_vring(wil, vif, v2, skb2); 1288 } else { 1289 wil_err(wil, "skb_copy failed\n"); 1290 } 1291 } 1292 1293 return v; 1294 } 1295 1296 static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, 1297 int vring_index) 1298 { 1299 wil_desc_addr_set(&d->dma.addr, pa); 1300 d->dma.ip_length = 0; 1301 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ 1302 d->dma.b11 = 0/*14 | BIT(7)*/; 1303 d->dma.error = 0; 1304 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ 1305 d->dma.length = cpu_to_le16((u16)len); 1306 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS); 1307 d->mac.d[0] = 0; 1308 d->mac.d[1] 
= 0; 1309 d->mac.d[2] = 0; 1310 d->mac.ucode_cmd = 0; 1311 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */ 1312 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) | 1313 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS); 1314 1315 return 0; 1316 } 1317 1318 static inline 1319 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags) 1320 { 1321 d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); 1322 } 1323 1324 /** 1325 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding 1326 * @skb is used to obtain the protocol and headers length. 1327 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data, 1328 * 2 - middle, 3 - last descriptor. 1329 */ 1330 1331 static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d, 1332 struct sk_buff *skb, 1333 int tso_desc_type, bool is_ipv4, 1334 int tcp_hdr_len, int skb_net_hdr_len) 1335 { 1336 d->dma.b11 = ETH_HLEN; /* MAC header length */ 1337 d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS; 1338 1339 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); 1340 /* L4 header len: TCP header length */ 1341 d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); 1342 1343 /* Setup TSO: bit and desc type */ 1344 d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) | 1345 (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS); 1346 d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS); 1347 1348 d->dma.ip_length = skb_net_hdr_len; 1349 /* Enable TCP/UDP checksum */ 1350 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); 1351 /* Calculate pseudo-header */ 1352 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); 1353 } 1354 1355 /** 1356 * Sets the descriptor @d up for csum. The corresponding 1357 * @skb is used to obtain the protocol and headers length. 1358 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6. 1359 * Note, if d==NULL, the function only returns the protocol result. 1360 * 1361 * It is very similar to previous wil_tx_desc_offload_setup_tso. This 1362 * is "if unrolling" to optimize the critical path. 
1363 */ 1364 1365 static int wil_tx_desc_offload_setup(struct vring_tx_desc *d, 1366 struct sk_buff *skb){ 1367 int protocol; 1368 1369 if (skb->ip_summed != CHECKSUM_PARTIAL) 1370 return 0; 1371 1372 d->dma.b11 = ETH_HLEN; /* MAC header length */ 1373 1374 switch (skb->protocol) { 1375 case cpu_to_be16(ETH_P_IP): 1376 protocol = ip_hdr(skb)->protocol; 1377 d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS); 1378 break; 1379 case cpu_to_be16(ETH_P_IPV6): 1380 protocol = ipv6_hdr(skb)->nexthdr; 1381 break; 1382 default: 1383 return -EINVAL; 1384 } 1385 1386 switch (protocol) { 1387 case IPPROTO_TCP: 1388 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); 1389 /* L4 header len: TCP header length */ 1390 d->dma.d0 |= 1391 (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); 1392 break; 1393 case IPPROTO_UDP: 1394 /* L4 header len: UDP header length */ 1395 d->dma.d0 |= 1396 (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); 1397 break; 1398 default: 1399 return -EINVAL; 1400 } 1401 1402 d->dma.ip_length = skb_network_header_len(skb); 1403 /* Enable TCP/UDP checksum */ 1404 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); 1405 /* Calculate pseudo-header */ 1406 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); 1407 1408 return 0; 1409 } 1410 1411 static inline void wil_tx_last_desc(struct vring_tx_desc *d) 1412 { 1413 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) | 1414 BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) | 1415 BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); 1416 } 1417 1418 static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d) 1419 { 1420 d->dma.d0 |= wil_tso_type_lst << 1421 DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS; 1422 } 1423 1424 static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, 1425 struct wil_ring *vring, struct sk_buff *skb) 1426 { 1427 struct device *dev = wil_to_dev(wil); 1428 1429 /* point to descriptors in shared memory */ 1430 volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc, 1431 *_first_desc = NULL; 1432 1433 /* pointers to shadow descriptors */ 1434 struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem, 1435 *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem, 1436 *first_desc = &first_desc_mem; 1437 1438 /* pointer to shadow descriptors' context */ 1439 struct wil_ctx *hdr_ctx, *first_ctx = NULL; 1440 1441 int descs_used = 0; /* total number of used descriptors */ 1442 int sg_desc_cnt = 0; /* number of descriptors for current mss*/ 1443 1444 u32 swhead = vring->swhead; 1445 int used, avail = wil_ring_avail_tx(vring); 1446 int nr_frags = skb_shinfo(skb)->nr_frags; 1447 int min_desc_required = nr_frags + 1; 1448 int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ 1449 int f, len, hdrlen, headlen; 1450 int vring_index = vring - wil->ring_tx; 1451 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index]; 1452 uint i = swhead; 1453 dma_addr_t pa; 1454 const skb_frag_t *frag = NULL; 1455 int rem_data = mss; 1456 int lenmss; 1457 int hdr_compensation_need = true; 1458 int desc_tso_type = wil_tso_type_first; 1459 bool is_ipv4; 1460 int tcp_hdr_len; 1461 int skb_net_hdr_len; 1462 int gso_type; 1463 int rc = -EINVAL; 1464 1465 wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len, 1466 vring_index); 1467 1468 if (unlikely(!txdata->enabled)) 1469 return -EINVAL; 1470 1471 /* A typical page 4K is 3-4 payloads, we assume each fragment 1472 * is a full payload, that's how min_desc_required has been 1473 * calculated. 
In real we might need more or less descriptors, 1474 * this is the initial check only. 1475 */ 1476 if (unlikely(avail < min_desc_required)) { 1477 wil_err_ratelimited(wil, 1478 "TSO: Tx ring[%2d] full. No space for %d fragments\n", 1479 vring_index, min_desc_required); 1480 return -ENOMEM; 1481 } 1482 1483 /* Header Length = MAC header len + IP header len + TCP header len*/ 1484 hdrlen = ETH_HLEN + 1485 (int)skb_network_header_len(skb) + 1486 tcp_hdrlen(skb); 1487 1488 gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4); 1489 switch (gso_type) { 1490 case SKB_GSO_TCPV4: 1491 /* TCP v4, zero out the IP length and IPv4 checksum fields 1492 * as required by the offloading doc 1493 */ 1494 ip_hdr(skb)->tot_len = 0; 1495 ip_hdr(skb)->check = 0; 1496 is_ipv4 = true; 1497 break; 1498 case SKB_GSO_TCPV6: 1499 /* TCP v6, zero out the payload length */ 1500 ipv6_hdr(skb)->payload_len = 0; 1501 is_ipv4 = false; 1502 break; 1503 default: 1504 /* other than TCPv4 or TCPv6 types are not supported for TSO. 1505 * It is also illegal for both to be set simultaneously 1506 */ 1507 return -EINVAL; 1508 } 1509 1510 if (skb->ip_summed != CHECKSUM_PARTIAL) 1511 return -EINVAL; 1512 1513 /* tcp header length and skb network header length are fixed for all 1514 * packet's descriptors - read then once here 1515 */ 1516 tcp_hdr_len = tcp_hdrlen(skb); 1517 skb_net_hdr_len = skb_network_header_len(skb); 1518 1519 _hdr_desc = &vring->va[i].tx.legacy; 1520 1521 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); 1522 if (unlikely(dma_mapping_error(dev, pa))) { 1523 wil_err(wil, "TSO: Skb head DMA map error\n"); 1524 goto err_exit; 1525 } 1526 1527 wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index); 1528 wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4, 1529 tcp_hdr_len, skb_net_hdr_len); 1530 wil_tx_last_desc(hdr_desc); 1531 1532 vring->ctx[i].mapped_as = wil_mapped_as_single; 1533 hdr_ctx = &vring->ctx[i]; 1534 1535 descs_used++; 1536 headlen = skb_headlen(skb) - hdrlen; 1537 1538 for (f = headlen ? 
-1 : 0; f < nr_frags; f++) { 1539 if (headlen) { 1540 len = headlen; 1541 wil_dbg_txrx(wil, "TSO: process skb head, len %u\n", 1542 len); 1543 } else { 1544 frag = &skb_shinfo(skb)->frags[f]; 1545 len = frag->size; 1546 wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len); 1547 } 1548 1549 while (len) { 1550 wil_dbg_txrx(wil, 1551 "TSO: len %d, rem_data %d, descs_used %d\n", 1552 len, rem_data, descs_used); 1553 1554 if (descs_used == avail) { 1555 wil_err_ratelimited(wil, "TSO: ring overflow\n"); 1556 rc = -ENOMEM; 1557 goto mem_error; 1558 } 1559 1560 lenmss = min_t(int, rem_data, len); 1561 i = (swhead + descs_used) % vring->size; 1562 wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i); 1563 1564 if (!headlen) { 1565 pa = skb_frag_dma_map(dev, frag, 1566 frag->size - len, lenmss, 1567 DMA_TO_DEVICE); 1568 vring->ctx[i].mapped_as = wil_mapped_as_page; 1569 } else { 1570 pa = dma_map_single(dev, 1571 skb->data + 1572 skb_headlen(skb) - headlen, 1573 lenmss, 1574 DMA_TO_DEVICE); 1575 vring->ctx[i].mapped_as = wil_mapped_as_single; 1576 headlen -= lenmss; 1577 } 1578 1579 if (unlikely(dma_mapping_error(dev, pa))) { 1580 wil_err(wil, "TSO: DMA map page error\n"); 1581 goto mem_error; 1582 } 1583 1584 _desc = &vring->va[i].tx.legacy; 1585 1586 if (!_first_desc) { 1587 _first_desc = _desc; 1588 first_ctx = &vring->ctx[i]; 1589 d = first_desc; 1590 } else { 1591 d = &desc_mem; 1592 } 1593 1594 wil_tx_desc_map(d, pa, lenmss, vring_index); 1595 wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type, 1596 is_ipv4, tcp_hdr_len, 1597 skb_net_hdr_len); 1598 1599 /* use tso_type_first only once */ 1600 desc_tso_type = wil_tso_type_mid; 1601 1602 descs_used++; /* desc used so far */ 1603 sg_desc_cnt++; /* desc used for this segment */ 1604 len -= lenmss; 1605 rem_data -= lenmss; 1606 1607 wil_dbg_txrx(wil, 1608 "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n", 1609 len, rem_data, descs_used, sg_desc_cnt); 1610 1611 /* Close the segment if reached mss size or last frag*/ 1612 if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) { 1613 if (hdr_compensation_need) { 1614 /* first segment include hdr desc for 1615 * release 1616 */ 1617 hdr_ctx->nr_frags = sg_desc_cnt; 1618 wil_tx_desc_set_nr_frags(first_desc, 1619 sg_desc_cnt + 1620 1); 1621 hdr_compensation_need = false; 1622 } else { 1623 wil_tx_desc_set_nr_frags(first_desc, 1624 sg_desc_cnt); 1625 } 1626 first_ctx->nr_frags = sg_desc_cnt - 1; 1627 1628 wil_tx_last_desc(d); 1629 1630 /* first descriptor may also be the last 1631 * for this mss - make sure not to copy 1632 * it twice 1633 */ 1634 if (first_desc != d) 1635 *_first_desc = *first_desc; 1636 1637 /*last descriptor will be copied at the end 1638 * of this TS processing 1639 */ 1640 if (f < nr_frags - 1 || len > 0) 1641 *_desc = *d; 1642 1643 rem_data = mss; 1644 _first_desc = NULL; 1645 sg_desc_cnt = 0; 1646 } else if (first_desc != d) /* update mid descriptor */ 1647 *_desc = *d; 1648 } 1649 } 1650 1651 /* first descriptor may also be the last. 
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr)*/
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
		_desc = &vring->va[i].tx.legacy;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}

static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			  struct wil_ring *vring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_ring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == vif->bcast_ring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
		     skb->len, vring_index, nr_frags);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full.
No space for %d fragments\n", 1744 vring_index, 1 + nr_frags); 1745 return -ENOMEM; 1746 } 1747 _d = &vring->va[i].tx.legacy; 1748 1749 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 1750 1751 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index, 1752 skb_headlen(skb), skb->data, &pa); 1753 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, 1754 skb->data, skb_headlen(skb), false); 1755 1756 if (unlikely(dma_mapping_error(dev, pa))) 1757 return -EINVAL; 1758 vring->ctx[i].mapped_as = wil_mapped_as_single; 1759 /* 1-st segment */ 1760 wil_tx_desc_map(d, pa, len, vring_index); 1761 if (unlikely(mcast)) { 1762 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ 1763 if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */ 1764 d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); 1765 } 1766 /* Process TCP/UDP checksum offloading */ 1767 if (unlikely(wil_tx_desc_offload_setup(d, skb))) { 1768 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", 1769 vring_index); 1770 goto dma_error; 1771 } 1772 1773 vring->ctx[i].nr_frags = nr_frags; 1774 wil_tx_desc_set_nr_frags(d, nr_frags + 1); 1775 1776 /* middle segments */ 1777 for (; f < nr_frags; f++) { 1778 const struct skb_frag_struct *frag = 1779 &skb_shinfo(skb)->frags[f]; 1780 int len = skb_frag_size(frag); 1781 1782 *_d = *d; 1783 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); 1784 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, 1785 (const void *)d, sizeof(*d), false); 1786 i = (swhead + f + 1) % vring->size; 1787 _d = &vring->va[i].tx.legacy; 1788 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), 1789 DMA_TO_DEVICE); 1790 if (unlikely(dma_mapping_error(dev, pa))) { 1791 wil_err(wil, "Tx[%2d] failed to map fragment\n", 1792 vring_index); 1793 goto dma_error; 1794 } 1795 vring->ctx[i].mapped_as = wil_mapped_as_page; 1796 wil_tx_desc_map(d, pa, len, vring_index); 1797 /* no need to check return code - 1798 * if it succeeded for 1-st descriptor, 1799 * it will succeed here too 1800 */ 1801 wil_tx_desc_offload_setup(d, skb); 1802 } 1803 /* for the last seg only */ 1804 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); 1805 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS); 1806 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); 1807 *_d = *d; 1808 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); 1809 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, 1810 (const void *)d, sizeof(*d), false); 1811 1812 /* hold reference to skb 1813 * to prevent skb release before accounting 1814 * in case of immediate "tx done" 1815 */ 1816 vring->ctx[i].skb = skb_get(skb); 1817 1818 /* performance monitoring */ 1819 used = wil_ring_used_tx(vring); 1820 if (wil_val_in_range(wil->ring_idle_trsh, 1821 used, used + nr_frags + 1)) { 1822 txdata->idle += get_cycles() - txdata->last_idle; 1823 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", 1824 vring_index, used, used + nr_frags + 1); 1825 } 1826 1827 /* Make sure to advance the head only after descriptor update is done. 1828 * This will prevent a race condition where the completion thread 1829 * will see the DU bit set from previous run and will handle the 1830 * skb before it was completed. 
1831 */ 1832 wmb(); 1833 1834 /* advance swhead */ 1835 wil_ring_advance_head(vring, nr_frags + 1); 1836 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, 1837 vring->swhead); 1838 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); 1839 1840 /* make sure all writes to descriptors (shared memory) are done before 1841 * committing them to HW 1842 */ 1843 wmb(); 1844 1845 wil_w(wil, vring->hwtail, vring->swhead); 1846 1847 return 0; 1848 dma_error: 1849 /* unmap what we have mapped */ 1850 nr_frags = f + 1; /* frags mapped + one for skb head */ 1851 for (f = 0; f < nr_frags; f++) { 1852 struct wil_ctx *ctx; 1853 1854 i = (swhead + f) % vring->size; 1855 ctx = &vring->ctx[i]; 1856 _d = &vring->va[i].tx.legacy; 1857 *d = *_d; 1858 _d->dma.status = TX_DMA_STATUS_DU; 1859 wil_txdesc_unmap(dev, d, ctx); 1860 1861 memset(ctx, 0, sizeof(*ctx)); 1862 } 1863 1864 return -EINVAL; 1865 } 1866 1867 static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, 1868 struct wil_ring *vring, struct sk_buff *skb) 1869 { 1870 int ring_index = vring - wil->ring_tx; 1871 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; 1872 int rc; 1873 1874 spin_lock(&txdata->lock); 1875 1876 if (test_bit(wil_status_suspending, wil->status) || 1877 test_bit(wil_status_suspended, wil->status) || 1878 test_bit(wil_status_resuming, wil->status)) { 1879 wil_dbg_txrx(wil, 1880 "suspend/resume in progress. drop packet\n"); 1881 spin_unlock(&txdata->lock); 1882 return -EINVAL; 1883 } 1884 1885 rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) 1886 (wil, vif, vring, skb); 1887 1888 spin_unlock(&txdata->lock); 1889 1890 return rc; 1891 } 1892 1893 /** 1894 * Check status of tx vrings and stop/wake net queues if needed 1895 * It will start/stop net queues of a specific VIF net_device. 1896 * 1897 * This function does one of two checks: 1898 * In case check_stop is true, will check if net queues need to be stopped. If 1899 * the conditions for stopping are met, netif_tx_stop_all_queues() is called. 1900 * In case check_stop is false, will check if net queues need to be waked. If 1901 * the conditions for waking are met, netif_tx_wake_all_queues() is called. 1902 * vring is the vring which is currently being modified by either adding 1903 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can 1904 * be null when irrelevant (e.g. connect/disconnect events). 1905 * 1906 * The implementation is to stop net queues if modified vring has low 1907 * descriptor availability. Wake if all vrings are not in low descriptor 1908 * availability and modified vring has high descriptor availability. 

/**
 * Check the status of tx vrings and stop/wake net queues if needed.
 * It will start/stop net queues of a specific VIF net_device.
 *
 * This function does one of two checks:
 * In case check_stop is true, it will check if net queues need to be stopped.
 * If the conditions for stopping are met, netif_tx_stop_all_queues() is
 * called.
 * In case check_stop is false, it will check if net queues need to be woken.
 * If the conditions for waking are met, netif_tx_wake_all_queues() is called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) to it or removing descriptors (tx complete) from it. Can
 * be NULL when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if the modified vring has low
 * descriptor availability. Wake if all vrings are not in low descriptor
 * availability and the modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
                                           struct wil6210_vif *vif,
                                           struct wil_ring *ring,
                                           bool check_stop)
{
        int i;

        if (unlikely(!vif))
                return;

        if (ring)
                wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
                             (int)(ring - wil->ring_tx), vif->mid, check_stop,
                             vif->net_queue_stopped);
        else
                wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
                             check_stop, vif->mid, vif->net_queue_stopped);

        if (check_stop == vif->net_queue_stopped)
                /* net queues already in desired state */
                return;

        if (check_stop) {
                if (!ring || unlikely(wil_ring_avail_low(ring))) {
                        /* not enough room in the vring */
                        netif_tx_stop_all_queues(vif_to_ndev(vif));
                        vif->net_queue_stopped = true;
                        wil_dbg_txrx(wil, "netif_tx_stop called\n");
                }
                return;
        }

        /* Do not wake the queues in suspend flow */
        if (test_bit(wil_status_suspending, wil->status) ||
            test_bit(wil_status_suspended, wil->status))
                return;

        /* check wake */
        for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
                struct wil_ring *cur_ring = &wil->ring_tx[i];
                struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

                if (txdata->mid != vif->mid || !cur_ring->va ||
                    !txdata->enabled || cur_ring == ring)
                        continue;

                if (wil_ring_avail_low(cur_ring)) {
                        wil_dbg_txrx(wil, "ring %d full, can't wake\n",
                                     (int)(cur_ring - wil->ring_tx));
                        return;
                }
        }

        if (!ring || wil_ring_avail_high(ring)) {
                /* enough room in the ring */
                wil_dbg_txrx(wil, "calling netif_tx_wake\n");
                netif_tx_wake_all_queues(vif_to_ndev(vif));
                vif->net_queue_stopped = false;
        }
}

void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
                           struct wil_ring *ring, bool check_stop)
{
        spin_lock(&wil->net_queue_lock);
        __wil_update_net_queues(wil, vif, ring, check_stop);
        spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
                              struct wil_ring *ring, bool check_stop)
{
        spin_lock_bh(&wil->net_queue_lock);
        __wil_update_net_queues(wil, vif, ring, check_stop);
        spin_unlock_bh(&wil->net_queue_lock);
}
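
/* Illustrative sketch (not part of the driver, kept under #if 0 so the build
 * is unaffected): the two-threshold hysteresis applied by
 * __wil_update_net_queues() above, reduced to a single queue. Stopping at a
 * low-availability watermark and waking only at a higher one keeps the netif
 * queue from flapping when ring occupancy hovers around a single threshold.
 * The struct/function names and the explicit numeric thresholds are
 * hypothetical simplifications of wil_ring_avail_low()/wil_ring_avail_high().
 */
#if 0
struct ex_queue_model {
        int avail;              /* free descriptors in the ring */
        int stop_thresh;        /* stop the queue below this (low watermark) */
        int wake_thresh;        /* wake the queue above this (high watermark) */
        bool stopped;
};

static void ex_queue_update(struct ex_queue_model *q, bool check_stop)
{
        if (check_stop) {
                if (!q->stopped && q->avail < q->stop_thresh)
                        q->stopped = true;      /* netif_tx_stop_all_queues() */
                return;
        }

        if (q->stopped && q->avail > q->wake_thresh)
                q->stopped = false;             /* netif_tx_wake_all_queues() */
}
#endif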

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct wil6210_vif *vif = ndev_to_vif(ndev);
        struct wil6210_priv *wil = vif_to_wil(vif);
        struct ethhdr *eth = (void *)skb->data;
        bool bcast = is_multicast_ether_addr(eth->h_dest);
        struct wil_ring *vring;
        static bool pr_once_fw;
        int rc;

        wil_dbg_txrx(wil, "start_xmit\n");
        if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
                if (!pr_once_fw) {
                        wil_err(wil, "FW not ready\n");
                        pr_once_fw = true;
                }
                goto drop;
        }
        if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
                wil_dbg_ratelimited(wil,
                                    "VIF not connected, packet dropped\n");
                goto drop;
        }
        if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
                wil_err(wil, "Xmit in monitor mode not supported\n");
                goto drop;
        }
        pr_once_fw = false;

        /* find vring */
        if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
                /* in STA mode (ESS), all to same VRING (to AP) */
                vring = wil_find_tx_ring_sta(wil, vif, skb);
        } else if (bcast) {
                if (vif->pbss)
                        /* in pbss, no bcast VRING - duplicate skb in
                         * all stations VRINGs
                         */
                        vring = wil_find_tx_bcast_2(wil, vif, skb);
                else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
                        /* AP has a dedicated bcast VRING */
                        vring = wil_find_tx_bcast_1(wil, vif, skb);
                else
                        /* unexpected combination, fallback to duplicating
                         * the skb in all stations VRINGs
                         */
                        vring = wil_find_tx_bcast_2(wil, vif, skb);
        } else {
                /* unicast, find specific VRING by dest. address */
                vring = wil_find_tx_ucast(wil, vif, skb);
        }
        if (unlikely(!vring)) {
                wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
                goto drop;
        }
        /* set up vring entry */
        rc = wil_tx_vring(wil, vif, vring, skb);

        switch (rc) {
        case 0:
                /* shall we stop net queues? */
                wil_update_net_queues_bh(wil, vif, vring, true);
                /* statistics will be updated on the tx_complete */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        case -ENOMEM:
                return NETDEV_TX_BUSY;
        default:
                break; /* goto drop; */
        }
drop:
        ndev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);

        return NET_XMIT_DROP;
}

static inline bool wil_need_txstat(struct sk_buff *skb)
{
        struct ethhdr *eth = (void *)skb->data;

        return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
               (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
        if (unlikely(wil_need_txstat(skb)))
                skb_complete_wifi_ack(skb, acked);
        else
                acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
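
/* Illustrative sketch (not part of the driver, kept under #if 0 so the build
 * is unaffected): the return-code contract wil_start_xmit() above relies on.
 * NETDEV_TX_BUSY tells the core the skb was NOT consumed and will be retried,
 * so it must not be freed (the -ENOMEM case). Any other outcome means the
 * driver took ownership: on success the ring holds its own reference (taken
 * with skb_get() in __wil_tx_vring()), so the xmit path may drop its copy,
 * and on the drop path the skb is freed explicitly. The helper name
 * ex_tx_result is hypothetical.
 */
#if 0
static netdev_tx_t ex_tx_result(struct sk_buff *skb, int rc)
{
        switch (rc) {
        case 0:
                dev_kfree_skb_any(skb); /* ring kept its own reference */
                return NETDEV_TX_OK;
        case -ENOMEM:
                return NETDEV_TX_BUSY;  /* not consumed; core will retry */
        default:
                dev_kfree_skb_any(skb); /* consumed by dropping */
                return NET_XMIT_DROP;
        }
}
#endif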

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_vif *vif, int ringid)
{
        struct wil6210_priv *wil = vif_to_wil(vif);
        struct net_device *ndev = vif_to_ndev(vif);
        struct device *dev = wil_to_dev(wil);
        struct wil_ring *vring = &wil->ring_tx[ringid];
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
        int done = 0;
        int cid = wil->ring2cid_tid[ringid][0];
        struct wil_net_stats *stats = NULL;
        volatile struct vring_tx_desc *_d;
        int used_before_complete;
        int used_new;

        if (unlikely(!vring->va)) {
                wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
                return 0;
        }

        if (unlikely(!txdata->enabled)) {
                wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
                return 0;
        }

        wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

        used_before_complete = wil_ring_used_tx(vring);

        if (cid < WIL6210_MAX_CID)
                stats = &wil->sta[cid].stats;

        while (!wil_ring_is_empty(vring)) {
                int new_swtail;
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
                /* For a fragmented skb, HW sets the DU bit only for the
                 * last fragment - look for it.
                 * In TSO the first DU will include the hdr desc
                 */
                int lf = (vring->swtail + ctx->nr_frags) % vring->size;
                /* TODO: check we are not past head */

                _d = &vring->va[lf].tx.legacy;
                if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
                        break;

                new_swtail = (lf + 1) % vring->size;
                while (vring->swtail != new_swtail) {
                        struct vring_tx_desc dd, *d = &dd;
                        u16 dmalen;
                        struct sk_buff *skb;

                        ctx = &vring->ctx[vring->swtail];
                        skb = ctx->skb;
                        _d = &vring->va[vring->swtail].tx.legacy;

                        *d = *_d;

                        dmalen = le16_to_cpu(d->dma.length);
                        trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
                                              d->dma.error);
                        wil_dbg_txrx(wil,
                                     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
                                     ringid, vring->swtail, dmalen,
                                     d->dma.status, d->dma.error);
                        wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
                                          (const void *)d, sizeof(*d), false);

                        wil_txdesc_unmap(dev, d, ctx);

                        if (skb) {
                                if (likely(d->dma.error == 0)) {
                                        ndev->stats.tx_packets++;
                                        ndev->stats.tx_bytes += skb->len;
                                        if (stats) {
                                                stats->tx_packets++;
                                                stats->tx_bytes += skb->len;
                                        }
                                } else {
                                        ndev->stats.tx_errors++;
                                        if (stats)
                                                stats->tx_errors++;
                                }
                                wil_consume_skb(skb, d->dma.error == 0);
                        }
                        memset(ctx, 0, sizeof(*ctx));
                        /* Make sure the ctx is zeroed before updating the tail
                         * to prevent a case where wil_tx_ring will see
                         * this descriptor as used and handle it before the
                         * ctx zeroing is completed.
                         */
                        wmb();
                        /* There is no need to touch the HW descriptor:
                         * - status bit TX_DMA_STATUS_DU is set by design,
                         *   so hardware will not try to process this desc.,
                         * - the rest of the descriptor will be initialized on Tx.
                         */
                        vring->swtail = wil_ring_next_tail(vring);
                        done++;
                }
        }

        /* performance monitoring */
        used_new = wil_ring_used_tx(vring);
        if (wil_val_in_range(wil->ring_idle_trsh,
                             used_new, used_before_complete)) {
                wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
                             ringid, used_before_complete, used_new);
                txdata->last_idle = get_cycles();
        }

        /* shall we wake net queues? */
        if (done)
                wil_update_net_queues(wil, vif, vring, false);

        return done;
}
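
/* Illustrative sketch (not part of the driver, kept under #if 0 so the build
 * is unaffected): the index arithmetic used by the completion loop in
 * wil_tx_complete() above. A frame that used (nr_frags + 1) descriptors
 * starting at swtail has the DU bit set by HW only on its last descriptor,
 * so the loop probes lf = (swtail + nr_frags) % size and, once DU is seen,
 * releases every descriptor up to new_swtail = (lf + 1) % size. For example,
 * with size = 16, swtail = 14 and nr_frags = 2: lf = 0 (wrap-around) and
 * new_swtail = 1. The helper name ex_last_desc_idx is hypothetical.
 */
#if 0
static int ex_last_desc_idx(int swtail, int nr_frags, int size)
{
        return (swtail + nr_frags) % size;
}
#endif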