/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
#include "txrx_edma.h"

bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

/* Drop Tx packets in case Tx ring is full */
bool drop_if_ring_full;

static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

/* wil_ring_wmark_low - low watermark for available descriptor space */
static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
	return ring->size / 8;
}

/* wil_ring_wmark_high - high watermark for available descriptor space */
static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
	return ring->size / 4;
}

/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_ring_avail_low(struct wil_ring *ring)
{
	return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}

/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_ring_avail_high(struct wil_ring *ring)
{
	return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
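/* The two watermarks implement hysteresis for the net-queue flow control
 * in __wil_update_net_queues() below: queues are stopped once the ring
 * drops under the low mark and woken only after it climbs back above the
 * high mark, so a ring hovering around a single threshold does not cause
 * stop/wake flapping.
 *
 * Illustrative numbers, assuming a ring sized 1024 descriptors:
 *
 *	wil_ring_wmark_low(ring)  == 1024 / 8 == 128
 *	wil_ring_wmark_high(ring) == 1024 / 4 == 256
 *
 * i.e. stop when fewer than 128 descriptors remain, wake when more than
 * 256 become available again.
 */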
/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *vring = &wil->ring_tx[i];
		int vring_index = vring - wil->ring_tx;
		struct wil_ring_tx_data *txdata =
			&wil->ring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
				WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_ring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has a limitation that all vring addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d =
			&vring->va[i].tx.legacy;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
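/* Why the temporary 32-bit mask above satisfies the HW restriction: a
 * buffer allocated under a 32-bit mask has a bus address that fits in 32
 * bits, so the upper 16 bits of its 48-bit address are all zero - and
 * therefore trivially identical for every ring. A sketch of the pattern:
 *
 *	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
 *	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(wil->dma_addr_size));
 *
 * Only the ring memory itself is pinned low; data buffers mapped later
 * with dma_map_single() still use the full dma_addr_size mask.
 */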
static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
			     struct wil_ctx *ctx)
{
	struct vring_tx_desc *d = &desc->legacy;
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (!vring->is_rx) {
		int vring_index = vring - wil->ring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_ring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (!vring->is_rx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
				&vring->va[vring->swtail].tx.legacy;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_ring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_ring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
				&vring->va[vring->swhead].rx.legacy;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_ring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	/**
	 * Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
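/* The dd/d/_d triple above is a recurring idiom in this file: _d points
 * at the live descriptor in DMA-coherent shared memory and is volatile,
 * so it is never manipulated field by field. Instead the descriptor is
 * staged in an on-stack shadow and committed with one struct copy:
 *
 *	struct vring_rx_desc dd, *d = &dd;	// on-stack shadow
 *	volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
 *
 *	d->dma.length = cpu_to_le16(sz);	// build in cached memory
 *	*_d = *d;				// commit to shared memory
 *
 * This keeps accesses to shared memory to a minimum and confines the
 * ownership handoff (the status field) to a single, deliberate write.
 */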
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap *rtap;
	int rtap_len = sizeof(struct wil6210_rtap);
	struct ieee80211_channel *ch = wil->monitor_chandef.chan;

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap = skb_push(skb, rtap_len);
	memset(rtap, 0, rtap_len);

	rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap->rthdr.it_len = cpu_to_le16(rtap_len);
	rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap->chnl_flags = cpu_to_le16(0);

	rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap->mcs_flags = 0;
	rtap->mcs_index = wil_rxdesc_mcs(d);
}

static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
	struct vring_rx_desc *_d;
	struct wil_ring *ring = &wil->ring_rx;

	_d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
	if (_d->dma.status & RX_DMA_STATUS_DU)
		return false;

	return true;
}

static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int mid = wil_rxdesc_mid(d);
	struct wil6210_vif *vif = wil->vifs[mid];
	/* cid from DMA descriptor is limited to 3 bits.
	 * In case of cid>=8, the value would be cid modulo 8 and we need to
	 * find real cid by locating the transmitter (ta) inside sta array
	 */
	int cid = wil_rxdesc_cid(d);
	unsigned int snaplen = wil_rx_snaplen();
	struct ethhdr *eth;
	struct ieee80211_hdr_3addr *hdr;
	int i;
	unsigned char *ta;
	u8 ftype;

	/* in monitor mode there are no connections */
	if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
		return cid;

	ftype = wil_rxdesc_ftype(d) << 2;
	if (likely(ftype == IEEE80211_FTYPE_DATA)) {
		if (unlikely(skb->len < ETH_HLEN + snaplen)) {
			wil_err_ratelimited(wil,
					    "Short data frame, len = %d\n",
					    skb->len);
			return -ENOENT;
		}
		eth = (void *)skb->data;
		ta = eth->h_source;
	} else {
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			wil_err_ratelimited(wil, "Short frame, len = %d\n",
					    skb->len);
			return -ENOENT;
		}
		hdr = (void *)skb->data;
		ta = hdr->addr2;
	}

	if (max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
		return cid;

	/* assuming no concurrency between AP interfaces and STA interfaces.
	 * multista is used only in P2P_GO or AP mode. In other modes return
	 * cid from the rx descriptor
	 */
	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
	    vif->wdev.iftype != NL80211_IFTYPE_AP)
		return cid;

	/* For Rx packets cid from rx descriptor is limited to 3 bits (0..7),
	 * to find the real cid, compare transmitter address with the stored
	 * stations mac address in the driver sta array
	 */
	for (i = cid; i < max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
		if (wil->sta[i].status != wil_sta_unused &&
		    ether_addr_equal(wil->sta[i].addr, ta)) {
			cid = i;
			break;
		}
	}
	if (i >= max_assoc_sta) {
		wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
				    ta, vif->wdev.iftype, ftype, skb->len);
		cid = -ENOENT;
	}

	return cid;
}
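/* Worked example of the extended-CID search above: the descriptor carries
 * only cid % WIL6210_RX_DESC_MAX_CID, i.e. cid % 8. With illustrative
 * values max_assoc_sta == 16 and descriptor cid == 2, the candidate
 * stations are sta[2] and sta[10]; the loop walks i = 2, 10 and picks the
 * entry whose stored MAC matches the transmitter address:
 *
 *	for (i = 2; i < 16; i += 8)
 *		if (wil->sta[i].status != wil_sta_unused &&
 *		    ether_addr_equal(wil->sta[i].addr, ta))
 *			return i;
 */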
/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif;
	struct net_device *ndev;
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid, mid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));

again:
	if (unlikely(wil_ring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx.legacy;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_ring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	mid = wil_rxdesc_mid(d);
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
			     mid);
		kfree_skb(skb);
		goto again;
	}
	ndev = vif_to_ndev(vif);
	if (unlikely(dmalen > sz)) {
		wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
				    dmalen);
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	cid = wil_rx_get_cid_by_skb(wil, skb);
	if (cid == -ENOENT) {
		kfree_skb(skb);
		goto again;
	}
	wil_skb_set_cid(skb, (u8)cid);
	stats = &wil->sta[cid].stats;

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, vif, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand the Microsoft IP stack,
		 * which mis-calculates the TCP checksum - if it should be
		 * 0x0, it writes 0xffff in violation of RFC 1624
		 */
		else
			stats->rx_csum_err++;
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}
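/* Descriptor ownership handshake used by reap/refill: wil_vring_alloc_skb()
 * writes dma.status = 0 (HW owned), HW fills the buffer and sets the DU
 * ("descriptor used") bit, and wil_vring_reap_rx() only consumes entries
 * whose DU bit is set. A cleared DU bit at swhead therefore simply marks
 * the end of the completed area, not an error:
 *
 *	if (!(_d->dma.status & RX_DMA_STATUS_DU))
 *		return NULL;	// nothing more to reap for now
 *
 * wil_is_rx_idle() above relies on the same bit to tell whether HW has
 * completed buffers still pending.
 */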
/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 * Note: we have a single RX queue for servicing all VIFs, but we
 * allocate skbs with headroom according to main interface only. This
 * means it will not work with monitor interface together with other VIFs.
 * Currently we only support monitor interface on its own without other VIFs,
 * and we will need to fix this code once we add support.
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil->main_ndev;
	struct wil_ring *v = &wil->ring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_ring_next_tail(v),
	     (next_tail != v->swhead) && (count-- > 0);
	     v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
					    rc, v->swtail);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}
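/* The refill loop keeps the classic one-empty-slot ring invariant: it
 * stops when advancing swtail would make it equal to swhead, so a full
 * ring stays distinguishable from an empty one (swtail == swhead).
 * For a (toy) ring of size 4 with swhead == 0 and an empty ring:
 *
 *	fill slot 0 (next_tail 1), slot 1 (next_tail 2), slot 2 (next_tail 3),
 *	then stop: the next_tail would wrap to 0 == swhead
 *
 * i.e. at most size - 1 buffers are ever posted to HW.
 */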
/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 *
 * Cut'n'paste from original memcmp (see lib/string.c)
 * with minimal modifications
 */
int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}

static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_skb_get_cid(skb);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
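/* Why compare in reverse: the 6-byte GCMP packet number is read straight
 * from the little-endian descriptor field pn_15_0, so the least
 * significant byte comes first in memory and a numeric comparison must
 * start from the *last* byte. Illustrative values, assuming the last
 * accepted PN was 0x000000000100:
 *
 *	u8 last[6] = { 0x00, 0x01, 0, 0, 0, 0 };	// 0x...0100
 *	u8 new1[6] = { 0xff, 0x00, 0, 0, 0, 0 };	// 0x...00ff
 *	u8 new2[6] = { 0x01, 0x01, 0, 0, 0, 0 };	// 0x...0101
 *
 *	reverse_memcmp(new1, last, 6) <= 0	// replay, drop
 *	reverse_memcmp(new2, last, 6) > 0	// fresh, accept
 */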
static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
			      struct wil_net_stats *stats)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
	    (d->dma.error & RX_DMA_ERROR_MIC)) {
		stats->rx_mic_error++;
		wil_dbg_txrx(wil, "MIC error, dropping packet\n");
		return -EFAULT;
	}

	return 0;
}

static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
				    int *security)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	*cid = wil_skb_get_cid(skb);
	*security = wil_rxdesc_security(d);
}

/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_vif *vif = ndev_to_vif(ndev);
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = vif_to_wdev(vif);
	unsigned int len = skb->len;
	int cid;
	int security;
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
	};

	wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);

	stats = &wil->sta[cid].stats;

	skb_orphan(skb);

	if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
		rc = GRO_DROP;
		dev_kfree_skb(skb);
		stats->rx_replay++;
		goto stats;
	}

	/* check errors reported by HW and update statistics */
	if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
		dev_kfree_skb(skb);
		return;
	}

	if (wdev->iftype == NL80211_IFTYPE_STATION) {
		if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) {
			/* mcast packet looped back to us */
			rc = GRO_DROP;
			dev_kfree_skb(skb);
			goto stats;
		}
	} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, vif->mid,
						    eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		skb->dev = ndev;
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
stats:
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}
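/* Summary of the AP bridging decisions above (non-isolated AP only):
 *
 *	DA multicast        -> skb to local stack, skb_copy() back to medium
 *	DA is an associated -> skb re-queued to the medium only (skb = NULL)
 *	peer of this AP
 *	otherwise           -> skb to local stack only
 *
 * In STA mode the only special case is dropping our own looped-back
 * multicast; everything else goes up via napi_gro_receive().
 */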
/**
 * Process all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil->main_ndev;
	struct wireless_dev *wdev = ndev->ieee80211_ptr;
	struct wil_ring *v = &wil->ring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		/* monitor is currently supported on main interface only */
		if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}

static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
	if (mtu_max > wil->rx_buf_len) {
		/* do not allow RX buffers to be smaller than mtu_max, for
		 * backward compatibility (mtu_max parameter was also used
		 * to support receiving large packets)
		 */
		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
		wil->rx_buf_len = mtu_max;
	}
}

static int wil_rx_init(struct wil6210_priv *wil, uint order)
{
	struct wil_ring *vring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "rx_init\n");

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	wil_rx_buf_len_init(wil);

	vring->size = 1 << order;
	vring->is_rx = true;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
err_free:
	wil_vring_free(wil, vring);

	return rc;
}

static void wil_rx_fini(struct wil6210_priv *wil)
{
	struct wil_ring *vring = &wil->ring_rx;

	wil_dbg_misc(wil, "rx_fini\n");

	if (vring->va)
		wil_vring_free(wil, vring);
}

static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
			   u32 len, int vring_index)
{
	struct vring_tx_desc *d = &desc->legacy;

	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	txdata->mid = U8_MAX;
	spin_unlock_bh(&txdata->lock);
}
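/* wil_rx_handle() is written against the usual NAPI budget contract.
 * A sketch of the expected caller shape (the actual poll function lives
 * in the interrupt handling code and also re-arms the Rx interrupt):
 *
 *	static int poll_rx(struct napi_struct *napi, int budget)
 *	{
 *		int quota = budget, done;
 *
 *		wil_rx_handle(wil, &quota);
 *		done = budget - quota;
 *		if (done < budget)
 *			napi_complete(napi);	// ring drained early
 *		return done;
 *	}
 */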
static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
			     int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply = {
		.cmd = {.status = WMI_FW_STATUS_FAILURE},
	};
	struct wil_ring *vring = &wil->ring_tx[id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];

	if (cid >= WIL6210_RX_DESC_MAX_CID) {
		cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
		cmd.vring_cfg.cid = cid;
		cmd.vring_cfg.tid = tid;
	} else {
		cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
	}

	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->is_rx = false;
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->ring2cid_tid[id][0] = cid;
	wil->ring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!vif->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->mid = vif->mid;
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring);
	wil->ring2cid_tid[id][0] = max_assoc_sta;
	wil->ring2cid_tid[id][1] = 0;

out:
	return rc;
}
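/* Note the defensive initialization of the WMI reply buffer above:
 *
 *	} __packed reply = {
 *		.cmd = {.status = WMI_FW_STATUS_FAILURE},
 *	};
 *
 * wmi_call() copies the FW event into @reply only when one actually
 * arrives within the 100 ms timeout. Pre-setting the status to
 * WMI_FW_STATUS_FAILURE guarantees the status check never reads
 * uninitialized stack memory and a missing or short event reads as a
 * failure. The same pattern is used in the modify and broadcast ring
 * setup paths below.
 */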
static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
			       int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = 0,
			},
			.ringid = ring_id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply = {
		.cmd = {.status = WMI_FW_STATUS_FAILURE},
	};
	struct wil_ring *vring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
		     cid, tid);
	lockdep_assert_held(&wil->mutex);

	if (!vring->va) {
		wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
		return -EINVAL;
	}

	if (wil->ring2cid_tid[ring_id][0] != cid ||
	    wil->ring2cid_tid[ring_id][1] != tid) {
		wil_err(wil, "ring info does not match cid=%u tid=%u\n",
			wil->ring2cid_tid[ring_id][0],
			wil->ring2cid_tid[ring_id][1]);
	}

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto fail;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx modify failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto fail;
	}

	/* set BA aggregation window size to 0 to force a new BA with the
	 * new AP
	 */
	txdata->agg_wsize = 0;
	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
fail:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
	wil->ring2cid_tid[ring_id][1] = 0;
	return rc;
}
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply = {
		.cmd = {.status = WMI_FW_STATUS_FAILURE},
	};
	struct wil_ring *vring = &wil->ring_tx[id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->is_rx = false;
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->ring2cid_tid[id][0] = max_assoc_sta; /* CID */
	wil->ring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!vif->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
		      &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->mid = vif->mid;
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring);
out:
	return rc;
}

static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
					  struct wil6210_vif *vif,
					  struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	if (cid < 0 || cid >= max_assoc_sta)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;
		if (wil->ring2cid_tid[i][0] == cid) {
			struct wil_ring *v = &wil->ring_tx[i];
			struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
				     eth->h_dest, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil,
					     "find_tx_ucast: vring[%d] not valid\n",
					     i);
				return NULL;
			}
		}
	}

	return NULL;
}

static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
		       struct wil_ring *ring, struct sk_buff *skb);

static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
					     struct wil6210_vif *vif,
					     struct sk_buff *skb)
{
	struct wil_ring *ring;
	int i;
	u8 cid;
	struct wil_ring_tx_data *txdata;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		ring = &wil->ring_tx[i];
		txdata = &wil->ring_tx_data[i];
		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
			continue;

		cid = wil->ring2cid_tid[i][0];
		if (cid >= max_assoc_sta) /* skip BCAST */
			continue;

		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return ring;
	}

	wil_dbg_txrx(wil, "Tx while no rings active?\n");

	return NULL;
}
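/* Both lookup helpers above rely on two conventions:
 *
 *  - wil->ring2cid_tid[i] maps Tx ring i to its {cid, tid} pair; a cid of
 *    max_assoc_sta marks a broadcast ring (see wil_vring_init_bcast()),
 *    which is why unicast lookups skip entries with cid >= max_assoc_sta.
 *
 *  - before 802.1X authorization completes (dot1x_open == false) a ring
 *    may carry EAPOL frames only:
 *
 *	if (!txdata->dot1x_open && skb->protocol != cpu_to_be16(ETH_P_PAE))
 *		continue;	// not eligible for general data yet
 */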
/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *	use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *	Find 1-st vring and return it;
 *	duplicate skb and send it to other active vrings;
 *	in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					    struct wil6210_vif *vif,
					    struct sk_buff *skb)
{
	struct wil_ring *v;
	struct wil_ring_tx_data *txdata;
	int i = vif->bcast_ring;

	if (i < 0)
		return NULL;
	v = &wil->ring_tx[i];
	txdata = &wil->ring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->ring_tx_data[i].dot1x_open &&
	    skb->protocol != cpu_to_be16(ETH_P_PAE))
		return NULL;

	return v;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->ring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}

static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					    struct wil6210_vif *vif,
					    struct sk_buff *skb)
{
	struct wil_ring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	struct wil_ring_tx_data *txdata, *txdata2;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	/* find 1-st vring eligible for data */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->ring_tx[i];
		txdata = &wil->ring_tx_data[i];
		if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
			continue;

		cid = wil->ring2cid_tid[i][0];
		if (cid >= max_assoc_sta) /* skip BCAST */
			continue;
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->ring_tx[i];
		txdata2 = &wil->ring_tx_data[i];
		if (!v2->va || txdata2->mid != vif->mid)
			continue;
		cid = wil->ring2cid_tid[i][0];
		if (cid >= max_assoc_sta) /* skip BCAST */
			continue;
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_ring(wil, vif, v2, skb2);
			/* successful call to wil_tx_ring takes skb2 ref */
			dev_kfree_skb_any(skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}
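/* Pseudo-DMS in wil_find_tx_bcast_2(), by example: an AP with three
 * associated stations A, B and C (one Tx ring each) must deliver a
 * broadcast frame to all of them, but without a dedicated broadcast ring
 * every transmission is directed. So the frame is unicast-replicated:
 *
 *	ring(A): original skb, DA rewritten to A's MAC, returned to caller
 *	ring(B): skb_copy(), DA rewritten to B's MAC, sent via wil_tx_ring()
 *	ring(C): skb_copy(), DA rewritten to C's MAC, sent via wil_tx_ring()
 *
 * When the broadcast originated from one of the peers (Rx->Tx re-routing
 * at the AP), that peer's own ring is skipped by the memcmp() against the
 * source address.
 */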
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		     (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/**
 * Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns 0 on success, -EINVAL for an unsupported protocol.
 *
 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
			(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
			(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
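/* Worked example of the non-TSO path above for a CHECKSUM_PARTIAL TCPv4
 * skb (macro names abbreviated; full DMA_CFG_DESC_TX_* definitions live
 * in txrx.h):
 *
 *	d->dma.b11       = ETH_HLEN | BIT(L3T_IPV4);	// MAC hdr len, IPv4
 *	d->dma.ip_length = skb_network_header_len(skb);	// e.g. 20
 *	d->dma.d0       |= 2 << L4_TYPE_POS;		// L4 is TCP
 *	d->dma.d0       |= tcp_hdrlen(skb);		// e.g. 20 or 32
 *	d->dma.d0       |= TCP_UDP_CHECKSUM_EN | PSEUDO_HEADER_CALC_EN;
 *
 * With these bits set, HW computes and inserts the L4 checksum including
 * the pseudo-header, so the stack may hand over packets carrying only a
 * partial checksum.
 */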
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}

static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
			      struct wil_ring *vring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss*/

	u32 swhead = vring->swhead;
	int used, avail = wil_ring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical 4K page holds 3-4 payloads; we assume each fragment
	 * is a full payload, which is how min_desc_required has been
	 * calculated. In reality we might need more or fewer descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len*/
	hdrlen = ETH_HLEN +
		 (int)skb_network_header_len(skb) +
		 tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx.legacy;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
				  hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;
	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (headlen) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx.legacy;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
						  pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */
			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag*/
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment include hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* last descriptor will be copied at the end
				 * of this TSO processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
				*_desc = *d;
		}
	}

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr)*/
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
		_desc = &vring->va[i].tx.legacy;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}
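/* Shape of a TSO job as built above, for an illustrative skb with two
 * page fragments where each fragment happens to be one full mss:
 *
 *	desc 0: headers only (wil_tso_type_hdr), nr_frags = total descs
 *	desc 1: frag 0, segment 1, EOP/MARK_WB/DMA_IT set by
 *		wil_tx_last_desc() when the segment closes
 *	desc 2: frag 1, segment 2, likewise closed, and additionally
 *		marked wil_tso_type_lst as the final descriptor
 *
 * ctx[].nr_frags is distributed across the header and first-data
 * contexts so the Tx-complete path can release the whole chain once the
 * closing descriptor reports DU.
 */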
static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			 struct wil_ring *ring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = ring->swhead;
	int avail = wil_ring_avail_tx(ring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (ring_index == vif->bcast_ring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
		     skb->len, ring_index, nr_frags);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &ring->va[i].tx.legacy;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	ring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
				  ring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			ring_index);
		goto dma_error;
	}

	ring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % ring->size;
		_d = &ring->va[i].tx.legacy;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				ring_index);
			goto dma_error;
		}
		ring->ctx[i].mapped_as = wil_mapped_as_page;
		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
					  pa, len, ring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	ring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	ring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(ring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
		     ring->swhead);
	trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;
dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % ring->size;
		ctx = &ring->ctx[i];
		_d = &ring->va[i].tx.legacy;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil->txrx_ops.tx_desc_unmap(dev,
					    (union wil_tx_desc *)d,
					    ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
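/* wil_tx_ring - serialized entry point for transmitting one skb.
 *
 * Takes txdata->lock, refuses traffic while suspend/resume is in
 * flight, and dispatches GSO skbs to the TSO handler
 * (txrx_ops.tx_ring_tso) and everything else to __wil_tx_ring().
 */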
static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
		       struct wil_ring *ring, struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int rc;

	spin_lock(&txdata->lock);

	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status) ||
	    test_bit(wil_status_resuming, wil->status)) {
		wil_dbg_txrx(wil,
			     "suspend/resume in progress. drop packet\n");
		spin_unlock(&txdata->lock);
		return -EINVAL;
	}

	rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
	     (wil, vif, ring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

/**
 * Check status of tx vrings and stop/wake net queues if needed.
 * It will start/stop net queues of a specific VIF net_device.
 *
 * This function does one of two checks:
 * In case check_stop is true, will check if net queues need to be stopped.
 * If the conditions for stopping are met, netif_tx_stop_all_queues() is
 * called.
 * In case check_stop is false, will check if net queues need to be woken.
 * If the conditions for waking are met, netif_tx_wake_all_queues() is
 * called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it.
 * Can be NULL when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if the modified vring has low
 * descriptor availability, and to wake them only if no vring is low on
 * descriptors and the modified vring has high descriptor availability.
 * The gap between the low and high watermarks provides hysteresis, so a
 * ring hovering around a single threshold does not cause stop/wake
 * thrashing.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct wil6210_vif *vif,
					   struct wil_ring *ring,
					   bool check_stop)
{
	int i;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	if (unlikely(!vif))
		return;

	if (ring)
		wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
			     vif->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
			     check_stop, vif->mid, vif->net_queue_stopped);

	if (ring && drop_if_ring_full)
		/* no need to stop/wake net queues */
		return;

	if (check_stop == vif->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!ring || unlikely(wil_ring_avail_low(ring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(vif_to_ndev(vif));
			vif->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* Do not wake the queues in suspend flow */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status))
		return;

	/* check wake */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *cur_ring = &wil->ring_tx[i];
		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

		if (txdata->mid != vif->mid || !cur_ring->va ||
		    !txdata->enabled || cur_ring == ring)
			continue;

		if (wil_ring_avail_low(cur_ring)) {
			wil_dbg_txrx(wil, "ring %d full, can't wake\n",
				     (int)(cur_ring - wil->ring_tx));
			return;
		}
	}

	if (!ring || wil_ring_avail_high(ring)) {
		/* enough room in the ring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(vif_to_ndev(vif));
		vif->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
			   struct wil_ring *ring, bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil,
			      struct wil6210_vif *vif,
			      struct wil_ring *ring, bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_vif *vif = ndev_to_vif(ndev);
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_ring *ring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
		wil_dbg_ratelimited(wil,
				    "VIF not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;
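	/* Select the destination ring: in STA (ESS) mode everything goes
	 * to the single ring towards the AP; broadcast/multicast uses the
	 * dedicated bcast ring in AP mode, while PBSS (which has no bcast
	 * ring) duplicates the skb to every station's ring; unicast is
	 * matched to a ring by destination address.
	 */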
	if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
		/* in STA mode (ESS), all to same VRING (to AP) */
		ring = wil_find_tx_ring_sta(wil, vif, skb);
	} else if (bcast) {
		if (vif->pbss)
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
		else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			ring = wil_find_tx_bcast_1(wil, vif, skb);
		else
			/* unexpected combination, fall back to duplicating
			 * the skb in all stations VRINGs
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		ring = wil_find_tx_ucast(wil, vif, skb);
	}
	if (unlikely(!ring)) {
		wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_ring(wil, vif, ring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vif, ring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		if (drop_if_ring_full)
			goto drop;
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
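/* wil_tx_latency_calc - bin one completed skb into the per-STA latency
 * histogram. The timestamp taken at xmit time is stored in skb->cb; the
 * delta to "now" is divided by tx_latency_res (microseconds per bin) and
 * clamped to the last bin. Illustrative example (values hypothetical):
 * with tx_latency_res = 100 us, an skb completed after 250 us lands in
 * bin 2, and anything past 100 * (WIL_NUM_LATENCY_BINS - 1) us falls
 * into the final bin.
 */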
void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
			 struct wil_sta_info *sta)
{
	int skb_time_us;
	int bin;

	if (!wil->tx_latency)
		return;

	if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
		return;

	skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
	bin = skb_time_us / wil->tx_latency_res;
	bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);

	wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
	sta->tx_latency_bins[bin]++;
	sta->stats.tx_latency_total_us += skb_time_us;
	if (skb_time_us < sta->stats.tx_latency_min_us)
		sta->stats.tx_latency_min_us = skb_time_us;
	if (skb_time_us > sta->stats.tx_latency_max_us)
		sta->stats.tx_latency_max_us = skb_time_us;
}

/**
 * Clean up transmitted skbs from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_vif *vif, int ringid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct net_device *ndev = vif_to_ndev(vif);
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *vring = &wil->ring_tx[ringid];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
	int done = 0;
	int cid = wil->ring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_ring_used_tx(vring);

	if (cid < max_assoc_sta)
		stats = &wil->sta[cid].stats;

	while (!wil_ring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For the fragmented skb, HW will set DU bit only for the
		 * last fragment. Look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx.legacy;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx.legacy;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil->txrx_ops.tx_desc_unmap(dev,
						    (union wil_tx_desc *)d,
						    ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
								    &wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the
			 * tail to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx
			 * zero is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_ring_next_tail(vring);
			done++;
		}
	}
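	/* Performance monitoring: if this completion pass brought the
	 * used-descriptor count down across ring_idle_trsh, the ring just
	 * went idle; stamp last_idle so the Tx path can account the idle
	 * interval when it next fills the ring past the threshold.
	 */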
	used_new = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vif, vring, false);

	return done;
}

static inline int wil_tx_init(struct wil6210_priv *wil)
{
	return 0;
}

static inline void wil_tx_fini(struct wil6210_priv *wil) {}

static void wil_get_reorder_params(struct wil6210_priv *wil,
				   struct sk_buff *skb, int *tid, int *cid,
				   int *mid, u16 *seq, int *mcast, int *retry)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	*tid = wil_rxdesc_tid(d);
	*cid = wil_skb_get_cid(skb);
	*mid = wil_rxdesc_mid(d);
	*seq = wil_rxdesc_seq(d);
	*mcast = wil_rxdesc_mcast(d);
	*retry = wil_rxdesc_retry(d);
}

void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation;
	/* TX ops */
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
	wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
	wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
	wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
	wil->txrx_ops.ring_fini_tx = wil_vring_free;
	wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
	wil->txrx_ops.tx_init = wil_tx_init;
	wil->txrx_ops.tx_fini = wil_tx_fini;
	wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
	wil->txrx_ops.rx_error_check = wil_rx_error_check;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
	wil->txrx_ops.rx_fini = wil_rx_fini;
}