/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/*
 * Available space in Tx Vring
 */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;
	int used = (vring->size + swhead - swtail) % vring->size;

	return vring->size - used - 1;
}

/**
 * wil_vring_wmark_low - low watermark for available descriptor space
 */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size/8;
}

/**
 * wil_vring_wmark_high - high watermark for available descriptor space
 */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size/4;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/*
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &(vring->va[i].tx);
		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n",
		     vring->size, vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = RX_BUF_LEN;
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
	dma_addr_t pa;

	/* TODO align */
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}

/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
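	/* Extended layout used when rtap_include_phy_info is set: the fixed
	 * radiotap fields are followed by a vendor namespace carrying the
	 * raw Rx descriptor and the PHY info reported by the HW.
	 */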
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));
			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);
				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

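	/* when no channel is set, fall back to 58320 MHz - the center
	 * frequency of 60 GHz channel 1
	 */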
	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) + phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}

/*
 * Fast swap in place between 2 registers
 */
static void wil_swap_u16(u16 *a, u16 *b)
{
	*a ^= *b;
	*b ^= *a;
	*a ^= *b;
}

static void wil_swap_ethaddr(void *data)
{
	struct ethhdr *eth = data;
	u16 *s = (u16 *)eth->h_source;
	u16 *d = (u16 *)eth->h_dest;

	wil_swap_u16(s++, d++);
	wil_swap_u16(s++, d++);
	wil_swap_u16(s, d);
}

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int sz = RX_BUF_LEN;
	u16 dmalen;
	u8 ftype;
	u8 ds_bits;
	int cid;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

	if (wil_vring_is_empty(vring))
		return NULL;

	_d = &(vring->va[vring->swhead].rx);
	if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[vring->swhead].skb;
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);
	vring->ctx[vring->swhead].skb = NULL;
	wil_vring_advance_head(vring, 1);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(vring->swhead, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	if (dmalen > sz) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		kfree_skb(skb);
		return NULL;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;
	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	wil->stats.last_mcs_rx = stats->last_mcs_rx;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/*
	 * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor.
	 * If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (ftype != IEEE80211_FTYPE_DATA) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (skb->len < ETH_HLEN) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
		/* L4 protocol identified, csum calculated */
		if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand the Microsoft IP stack
		 * that mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	ds_bits = wil_rxdesc_ds_bits(d);
	if (ds_bits == 1) {
		/*
		 * HW bug - in ToDS mode, i.e. Rx on AP side,
		 * addresses get swapped
		 */
		wil_swap_ethaddr(skb->data);
	}

	return skb;
}

/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (rc) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

	return rc;
}

/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	struct wil_net_stats *stats = &wil->sta[cid].stats;

	skb_orphan(skb);

	rc = napi_gro_receive(&wil->napi_rx, skb);

	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
	}
}

/**
 * Process all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (!v->va) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			struct ethhdr *eth = (void *)skb->data;

			skb->protocol = eth_type_trans(skb, ndev);

			if (is_unicast_ether_addr(eth->h_dest))
				wil_rx_reorder(wil, skb);
			else
				wil_netif_rx_any(skb, ndev);
		}
	}
	wil_rx_refill(wil, v->size);
}

int wil_rx_init(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	vring->size = WIL6210_RX_RING_SIZE;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 16,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;

	return 0;
out_free:
	wil_vring_free(wil, vring, 1);
out:

	return rc;
}

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];

	WARN_ON(!mutex_is_locked(&wil->mutex));

	if (!vring->va)
		return;

	/* make sure NAPI won't touch this vring */
	wil->vring_tx_data[id].enabled = 0;
	if (test_bit(wil_status_napi_en, &wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}

static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	if (!wil->sta[cid].data_port_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];
			wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
				     __func__, eth->h_dest, i);
			if (v->va) {
				return v;
			} else {
				wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
				return NULL;
			}
		}
	}

	return NULL;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];
	memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);

/*
 * Find 1-st vring and return it; set dest address for this vring in skb
 * duplicate skb and send it to other active vrings
 */
static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
				  struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (!wil->sta[cid].data_port_open)
			continue;

		goto found;
	}

	wil_err(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (!wil->sta[cid].data_port_open)
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

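/* Fill one Tx DMA descriptor: DMA address and length of the segment,
 * QID routing to the target vring, and MAC descriptor defaults -
 * destination index 0 and 802.3 header translation (type 1) with
 * SNAP header insertion.
 */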
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* use dst index 0 */
	d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
		       (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= ((nr_frags + 1) <<
		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
					 struct vring_tx_desc *d,
					 struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	uint i = swhead;
	dma_addr_t pa;

	wil_dbg_txrx(wil, "%s()\n", __func__);

	if (avail < 1 + nr_frags) {
		wil_err(wil, "Tx ring full. No space for %d fragments\n",
			1 + nr_frags);
		return -ENOMEM;
	}
	_d = &(vring->va[i].tx);

	pa = dma_map_single(dev, skb->data,
			    skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
		     skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
	/* Process TCP/UDP checksum offloading */
	if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
		wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags);
	if (nr_frags)
		*_d = *d;

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);
		i = (swhead + f + 1) % vring->size;
		_d = &(vring->va[i].tx);
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_cksum_set(wil, d, skb);
		*_d = *d;
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));

	return 0;
dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &(vring->va[i].tx);
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

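/* ndo_start_xmit handler: select a Tx vring for the destination (unicast
 * lookup or broadcast duplication), queue the frame, and stop the netdev
 * queues when available descriptors drop below the low watermark.
 */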
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (!test_bit(wil_status_fwready, &wil->status)) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (!test_bit(wil_status_fwconnected, &wil->status)) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (is_unicast_ether_addr(eth->h_dest)) {
		vring = wil_find_tx_vring(wil, skb);
	} else {
		vring = wil_tx_bcast(wil, skb);
	}
	if (!vring) {
		wil_err(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))
		netif_tx_stop_all_queues(wil_to_ndev(wil));

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	volatile struct vring_tx_desc *_d;

	if (!vring->va) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (!txdata->enabled) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. Look for it.
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (!(_d->dma.status & TX_DMA_STATUS_DU))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct wil_ctx *ctx = &vring->ctx[vring->swtail];
			struct sk_buff *skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     vring->swtail, dmalen, d->dma.status,
				     d->dma.error);
			wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (d->dma.error == 0) {
					ndev->stats.tx_packets++;
					stats->tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					stats->tx_bytes += skb->len;
				} else {
					ndev->stats.tx_errors++;
					stats->tx_errors++;
				}

				dev_kfree_skb_any(skb);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}
	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring))
		netif_tx_wake_all_queues(wil_to_ndev(wil));

	return done;
}