/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"

#define WIL_EDMA_MAX_DATA_OFFSET (2)

static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static int wil_find_free_sring(struct wil6210_priv *wil)
{
	int i;

	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
		if (!wil->srings[i].va)
			return i;
	}

	return -EINVAL;
}

static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	sring->pa = 0;
	sring->va = NULL;
}
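/* Note: slots in wil->srings[] are allocated on demand; a slot whose va
 * pointer is NULL is free (see wil_find_free_sring()), and wil_sring_free()
 * above returns a slot to that state.
 */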
static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is
	 * necessary since the DR bit must be initialized to 0.
	 */
	sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}

static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];

	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

/**
 * Allocate one skb for Rx descriptor RING
 */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN +
		WIL_EDMA_MAX_DATA_OFFSET;
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}
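/* Status ring consumption model, used by the helpers below: HW writes each
 * status message with a "descriptor ready" (DR) bit equal to the current
 * expected polarity, so a message is new only while its DR bit matches
 * sring->desc_rdy_pol. Whenever the SW head wraps around the ring the
 * expected polarity flips (wil_sring_advance_swhead()), so stale messages
 * from the previous lap, whose DR bit still carries the old polarity, are
 * never mistaken for new ones. E.g. for a ring of size 8: on lap 0 all 8
 * entries are consumed expecting DR == 1, on lap 1 expecting DR == 0, and
 * so on. This is also why status rings are zeroed at allocation and
 * desc_rdy_pol starts at 1.
 */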
static inline
void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
{
	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
	       sring->elem_size);
}

static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
	sring->swhead = (sring->swhead + 1) % sring->size;
	if (sring->swhead == 0)
		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}

static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;
	u32 swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}

static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	u32 next_tail;
	u32 swhead = (ring->swhead + 1) % ring->size;
	dma_addr_t pa;
	u16 dmalen;

	for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
	     ring->swtail = next_tail) {
		struct wil_rx_enhanced_desc dd, *d = &dd;
		struct wil_rx_enhanced_desc *_d =
			(struct wil_rx_enhanced_desc *)
			&ring->va[ring->swtail].rx.enhanced;
		struct sk_buff *skb;
		u16 buff_id;

		*d = *_d;
		pa = wil_rx_desc_get_addr_edma(&d->dma);
		dmalen = le16_to_cpu(d->dma.length);
		dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);

		/* Extract the SKB from the rx_buff management array */
		buff_id = __le16_to_cpu(d->mac.buff_id);
		if (buff_id >= wil->rx_buff_mgmt.size) {
			wil_err(wil, "invalid buff_id %d\n", buff_id);
			continue;
		}
		skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
		wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
		if (unlikely(!skb))
			wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		else
			kfree_skb(skb);

		/* Move the buffer from the active to the free list */
		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			  &wil->rx_buff_mgmt.free);
	}
}

static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case the active list is
	 * not empty, in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}
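/* Rx buffer accounting: every Rx buffer is identified by its index
 * ("buff_id") in the rx_buff_mgmt.buff_arr array rather than by its
 * position in the descriptor ring. IDs are taken from the free list on
 * refill (wil_ring_alloc_skb_edma()) and returned to it when the buffer is
 * reaped or the ring is torn down; Rx status messages reference buffers by
 * this ID, which presumably allows HW to complete buffers independently of
 * their descriptor-ring order.
 */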
static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Link all the buffers into the free list */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 0; i < size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size;

	return 0;
}

static int wil_init_rx_sring(struct wil6210_priv *wil,
			     u16 status_ring_size,
			     size_t elem_size,
			     u16 ring_id)
{
	struct wil_status_ring *sring = &wil->srings[ring_id];
	int rc;

	/* print the requested size; sring->size is not yet set here */
	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	memset(&sring->rx_data, 0, sizeof(sring->rx_data));

	sring->is_rx = true;
	sring->size = status_ring_size;
	sring->elem_size = elem_size;
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_sring_add(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}
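/* For Rx rings, wil_ring_alloc_desc_ring() below also allocates a small
 * DMA-coherent word (edma_rx_swtail) which, judging by its use in
 * wil_rx_refill_edma(), HW updates with the ring tail; refill then stops
 * exactly at the last descriptor HW has not yet consumed.
 */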
static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					    GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;
err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}

static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;
	int ring_index = 0;

	if (!ring->va)
		return;

	sz = ring->size * sizeof(ring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (ring->is_rx) {
		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
			     ring->size, ring->va,
			     &ring->pa, ring->ctx);

		wil_move_all_rx_buff_to_free_list(wil, ring);
		goto out;
	}

	/* TX ring */
	ring_index = ring - wil->ring_tx;

	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
		     ring_index, ring->size, ring->va,
		     &ring->pa, ring->ctx);

	while (!wil_ring_is_empty(ring)) {
		struct wil_ctx *ctx;

		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_d =
			(struct wil_tx_enhanced_desc *)
			&ring->va[ring->swtail].tx.enhanced;

		ctx = &ring->ctx[ring->swtail];
		if (!ctx) {
			wil_dbg_txrx(wil,
				     "ctx(%d) was already completed\n",
				     ring->swtail);
			ring->swtail = wil_ring_next_tail(ring);
			continue;
		}
		*d = *_d;
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		ring->swtail = wil_ring_next_tail(ring);
	}

out:
	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
	kfree(ring->ctx);
	ring->pa = 0;
	ring->va = NULL;
	ring->ctx = NULL;
}

static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;
out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}

static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
}

static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}
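/* SW replay check in wil_rx_crypto_check_edma() below: the 6-byte GCMP PN
 * starts at the pn_15_0 field of the extended status message, apparently
 * with the low-order bytes first; hence reverse_memcmp() compares it
 * against the last accepted PN starting from the most significant byte,
 * and anything not strictly greater is rejected as a replay.
 */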
static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	const u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}

static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, msg);
		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}

static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
}

static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
{
	u16 status_ring_size;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;
	u16 max_rx_pl_per_desc;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EINVAL;
	}

	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	max_rx_pl_per_desc = wil->rx_buf_len + ETH_HLEN +
		WIL_EDMA_MAX_DATA_OFFSET;

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = -EFAULT;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;
err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}
static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
	wil->ring2cid_tid[ring_id][1] = 0;

out:
	return rc;
}
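/* Return contract of wil_check_bar() below: 0 lets a data frame continue up
 * the stack; -EAGAIN tells the caller (wil_sring_reap_rx_edma()) to drop
 * the current skb and reap the next status message. BAR frames additionally
 * move the reorder window via wil_rx_bar() before being consumed.
 */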
/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* Print again all the info. One can enable only this debug
		 * print, without the overhead of printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}

static int wil_rx_edma_check_errors(struct wil6210_priv *wil, void *msg,
				    struct wil_net_stats *stats,
				    struct sk_buff *skb)
{
	int error;
	int l2_rx_status;
	int l3_rx_status;
	int l4_rx_status;

	error = wil_rx_status_get_error(msg);
	if (!error) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
	if (l2_rx_status != 0) {
		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
			     l2_rx_status);
		/* Due to a HW issue, a KEY error will trigger a MIC error */
		if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
			wil_dbg_txrx(wil,
				     "L2 MIC/KEY error, dropping packet\n");
			stats->rx_mic_error++;
		}
		if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
			wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
			stats->rx_key_error++;
		}
		if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
			wil_dbg_txrx(wil,
				     "L2 REPLAY error, dropping packet\n");
			stats->rx_replay++;
		}
		if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
			wil_dbg_txrx(wil,
				     "L2 AMSDU error, dropping packet\n");
			stats->rx_amsdu_error++;
		}
		return -EFAULT;
	}

	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
	if (!l3_rx_status && !l4_rx_status)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* If HW reports a bad checksum, let the IP stack re-check it.
	 * For example, HW doesn't understand the Microsoft IP stack, which
	 * mis-calculates the TCP checksum: where it should be 0x0, it
	 * writes 0xffff, in violation of RFC 1624
	 */

	return 0;
}
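/* Reap one packet from an Rx status ring: each status message carries a
 * buff_id that is resolved back to the original skb, and a packet that
 * spans several buffers is coalesced until a message with the EOP
 * (end-of-packet) bit arrives. On any error, rxdata->skipping stays set
 * until EOP so that the remaining fragments of the bad packet are dropped
 * as well.
 */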
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = wil->rx_buf_len + ETH_HLEN +
		WIL_EDMA_MAX_DATA_OFFSET;
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	int rc;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;

	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, msg);
	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		wil_sring_advance_swhead(sring);
		goto again;
	}

	wil_sring_advance_swhead(sring);

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		goto again;
	}

	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		  &wil->rx_buff_mgmt.free);

	eop = wil_rx_status_get_eop(msg);

	cid = wil_rx_status_get_cid(msg);
	if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
			cid, sring->swhead);
		rxdata->skipping = true;
		goto skipping;
	}
	stats = &wil->sta[cid].stats;

	if (unlikely(skb->len < ETH_HLEN)) {
		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		rxdata->skipping = true;
		goto skipping;
	}

	/* Check and treat errors reported by HW */
	rc = wil_rx_edma_check_errors(wil, msg, stats, skb);
	if (rc) {
		rxdata->skipping = true;
		goto skipping;
	}

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		rxdata->skipping = true;
	}

skipping:
	/* skipping indicates if a certain SKB should be dropped.
	 * It is set in case there is an error on the current SKB or in case
	 * of RX chaining: as long as we manage to merge the SKBs it will be
	 * false. Once we have a bad SKB or we don't manage to merge SKBs it
	 * will be set to the !EOP value of the current SKB.
	 * This guarantees that all the following SKBs until EOP will also
	 * get dropped.
	 */
	if (unlikely(rxdata->skipping)) {
		kfree_skb(skb);
		if (rxdata->skb) {
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
		}
		rxdata->skipping = !eop;
		goto again;
	}

	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

	/* reaching here rxdata->skb always contains a full packet */
	skb = rxdata->skb;
	rxdata->skb = NULL;
	rxdata->skipping = false;

	if (stats) {
		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
			stats->rx_per_mcs[stats->last_mcs_rx]++;
	}

	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
		kfree_skb(skb);
		goto again;
	}

	/* Compensate for the HW data alignment according to the status
	 * message
	 */
	data_offset = wil_rx_status_get_data_offset(msg);
	if (data_offset == 0xFF ||
	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
		wil_err(wil, "Unexpected data offset %d\n", data_offset);
		kfree_skb(skb);
		goto again;
	}

	skb_pull(skb, data_offset);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	/* Has to be done after dma_unmap_single as skb->cb is also
	 * used for holding the pa
	 */
	s = wil_skb_rxstatus(skb);
	memcpy(s, msg, sring->elem_size);

	return skb;
}
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
			wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}
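/* Note on the hwtail updates in the Rx handler above and the Tx status
 * handler below: the HW read pointer is published as the last entry already
 * processed, i.e. one behind the SW head. Status ring sizes are powers of
 * two (1 << order), so the unsigned wrap of (swhead - 1) at swhead == 0
 * still yields size - 1.
 */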
static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7: ip_version, 0 - IPv6, 1 - IPv4 */
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*msg = *_msg;
}
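/* Unlike Rx, a single Tx status message may complete a batch: it names a
 * descriptor ring (ring_id) and a count (num_descriptors), and the handler
 * below walks that many descriptors from the ring's SW tail, unmapping and
 * accounting each one.
 */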
/**
 * Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats = NULL;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &msg);
	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;

		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		cid = wil->ring2cid_tid[ring_id][0];
		if (cid < WIL6210_MAX_CID)
			stats = &wil->sta[cid].stats;

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		for (i = 0; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &msg);
		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
	}

	/* shall we wake net queues? */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	/* Update the HW tail ptr (RD ptr) */
	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family */
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}
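/* The helper below builds and commits a single TSO descriptor: the payload
 * is mapped either as a single buffer (skb linear data) or as a page
 * fragment, and the mapping type is recorded in the ring context so that
 * wil_tx_desc_unmap_edma() later issues the matching dma_unmap_*() call.
 * Only the last descriptor of an skb takes an skb reference (skb_get()),
 * so the completion path accounts for the packet exactly once.
 */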
static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	*_desc = *d;
	(*descs_used)++;

	return 0;
}
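/* TSO transmit flow, a sketch of the descriptor sequence emitted below: one
 * header-only descriptor (MAC + IP + TCP headers), one descriptor for the
 * remaining linear data ("head"), then one descriptor per page fragment,
 * the last one marked wil_tso_type_lst. On a DMA mapping failure everything
 * generated so far is unmapped in reverse order (mem_error).
 */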
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

	/* Rest of the descriptors are from the SKB fragments */
	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = frag->size;

		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
			     len, descs_used);

		rc = wil_tx_tso_gen_desc(wil, NULL, len,
					 (swhead + descs_used) % ring->size,
					 (f != nr_frags - 1) ?
					 wil_tso_type_mid : wil_tso_type_lst,
					 frag, ring, skb, is_ipv4,
					 tcp_hdr_len, skb_net_hdr_len,
					 mss, &descs_used);
		if (rc)
			goto mem_error;
	}

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_ring_advance_head(ring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

mem_error:
	while (descs_used > 0) {
		struct device *dev = wil_to_dev(wil);
		struct wil_ctx *ctx;
		int i = (swhead + descs_used - 1) % ring->size;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_desc =
			(struct wil_tx_enhanced_desc *)
			&ring->va[i].tx.enhanced;

		*d = *_desc;
		ctx = &ring->ctx[i];
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
	return rc;
}

static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
				    int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	int rc;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
		     ring_id, wil->tx_sring_idx);

	lockdep_assert_held(&wil->mutex);

	wil_tx_data_init(txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
	wil->ring2cid_tid[ring_id][1] = 0; /* TID */
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
	if (rc)
		goto out_free;

	return 0;

out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);

out:
	return rc;
}

static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];

	wil_dbg_misc(wil, "free TX sring\n");

	wil_sring_free(wil, sring);
}

static void wil_rx_data_free(struct wil_status_ring *sring)
{
	if (!sring)
		return;

	kfree_skb(sring->rx_data.skb);
	sring->rx_data.skb = NULL;
}
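/* Rx teardown below mirrors wil_rx_init_edma(): free the descriptor ring
 * (returning any still-mapped buffers to the free list), drop partially
 * assembled skbs held by the status rings, free the status rings and
 * finally the Rx buffer ID array.
 */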
static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	int i;

	wil_dbg_misc(wil, "rx_fini_edma\n");

	wil_ring_free_edma(wil, ring);

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		wil_rx_data_free(&wil->srings[i]);
		wil_sring_free(wil, &wil->srings[i]);
	}

	wil_free_rx_buff_arr(wil);
}

void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}