/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"

#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)

static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static int wil_find_free_sring(struct wil6210_priv *wil)
{
	int i;

	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
		if (!wil->srings[i].va)
			return i;
	}

	return -EINVAL;
}

static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	sring->pa = 0;
	sring->va = NULL;
}

static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is
	 * necessary since the DR bit must be initialized to 0.
	 */
	sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}

static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];

	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

/**
 * Allocate one skb for Rx descriptor RING
 */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len;
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	/**
	 * Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}

static inline
void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
{
	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
	       sring->elem_size);
}
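/* The status-ring "descriptor ready" (DR) handshake works by polarity
 * rather than by an explicit head pointer: SW consumes entries while the
 * DR bit in the message equals sring->desc_rdy_pol, and flips its
 * expected polarity every time swhead wraps to 0. For example, with a
 * 4-entry sring and an initial polarity of 1, SW accepts entries whose
 * DR bit is 1 on the first lap and 0 on the second, so entries left over
 * from the previous lap are never mistaken for new completions.
 */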
static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
	sring->swhead = (sring->swhead + 1) % sring->size;
	if (sring->swhead == 0)
		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}

static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;
	ring->swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring),
	     (next_head != ring->swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}

static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	struct list_head *active = &wil->rx_buff_mgmt.active;
	dma_addr_t pa;

	while (!list_empty(active)) {
		struct wil_rx_buff *rx_buff =
			list_first_entry(active, struct wil_rx_buff, list);
		struct sk_buff *skb = rx_buff->skb;

		if (unlikely(!skb)) {
			wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
		} else {
			rx_buff->skb = NULL;
			memcpy(&pa, skb->cb, sizeof(pa));
			dma_unmap_single(dev, pa, wil->rx_buf_len,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
		}

		/* Move the buffer from the active to the free list */
		list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
	}
}

static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case active list is
	 * not empty in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}

static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Linkify the list */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 0; i < size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size;

	return 0;
}
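/* Lifecycle of a buffer ID: wil_init_rx_buff_arr() above puts every entry
 * of buff_arr on the free list with id == its array index. Refill
 * (wil_ring_alloc_skb_edma) takes an ID from the free list, attaches an
 * skb and moves it to the active list; the reap path looks the ID up in
 * buff_arr by index and returns it to the free list. An empty free list
 * therefore only throttles refill (-EAGAIN); it is not a fatal error.
 */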
static int wil_init_rx_sring(struct wil6210_priv *wil,
			     u16 status_ring_size,
			     size_t elem_size,
			     u16 ring_id)
{
	struct wil_status_ring *sring = &wil->srings[ring_id];
	int rc;

	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	memset(&sring->rx_data, 0, sizeof(sring->rx_data));

	sring->is_rx = true;
	sring->size = status_ring_size;
	sring->elem_size = elem_size;
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_sring_add(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					   GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;
err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}
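/* Teardown is asymmetric: for RX the skbs are owned by the buff_arr
 * management (the descriptor ring holds only buffer IDs), so freeing
 * just drains the active list and releases the HW-written swtail cell;
 * for TX every descriptor between swtail and swhead may still hold a
 * DMA mapping and an skb, so the ring is walked and unmapped entry by
 * entry.
 */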
static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;
	int ring_index = 0;

	if (!ring->va)
		return;

	sz = ring->size * sizeof(ring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (ring->is_rx) {
		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
			     ring->size, ring->va,
			     &ring->pa, ring->ctx);

		wil_move_all_rx_buff_to_free_list(wil, ring);
		dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
				  ring->edma_rx_swtail.va,
				  ring->edma_rx_swtail.pa);
		goto out;
	}

	/* TX ring */
	ring_index = ring - wil->ring_tx;

	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
		     ring_index, ring->size, ring->va,
		     &ring->pa, ring->ctx);

	while (!wil_ring_is_empty(ring)) {
		struct wil_ctx *ctx;

		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_d =
			(struct wil_tx_enhanced_desc *)
			&ring->va[ring->swtail].tx.enhanced;

		ctx = &ring->ctx[ring->swtail];
		if (!ctx) {
			wil_dbg_txrx(wil,
				     "ctx(%d) was already completed\n",
				     ring->swtail);
			ring->swtail = wil_ring_next_tail(ring);
			continue;
		}
		*d = *_d;
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		ring->swtail = wil_ring_next_tail(ring);
	}

out:
	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
	kfree(ring->ctx);
	ring->pa = 0;
	ring->va = NULL;
	ring->ctx = NULL;
}

static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;
out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}

static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast, int *retry)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
	*retry = wil_rx_status_get_retry(s);
}

static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}

static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	const u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}

static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, msg);
		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}

static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
	/* RX buffer size must be aligned to 4 bytes */
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}
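/* RX initialization below proceeds in five steps: configure the default
 * buffer size in FW, allocate the status rings (one sring slot is kept
 * in reserve for TX), allocate the descriptor ring, build the buffer-ID
 * array, and finally pre-fill the descriptor ring with receive buffers.
 * Each error path unwinds only the steps already completed.
 */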
static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
	u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EINVAL;
	}
	if (wil->rx_status_ring_order <= desc_ring_order)
		/* make sure sring is larger than desc ring */
		wil->rx_status_ring_order = desc_ring_order + 1;
	if (wil->rx_buff_id_count <= desc_ring_size)
		/* make sure we will not run out of buff_ids */
		wil->rx_buff_id_count = desc_ring_size + 512;
	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = -EFAULT;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;
err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}

static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
	wil->ring2cid_tid[ring_id][1] = 0;

out:
	return rc;
}

static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
				   int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);

	wil_err(wil, "ring modify is not supported for EDMA\n");

	return -EOPNOTSUPP;
}
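/* With SW reorder the driver, not the HW, must react to block-ack
 * related control frames. wil_check_bar() below filters out any
 * non-data frame: a BAR advances the reorder window via wil_rx_bar(),
 * everything else is only logged. The -EAGAIN return tells the caller
 * to drop the skb and reap the next one.
 */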
/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* print again all info. One can enable only this
		 * without overhead for printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}

static int wil_rx_error_check_edma(struct wil6210_priv *wil,
				   struct sk_buff *skb,
				   struct wil_net_stats *stats)
{
	int error;
	int l2_rx_status;
	int l3_rx_status;
	int l4_rx_status;
	void *msg = wil_skb_rxstatus(skb);

	error = wil_rx_status_get_error(msg);
	if (!error) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
	if (l2_rx_status != 0) {
		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
			     l2_rx_status);
		/* Due to HW issue, KEY error will trigger a MIC error */
		if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
			wil_err_ratelimited(wil,
					    "L2 MIC/KEY error, dropping packet\n");
			stats->rx_mic_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
			wil_err_ratelimited(wil,
					    "L2 KEY error, dropping packet\n");
			stats->rx_key_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
			wil_err_ratelimited(wil,
					    "L2 REPLAY error, dropping packet\n");
			stats->rx_replay++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
			wil_err_ratelimited(wil,
					    "L2 AMSDU error, dropping packet\n");
			stats->rx_amsdu_error++;
		}
		return -EFAULT;
	}

	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
	if (!l3_rx_status && !l4_rx_status)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* If HW reports bad checksum, let IP stack re-check it
	 * For example, HW doesn't understand Microsoft IP stack that
	 * mis-calculates TCP checksum - if it should be 0x0,
	 * it writes 0xffff in violation of RFC 1624
	 */
	else
		stats->rx_csum_err++;

	return 0;
}
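/* wil_sring_reap_rx_edma() below turns one or more status-ring entries
 * into a single skb: each message identifies its buffer by ID rather
 * than by ring position, fragments of a packet spanning several buffers
 * are coalesced until the EOP bit is seen, and on any error
 * rx_data.skipping stays set so the remaining fragments of that packet
 * are dropped as well. The status message itself is copied into skb->cb
 * for the later reorder/crypto stages.
 */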
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = wil->rx_buf_len;
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;

	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, msg);
	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		wil_sring_advance_swhead(sring);
		goto again;
	}

	wil_sring_advance_swhead(sring);

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		/* Move the buffer from the active list to the free list */
		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			  &wil->rx_buff_mgmt.free);
		goto again;
	}

	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		  &wil->rx_buff_mgmt.free);

	eop = wil_rx_status_get_eop(msg);

	cid = wil_rx_status_get_cid(msg);
	if (unlikely(!wil_val_in_range(cid, 0, max_assoc_sta))) {
		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
			cid, sring->swhead);
		rxdata->skipping = true;
		goto skipping;
	}
	stats = &wil->sta[cid].stats;

	if (unlikely(skb->len < ETH_HLEN)) {
		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		rxdata->skipping = true;
		goto skipping;
	}

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		rxdata->skipping = true;
	}

skipping:
	/* skipping indicates if a certain SKB should be dropped.
	 * It is set in case there is an error on the current SKB or in case
	 * of RX chaining: as long as we manage to merge the SKBs it will
	 * be false. Once we have a bad SKB or we don't manage to merge SKBs
	 * it will be set to the !EOP value of the current SKB.
	 * This guarantees that all the following SKBs until EOP will also
	 * get dropped.
	 */
	if (unlikely(rxdata->skipping)) {
		kfree_skb(skb);
		if (rxdata->skb) {
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
		}
		rxdata->skipping = !eop;
		goto again;
	}

	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

	/* reaching here rxdata->skb always contains a full packet */
	skb = rxdata->skb;
	rxdata->skb = NULL;
	rxdata->skipping = false;

	if (stats) {
		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
			stats->rx_per_mcs[stats->last_mcs_rx]++;
	}

	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
		kfree_skb(skb);
		goto again;
	}

	/* Compensate for the HW data alignment according to the status
	 * message
	 */
	data_offset = wil_rx_status_get_data_offset(msg);
	if (data_offset == 0xFF ||
	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
		wil_err(wil, "Unexpected data offset %d\n", data_offset);
		kfree_skb(skb);
		goto again;
	}

	skb_pull(skb, data_offset);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	/* Has to be done after dma_unmap_single as skb->cb is also
	 * used for holding the pa
	 */
	s = wil_skb_rxstatus(skb);
	memcpy(s, msg, sring->elem_size);

	return skb;
}

void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
				 wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}

static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7: ip_version 0 - IP6, 1 - IP4 */
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
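/* A single TX status message completes a batch: it names the descriptor
 * ring (ring_id) and how many descriptors it retires (num_descriptors),
 * so the handler below frees ctx entries from swtail forward instead of
 * matching completions one-to-one with descriptors.
 */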
static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*msg = *_msg;
}

/**
 * Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &msg);
	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;

		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		cid = wil->ring2cid_tid[ring_id][0];
		stats = (cid < max_assoc_sta ? &wil->sta[cid].stats : NULL);

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		for (i = 0; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
								    &wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &msg);
		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
	}

	/* shall we wake net queues? */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	/* Update the HW tail ptr (RD ptr) */
	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family */
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}
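/* Helper for the TSO path: maps one buffer (linear data or a page
 * fragment), builds its enhanced descriptor with the TSO type chosen by
 * the caller and writes it into the ring. Only the descriptor marked
 * wil_tso_type_lst takes a reference on the skb, so completion
 * accounting happens exactly once per packet.
 */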
static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	*_desc = *d;
	(*descs_used)++;

	return 0;
}
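/* A TSO skb is split into a descriptor carrying only the MAC/IP/TCP
 * headers (wil_tso_type_hdr), a descriptor for the rest of the linear
 * data (first, or last if there are no fragments), and one descriptor
 * per page fragment (mid/last). Note that the MSS programmed into the
 * descriptor is pre-shifted right by 2; the exact unit of that HW field
 * is not documented here.
 */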
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

	/* Rest of the descriptors are from the SKB fragments */
	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = frag->size;

		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
			     len, descs_used);

		rc = wil_tx_tso_gen_desc(wil, NULL, len,
					 (swhead + descs_used) % ring->size,
					 (f != nr_frags - 1) ?
					 wil_tso_type_mid : wil_tso_type_lst,
					 frag, ring, skb, is_ipv4,
					 tcp_hdr_len, skb_net_hdr_len,
					 mss, &descs_used);
		if (rc)
			goto mem_error;
	}

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_ring_advance_head(ring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

mem_error:
	while (descs_used > 0) {
		struct device *dev = wil_to_dev(wil);
		struct wil_ctx *ctx;
		int i = (swhead + descs_used - 1) % ring->size;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_desc =
			(struct wil_tx_enhanced_desc *)
			&ring->va[i].tx.enhanced;

		*d = *_desc;
		ctx = &ring->ctx[i];
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
	return rc;
}

static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
				    int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	int rc;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
		     ring_id, wil->tx_sring_idx);

	lockdep_assert_held(&wil->mutex);

	wil_tx_data_init(txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
	wil->ring2cid_tid[ring_id][1] = 0; /* TID */
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
	if (rc)
		goto out_free;

	return 0;

out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);

out:
	return rc;
}

static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];

	wil_dbg_misc(wil, "free TX sring\n");

	wil_sring_free(wil, sring);
}

static void wil_rx_data_free(struct wil_status_ring *sring)
{
	if (!sring)
		return;

	kfree_skb(sring->rx_data.skb);
	sring->rx_data.skb = NULL;
}

static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	int i;

	wil_dbg_misc(wil, "rx_fini_edma\n");

	wil_ring_free_edma(wil, ring);

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		wil_rx_data_free(&wil->srings[i]);
		wil_sring_free(wil, &wil->srings[i]);
	}

	wil_free_rx_buff_arr(wil);
}
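/* Install the enhanced-DMA entry points in the common txrx_ops vtable;
 * a legacy (pre-EDMA) counterpart in txrx.c fills the same table for
 * older HW generations.
 */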
void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}