// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();		/* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			 & B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			      "bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the actual retry limit done by the
			 * hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}