/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}
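/* The hardware DMA index registers hold byte offsets into the descriptor
 * table, so slot numbers are scaled by sizeof(struct b43legacy_dmadesc32)
 * when talking to the device. */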
static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

	/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

	/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
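/* Streaming DMA helpers. TX buffers are mapped towards the device
 * (DMA_TO_DEVICE), RX buffers from the device (DMA_FROM_DEVICE). The
 * sync helpers below are only ever used on RX rings. */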
static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
				     "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
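/* Note on addressing: the low bits of a bus address are programmed into the
 * ring (or descriptor) register with the SSB translation bits ORed in, while
 * the bits covered by SSB_DMA_TRANSLATION_MASK are passed separately as the
 * "addrext" field of the control register. */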
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			 & B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
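/* Allocates the ring structure, the per-slot metadata array and, for TX
 * rings, the txhdr cache (retrying with GFP_DMA if that cache turns out
 * not to be DMA-able), then allocates the descriptor memory and programs
 * the hardware channel. Returns NULL on failure. */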
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
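	/* Tear down in reverse order of setup: stop the hardware channel
	 * first, then release the buffers it still references, and finally
	 * the descriptor ring memory itself. */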
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			      "bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
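	/* RX ring 0 receives normal frames. On core revisions < 5 an
	 * additional RX ring (ring 3) carries hardware TX status reports,
	 * which dma_rx() hands to b43legacy_handle_hwtxstatus(). */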
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET 2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;
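	/* Each frame consumes SLOTS_PER_PACKET (two) descriptors: the first
	 * carries the device TX header built below, the second the payload.
	 * On any failure the slot counters are rolled back to the values
	 * saved above. */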
	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers
	 * pointing into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the actual retry limit done by the
			 * hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
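	/* Kick the TX worker (wl->tx_work) so any frames that may have been
	 * held back while the queue was stopped get another chance to be
	 * submitted. */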
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}