Lines matching "use-dma-tx"
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 DMA ringbuffer and descriptor allocation/management
18 #include "dma.h"
23 #include <linux/dma-mapping.h>
32 /* Required number of TX DMA slots per TX frame. This is currently 2:
 * the header and the ieee80211 frame go into separate slots. */
37 static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
45 if (dma->translation_in_low) {
47 addr |= dma->translation;
52 if (!dma->translation_in_low) {
54 addr |= dma->translation;
58 if (dma->translation_in_low)
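A minimal standalone sketch of the address split performed above, assuming the SSB translation constants from <linux/ssb/ssb.h> (mask 0xC0000000, shift 30): the routing "translation" bits replace the top two bits of whichever 32-bit word carries them, and the EXT field is those two bits of the untranslated address.

#include <stdint.h>
#include <stdio.h>

#define TRANSLATION_MASK  0xC0000000u /* assumed: SSB_DMA_TRANSLATION_MASK */
#define TRANSLATION_SHIFT 30          /* assumed: SSB_DMA_TRANSLATION_SHIFT */

int main(void)
{
	uint64_t dmaaddr = 0x123456789ULL;  /* example bus address */
	uint32_t translation = 0x80000000u; /* example routing bits */

	uint32_t lo  = (uint32_t)dmaaddr;
	uint32_t hi  = (uint32_t)(dmaaddr >> 32);
	/* 64-bit engines normally carry the translation in the high word */
	uint32_t ext = (hi & TRANSLATION_MASK) >> TRANSLATION_SHIFT;

	hi = (hi & ~TRANSLATION_MASK) | translation;
	printf("low=%08x high=%08x ext=%x\n",
	       (unsigned)lo, (unsigned)hi, (unsigned)ext);
	return 0;
}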
70 /* 32bit DMA ops. */
78 *meta = &(ring->meta[slot]);
79 desc = ring->descbase;
90 struct b43_dmadesc32 *descbase = ring->descbase;
96 slot = (int)(&(desc->dma32) - descbase);
97 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
99 addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
100 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
103 if (slot == ring->nr_slots - 1)
114 desc->dma32.control = cpu_to_le32(ctl);
115 desc->dma32.address = cpu_to_le32(addr);
162 /* 64bit DMA ops. */
170 *meta = &(ring->meta[slot]);
171 desc = ring->descbase;
182 struct b43_dmadesc64 *descbase = ring->descbase;
188 slot = (int)(&(desc->dma64) - descbase);
189 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
191 addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
192 addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
193 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
195 if (slot == ring->nr_slots - 1)
207 desc->dma64.control0 = cpu_to_le32(ctl0);
208 desc->dma64.control1 = cpu_to_le32(ctl1);
209 desc->dma64.address_low = cpu_to_le32(addrlo);
210 desc->dma64.address_high = cpu_to_le32(addrhi);
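Both fill routines derive the slot from pointer arithmetic, set a table-end flag on the ring's last slot, and split the bus address into the words the engine expects. A self-contained sketch of the 64-bit variant follows; the flag names and bit positions are placeholders (the real layout lives in the driver's dma.h), and the cpu_to_le32() byte-swapping is omitted:

#include <stdint.h>

/* placeholder bits, NOT the hardware's actual encoding */
#define CTL0_DTABLEEND  (1u << 28)
#define CTL0_IRQ        (1u << 29)
#define CTL0_FRAMEEND   (1u << 30)
#define CTL0_FRAMESTART (1u << 31)
#define CTL1_BYTECNT    0x00001FFFu

struct desc64 {
	uint32_t control0;    /* frame start/end, irq, table end */
	uint32_t control1;    /* byte count (plus address-extension bits) */
	uint32_t address_low;
	uint32_t address_high;
};

static void fill_desc64(struct desc64 *d, int slot, int nr_slots,
			uint64_t addr, uint16_t len,
			int start, int end, int irq)
{
	uint32_t ctl0 = 0, ctl1 = 0;

	if (slot == nr_slots - 1)
		ctl0 |= CTL0_DTABLEEND;  /* last slot closes the table */
	if (start)
		ctl0 |= CTL0_FRAMESTART;
	if (end)
		ctl0 |= CTL0_FRAMEEND;
	if (irq)
		ctl0 |= CTL0_IRQ;
	ctl1 |= len & CTL1_BYTECNT;

	d->control0 = ctl0;
	d->control1 = ctl1;
	d->address_low = (uint32_t)addr;
	d->address_high = (uint32_t)(addr >> 32);
}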
259 return (ring->nr_slots - ring->used_slots);
264 B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
265 if (slot == ring->nr_slots - 1)
272 B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
274 return ring->nr_slots - 1;
275 return slot - 1;
282 if (current_used_slots <= ring->max_used_slots)
284 ring->max_used_slots = current_used_slots;
285 if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
286 b43dbg(ring->dev->wl,
288 ring->max_used_slots,
289 ring->tx ? "TX" : "RX", ring->index);
304 B43_WARN_ON(!ring->tx);
305 B43_WARN_ON(ring->stopped);
308 slot = next_slot(ring, ring->current_slot);
309 ring->current_slot = slot;
310 ring->used_slots++;
312 update_max_used_slots(ring, ring->used_slots);
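free_slots(), next_slot(), prev_slot() and request_slot() are plain ring-buffer bookkeeping. A tiny standalone model of the same logic (the names and the 256-slot size here are illustrative):

#include <assert.h>

#define NR_SLOTS 256

static int used_slots;
static int current_slot = -1; /* ring starts empty, as in b43_setup_dmaring() */

static int next_slot(int slot)
{
	return (slot == NR_SLOTS - 1) ? 0 : slot + 1; /* wrap at the end */
}

static int request_slot(void)
{
	assert(used_slots < NR_SLOTS); /* caller must check free slots first */
	current_slot = next_slot(current_slot);
	used_slots++;
	return current_slot;
}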
348 unsigned char *buf, size_t len, int tx)
352 if (tx) {
353 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
356 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
365 dma_addr_t addr, size_t len, int tx)
367 if (tx) {
368 dma_unmap_single(ring->dev->dev->dma_dev,
371 dma_unmap_single(ring->dev->dev->dma_dev,
380 B43_WARN_ON(ring->tx);
381 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
389 B43_WARN_ON(ring->tx);
390 dma_sync_single_for_device(ring->dev->dev->dma_dev,
398 if (meta->skb) {
399 if (ring->tx)
400 ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
402 dev_kfree_skb_any(meta->skb);
403 meta->skb = NULL;
409 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
410 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
411 * In practice we could use smaller buffers for the latter, but the
412 * alignment is really important because of a hardware bug: if bit
413 * 0x00001000 is used in a DMA address, some hardware (like BCM4331)
414 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
415 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
416 * more than 256 slots for a ring. */
418 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
421 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
422 ring_mem_size, &(ring->dmabase),
424 if (!ring->descbase)
425 return -ENOMEM;
432 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
434 dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
435 ring->descbase, ring->dmabase);
438 /* Reset the RX DMA channel */
457 i = -1;
463 i = -1;
469 if (i != -1) {
470 b43err(dev->wl, "DMA RX reset timed out\n");
471 return -ENODEV;
477 /* Reset the TX DMA channel */
515 i = -1;
521 i = -1;
527 if (i != -1) {
528 b43err(dev->wl, "DMA TX reset timed out\n");
529 return -ENODEV;
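Both reset helpers poll the engine's status register up to ten times and use the loop index as a sentinel: it is forced to -1 when the engine reports the disabled state, so `i != -1` after the loop means every poll elapsed. A standalone sketch of the idiom, where read_state(), STATE_DISABLED and sleep_ms() are stubs standing in for the status-register read and msleep():

#include <errno.h>

enum { STATE_DISABLED = 0 };                  /* hypothetical state value */
static int read_state(void) { return STATE_DISABLED; } /* stub */
static void sleep_ms(int ms) { (void)ms; }             /* stub for msleep() */

static int wait_for_engine_disabled(void)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (read_state() == STATE_DISABLED) {
			i = -1;       /* sentinel: success */
			break;
		}
		sleep_ms(1);
	}
	if (i != -1)
		return -ENODEV;       /* all ten polls elapsed: timed out */
	return 0;
}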
537 /* Check if a DMA mapping address is invalid. */
542 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
545 switch (ring->type) {
572 unsigned char *f = skb->data + ring->frameoffset;
582 /* This poisons the RX buffer to detect DMA failures. */
584 rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
585 rxhdr->frame_len = 0;
587 B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
588 frame = skb->data + ring->frameoffset;
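A standalone sketch of the poison scheme: before a buffer goes to hardware, frame_len is zeroed and the start of the frame area is overwritten with 0xFF; b43_rx_buffer_is_poisoned() then recognizes a buffer the device never wrote by ANDing the first eight bytes (in the driver, sizeof(struct b43_plcp_hdr6) + 2 = 8):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static void poison_frame(unsigned char *frame, size_t n)
{
	memset(frame, 0xFF, n);   /* n is 8 in the driver: PLCP header + pad */
}

static bool frame_is_poisoned(const unsigned char *f)
{
	/* only all-0xFF bytes survive the AND chain as 0xFF */
	return (f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF;
}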
599 B43_WARN_ON(ring->tx);
601 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
603 return -ENOMEM;
605 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
606 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
612 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
614 return -ENOMEM;
616 dmaaddr = map_descbuffer(ring, skb->data,
617 ring->rx_buffersize, 0);
618 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
619 b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
621 return -EIO;
625 meta->skb = skb;
626 meta->dmaaddr = dmaaddr;
627 ring->ops->fill_descriptor(ring, desc, dmaaddr,
628 ring->rx_buffersize, 0, 0, 0);
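/* On the retry above: when the first mapping fails, the driver frees the
 * skb and (in lines not matched by this listing) adds GFP_DMA to
 * gfp_flags, so the second __dev_alloc_skb() draws from ZONE_DMA, where
 * the engine's address limit can be satisfied. */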
638 int i, err = -ENOMEM;
642 for (i = 0; i < ring->nr_slots; i++) {
643 desc = ring->ops->idx2desc(ring, i, &meta);
647 b43err(ring->dev->wl,
653 ring->used_slots = ring->nr_slots;
659 for (i--; i >= 0; i--) {
660 desc = ring->ops->idx2desc(ring, i, &meta);
662 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
663 dev_kfree_skb(meta->skb);
668 /* Do initial setup of the DMA controller:
669 * reset the controller, write the ring bus address
670 * and switch the "enable" bit on. */
677 bool parity = ring->dev->dma.parity;
681 if (ring->tx) {
682 if (ring->type == B43_DMA_64BIT) {
683 u64 ringbase = (u64) (ring->dmabase);
684 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
685 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
686 addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
697 u32 ringbase = (u32) (ring->dmabase);
698 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
699 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
713 if (ring->type == B43_DMA_64BIT) {
714 u64 ringbase = (u64) (ring->dmabase);
715 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
716 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
717 addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
719 value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
728 b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
731 u32 ringbase = (u32) (ring->dmabase);
732 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
733 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
735 value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
743 b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
752 /* Shutdown the DMA controller. */
755 if (ring->tx) {
756 b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
757 ring->type);
758 if (ring->type == B43_DMA_64BIT) {
764 b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
765 ring->type);
766 if (ring->type == B43_DMA_64BIT) {
779 if (!ring->used_slots)
781 for (i = 0; i < ring->nr_slots; i++) {
782 /* get meta - ignore returned value */
783 ring->ops->idx2desc(ring, i, &meta);
785 if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
786 B43_WARN_ON(!ring->tx);
789 if (ring->tx) {
790 unmap_descbuffer(ring, meta->dmaaddr,
791 meta->skb->len, 1);
793 unmap_descbuffer(ring, meta->dmaaddr,
794 ring->rx_buffersize, 0);
805 switch (dev->dev->bus_type) {
808 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
815 tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
845 ring->nr_slots = B43_RXRING_SLOTS;
847 ring->nr_slots = B43_TXRING_SLOTS;
849 ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
851 if (!ring->meta)
853 for (i = 0; i < ring->nr_slots; i++)
854 ring->meta[i].skb = B43_DMA_PTR_POISON;
856 ring->type = type;
857 ring->dev = dev;
858 ring->mmio_base = b43_dmacontroller_base(type, controller_index);
859 ring->index = controller_index;
861 ring->ops = &dma64_ops;
863 ring->ops = &dma32_ops;
865 ring->tx = true;
866 ring->current_slot = -1;
868 if (ring->index == 0) {
869 switch (dev->fw.hdr_format) {
871 ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
872 ring->frameoffset = B43_DMA0_RX_FW598_FO;
876 ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
877 ring->frameoffset = B43_DMA0_RX_FW351_FO;
884 ring->last_injected_overflow = jiffies;
891 ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
894 if (!ring->txhdr_cache)
897 /* test for ability to dma to txhdr_cache */
898 dma_test = dma_map_single(dev->dev->dma_dev,
899 ring->txhdr_cache,
906 kfree(ring->txhdr_cache);
907 ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
910 if (!ring->txhdr_cache)
913 dma_test = dma_map_single(dev->dev->dma_dev,
914 ring->txhdr_cache,
921 b43err(dev->wl,
922 "TXHDR DMA allocation failed\n");
927 dma_unmap_single(dev->dev->dma_dev,
945 kfree(ring->txhdr_cache);
947 kfree(ring->meta);
975 u64 failed_packets = ring->nr_failed_tx_packets;
976 u64 succeed_packets = ring->nr_succeed_tx_packets;
983 average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
985 b43dbg(ring->dev->wl, "DMA-%u %s: "
988 (unsigned int)(ring->type), ringname,
989 ring->max_used_slots,
990 ring->nr_slots,
1007 kfree(ring->txhdr_cache);
1008 kfree(ring->meta);
1012 #define destroy_ring(dma, ring) do { \
1013 b43_destroy_dmaring((dma)->ring, __stringify(ring)); \
1014 (dma)->ring = NULL; \
1019 struct b43_dma *dma;
1023 dma = &dev->dma;
1025 destroy_ring(dma, rx_ring);
1026 destroy_ring(dma, tx_ring_AC_BK);
1027 destroy_ring(dma, tx_ring_AC_BE);
1028 destroy_ring(dma, tx_ring_AC_VI);
1029 destroy_ring(dma, tx_ring_AC_VO);
1030 destroy_ring(dma, tx_ring_mcast);
1033 /* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bits in the low address word instead of the high one. */
1043 if (dev->dev->bus_type == B43_BUS_SSB &&
1044 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
1045 !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
1046 ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
1054 struct b43_dma *dma = &dev->dma;
1058 err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
1060 b43err(dev->wl, "The machine/kernel does not support "
1061 "the required %u-bit DMA mask\n", type);
1065 switch (dev->dev->bus_type) {
1068 dma->translation = bcma_core_dma_translation(dev->dev->bdev);
1073 dma->translation = ssb_dma_translation(dev->dev->sdev);
1077 dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);
1079 dma->parity = true;
1082 if (dev->dev->bus_type == B43_BUS_BCMA)
1083 dma->parity = false;
1086 err = -ENOMEM;
1087 /* setup TX DMA channels. */
1088 dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
1089 if (!dma->tx_ring_AC_BK)
1092 dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
1093 if (!dma->tx_ring_AC_BE)
1096 dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
1097 if (!dma->tx_ring_AC_VI)
1100 dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
1101 if (!dma->tx_ring_AC_VO)
1104 dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
1105 if (!dma->tx_ring_mcast)
1108 /* setup RX DMA channel. */
1109 dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
1110 if (!dma->rx_ring)
1113 /* No support for the TX status DMA ring. */
1114 B43_WARN_ON(dev->dev->core_rev < 5);
1116 b43dbg(dev->wl, "%u-bit DMA initialized\n",
1123 destroy_ring(dma, tx_ring_mcast);
1125 destroy_ring(dma, tx_ring_AC_VO);
1127 destroy_ring(dma, tx_ring_AC_VI);
1129 destroy_ring(dma, tx_ring_AC_BE);
1131 destroy_ring(dma, tx_ring_AC_BK);
1135 /* Generate a cookie for the TX header. */
1140 /* Use the upper 4 bits of the cookie as
1141 * DMA controller ID and store the slot number in the lower 12 bits;
1142 * the controller ID is biased by one so a valid cookie is never zero. */
1148 cookie = (((u16)ring->index + 1) << 12);
1159 struct b43_dma *dma = &dev->dma;
1164 ring = dma->tx_ring_AC_BK;
1167 ring = dma->tx_ring_AC_BE;
1170 ring = dma->tx_ring_AC_VI;
1173 ring = dma->tx_ring_AC_VO;
1176 ring = dma->tx_ring_mcast;
1180 if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
1181 b43dbg(dev->wl, "TX-status contains "
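Taken together with generate_cookie() above, the cookie format is: controller ID in the top four bits (stored with a +1 bias so a valid cookie is never zero), slot index in the low twelve bits. A standalone sketch of the packing and its inverse:

#include <stdint.h>
#include <assert.h>

static uint16_t pack_cookie(unsigned ring_index, unsigned slot)
{
	assert(slot <= 0x0FFF);             /* slot must fit in 12 bits */
	return (uint16_t)(((ring_index + 1) << 12) | slot);
}

static void unpack_cookie(uint16_t cookie,
			  unsigned *ring_index, unsigned *slot)
{
	*ring_index = (unsigned)(cookie >> 12) - 1;  /* undo the +1 bias */
	*slot = cookie & 0x0FFF;
}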
1192 const struct b43_dma_ops *ops = ring->ops;
1202 size_t hdrsize = b43_txhdr_size(ring->dev);
1204 /* Important note: If the number of used DMA slots per TX frame
1205 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
1206 * the file has to be updated, too. */
1209 old_top_slot = ring->current_slot;
1210 old_used_slots = ring->used_slots;
1214 desc = ops->idx2desc(ring, slot, &meta_hdr);
1217 header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
1219 err = b43_generate_txhdr(ring->dev, header,
1222 ring->current_slot = old_top_slot;
1223 ring->used_slots = old_used_slots;
1227 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1229 if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
1230 ring->current_slot = old_top_slot;
1231 ring->used_slots = old_used_slots;
1232 return -EIO;
1234 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1239 desc = ops->idx2desc(ring, slot, &meta);
1242 meta->skb = skb;
1243 meta->is_last_fragment = true;
1244 priv_info->bouncebuffer = NULL;
1246 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1248 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1249 priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
1251 if (!priv_info->bouncebuffer) {
1252 ring->current_slot = old_top_slot;
1253 ring->used_slots = old_used_slots;
1254 err = -ENOMEM;
1258 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
1259 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1260 kfree(priv_info->bouncebuffer);
1261 priv_info->bouncebuffer = NULL;
1262 ring->current_slot = old_top_slot;
1263 ring->used_slots = old_used_slots;
1264 err = -EIO;
1269 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1271 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1272 /* Tell the firmware about the cookie of the last
1273 * mcast frame, so it can clear the more-data bit in it. */
1274 b43_shm_write16(ring->dev, B43_SHM_SHARED,
1279 ops->poke_tx(ring, next_slot(ring, slot));
1283 unmap_descbuffer(ring, meta_hdr->dmaaddr,
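/* Design note on the bounce-buffer fallback above: when skb->data itself
 * cannot be DMA-mapped, the payload is duplicated with kmemdup() and the
 * descriptor points at the copy, while the original skb is kept intact
 * so it can still be handed back to mac80211 on TX status. */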
1291 if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
1296 next_overflow = ring->last_injected_overflow + HZ;
1298 ring->last_injected_overflow = jiffies;
1299 b43dbg(ring->dev->wl,
1300 "Injecting TX ring overflow on "
1301 "DMA controller %d\n", ring->index);
1309 /* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
1315 if (dev->qos_enabled) {
1322 ring = dev->dma.tx_ring_AC_VO;
1325 ring = dev->dma.tx_ring_AC_VI;
1328 ring = dev->dma.tx_ring_AC_BE;
1331 ring = dev->dma.tx_ring_AC_BK;
1335 ring = dev->dma.tx_ring_AC_BE;
1347 hdr = (struct ieee80211_hdr *)skb->data;
1348 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1350 ring = dev->dma.tx_ring_mcast;
1351 /* Set the more-data bit. Ucode will clear it on
1352 * the last frame for us. */
1353 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1360 B43_WARN_ON(!ring->tx);
1362 if (unlikely(ring->stopped)) {
1368 b43err(dev->wl, "Packet after queue stopped\n");
1369 err = -ENOSPC;
1376 b43err(dev->wl, "DMA queue overflow\n");
1377 err = -ENOSPC;
1381 /* Assign the queue number to the ring (if not already done before)
1382 * so TX status handling can use it. The queue to ring mapping is
1383 * static, so we don't need to store it per frame. */
1384 ring->queue_prio = skb_get_queue_mapping(skb);
1387 if (unlikely(err == -ENOKEY)) {
1390 ieee80211_free_txskb(dev->wl->hw, skb);
1395 b43err(dev->wl, "DMA tx mapping failure\n");
1400 /* This TX ring is full. */
1403 dev->wl->tx_queue_stopped[skb_mapping] = true;
1404 ring->stopped = true;
1406 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
1427 ring = parse_cookie(dev, status->cookie, &slot);
1430 B43_WARN_ON(!ring->tx);
1432 /* Sanity check: TX packets are processed in-order on one ring;
1433 * check that the slot deduced from the cookie really is the first
1434 * used slot. */
1435 firstused = ring->current_slot - ring->used_slots + 1;
1437 firstused = ring->nr_slots + firstused;
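/* Worked example of the wraparound above: with nr_slots = 256,
 * current_slot = 3 and used_slots = 10, firstused = 3 - 10 + 1 = -6,
 * and -6 + 256 = 250, so the frames in flight occupy slots 250..255
 * and 0..3. */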
1441 /* This possibly is a firmware bug and will result in
1442 * malfunction, memory leaks and/or stall of DMA functionality. */
1452 b43dbg(dev->wl,
1453 "Skip on DMA ring %d slot %d.\n",
1454 ring->index, slot);
1459 * Report this error. If running with open-source
1460 * firmware, restart the controller to revive operation. */
1463 b43dbg(dev->wl,
1464 "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
1465 ring->index, firstused, slot);
1466 if (dev->fw.opensource)
1467 b43_controller_restart(dev, "Out of order TX");
1472 ops = ring->ops;
1474 B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
1475 /* get meta - ignore returned value */
1476 ops->idx2desc(ring, slot, &meta);
1478 if (b43_dma_ptr_is_poisoned(meta->skb)) {
1479 b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
1481 slot, firstused, ring->index);
1485 if (meta->skb) {
1487 b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
1489 unmap_descbuffer(ring, meta->dmaaddr,
1490 meta->skb->len, 1);
1491 kfree(priv_info->bouncebuffer);
1492 priv_info->bouncebuffer = NULL;
1494 unmap_descbuffer(ring, meta->dmaaddr,
1498 if (meta->is_last_fragment) {
1501 if (unlikely(!meta->skb)) {
1502 /* This is a scatter-gather fragment of a frame,
1503 * so the skb pointer must not be NULL. */
1505 b43dbg(dev->wl, "TX status unexpected NULL skb "
1507 slot, firstused, ring->index);
1511 info = IEEE80211_SKB_CB(meta->skb);
1516 * a missed TX status report, use a status structure
1529 ring->nr_succeed_tx_packets++;
1531 ring->nr_failed_tx_packets++;
1532 ring->nr_total_packet_tries += status->frame_count;
1534 ieee80211_tx_status(dev->wl->hw, meta->skb);
1538 meta->skb = B43_DMA_PTR_POISON;
1543 if (unlikely(meta->skb)) {
1544 b43dbg(dev->wl, "TX status unexpected non-NULL skb "
1546 slot, firstused, ring->index);
1552 ring->used_slots--;
1554 if (meta->is_last_fragment && !skip) {
1555 /* This is the last scatter-gather
1556 * fragment of the frame. We are done. */
1561 --skip;
1563 if (ring->stopped) {
1565 ring->stopped = false;
1568 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1569 dev->wl->tx_queue_stopped[ring->queue_prio] = false;
1573 b43_wake_queue(dev, ring->queue_prio);
1575 b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
1579 ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
1584 const struct b43_dma_ops *ops = ring->ops;
1593 desc = ops->idx2desc(ring, *slot, &meta);
1595 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1596 skb = meta->skb;
1598 rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
1599 len = le16_to_cpu(rxhdr->frame_len);
1606 len = le16_to_cpu(rxhdr->frame_len);
1609 dmaaddr = meta->dmaaddr;
1614 /* Something went wrong with the DMA.
1615 * The device did not touch the buffer. */
1616 b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
1617 dmaaddr = meta->dmaaddr;
1620 if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
1630 desc = ops->idx2desc(ring, *slot, &meta);
1632 b43_poison_rx_buffer(ring, meta->skb);
1633 sync_descbuffer_for_device(ring, meta->dmaaddr,
1634 ring->rx_buffersize);
1637 tmp -= ring->rx_buffersize;
1641 b43err(ring->dev->wl, "DMA RX buffer too small "
1642 "(len: %u, buffer: %u, nr-dropped: %d)\n",
1643 len, ring->rx_buffersize, cnt);
1647 dmaaddr = meta->dmaaddr;
1650 b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
1654 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1655 skb_put(skb, len + ring->frameoffset);
1656 skb_pull(skb, ring->frameoffset);
1658 b43_rx(ring->dev, skb, rxhdr);
1665 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1672 B43_WARN_ON(ring->tx);
1682 current_slot = ring->ops->get_current_rxslot(ring);
1684 ring->ops->set_current_rxslot(ring, previous_slot);
1689 const struct b43_dma_ops *ops = ring->ops;
1693 B43_WARN_ON(ring->tx);
1694 current_slot = ops->get_current_rxslot(ring);
1695 B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));
1697 slot = ring->current_slot;
1703 ops->set_current_rxslot(ring, slot);
1704 ring->current_slot = slot;
1709 B43_WARN_ON(!ring->tx);
1710 ring->ops->tx_suspend(ring);
1715 B43_WARN_ON(!ring->tx);
1716 ring->ops->tx_resume(ring);
1722 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
1723 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
1724 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
1725 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
1726 b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
1731 b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
1732 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
1733 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
1734 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
1735 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
1760 /* This is called from PIO code, so DMA structures are not available. */