// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

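/* Fill out a 32bit hardware descriptor: split the DMA address into the
 * low bits (combined with the SSB routing/translation value) and the
 * ADDREXT field of the control word, and encode the byte count along
 * with the FRAMESTART/FRAMEEND/IRQ and descriptor-table-end flags. */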
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX",
		       ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

	/* FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		fallthrough;
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

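/* Map a DMA controller index to the MMIO base of its register block.
 * Only a 32bit register map is needed here; the engine type parameter
 * is unused, as the 30bit and 32bit engines appear to share the same
 * register layout on this legacy hardware. */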
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			  controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					 dma_addr_t addr,
					 size_t buffersize,
					 bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

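/* Allocate and DMA-map the buffer for one RX descriptor slot. If the
 * first mapping is unusable, retry the allocation with GFP_DMA in the
 * hope of getting an address the engine can reach. The frame_len and
 * cookie fields are zeroed so the RX path can detect buffers that the
 * hardware has not written yet. */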
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			& B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

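/* Probe the DMA engine type: write the address-extension mask to TXCTL
 * and read it back. If the ADDREXT bits stick, the core supports 32bit
 * DMA addressing; otherwise it is limited to 30bit addresses. */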
static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			mmio_base + B43legacy_DMA32_TXCTL,
			B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return B43legacy_DMA_32BIT;
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the
	 * RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	enum b43legacy_dmatype type = b43legacy_engine_type(dev);
	int err;

	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				      u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

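/* Queue one frame for transmission. Every packet consumes two ring
 * slots (SLOTS_PER_PACKET): the first for the device TX header from
 * txhdr_cache, the second for the frame payload. If the payload can't
 * be DMA-mapped, it is copied into a GFP_DMA bounce buffer and *in_skb
 * is updated to point at the new skb. */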
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

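/* Queue a frame on the ring selected by its mac80211 queue mapping and
 * stop the queue once the ring can't take another two-slot packet. */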
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
	return err;
}

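/* Handle a TX status report from the hardware. The cookie names the
 * ring and the first slot of the reported packet; all of its slots up
 * to and including the last fragment are unmapped and released, and
 * mac80211 is informed of the transmission result. */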
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the actual retry limit done by the
			 * hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

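/* Process the frame in slot *slot. Ring 3 carries hardware TX status
 * reports instead of frames. For a normal frame, a fresh buffer is
 * allocated into the slot and the filled skb is passed up to
 * b43legacy_rx(); on any error the old buffer is recycled and the
 * frame is dropped. */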
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

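/* Drain the RX ring: process every slot between our last position and
 * the slot the hardware is currently working on, then tell the device
 * how far we got. */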
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}