/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "main.h"
#include "dma.h"
#include "soc.h"
#include "scb.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/*
 * dma register field offset calculation
 */
#define DMA64REGOFFS(field)		offsetof(struct dma64regs, field)
#define DMA64TXREGOFFS(di, field)	(di->d64txregbase + DMA64REGOFFS(field))
#define DMA64RXREGOFFS(di, field)	(di->d64rxregbase + DMA64REGOFFS(field))

/*
 * DMA hardware requires each descriptor ring to be 8 kB aligned and to fit
 * within a single contiguous 8 kB physical address range.
 */
#define D64RINGALIGN_BITS	13
#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define	D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define	D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))
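
/*
 * Worked example (illustrative): struct dma64desc is 16 bytes, so the 8 kB
 * (8192-byte) maximum ring size gives D64MAXDD = 8192 / 16 = 512 descriptors
 * per ring.
 */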

/* transmit channel control */
#define	D64_XC_XE		0x00000001	/* transmit enable */
#define	D64_XC_SE		0x00000002	/* transmit suspend request */
#define	D64_XC_LE		0x00000004	/* loopback enable */
#define	D64_XC_FL		0x00000010	/* flush request */
#define	D64_XC_PD		0x00000800	/* parity check disable */
#define	D64_XC_AE		0x00030000	/* address extension bits */
#define	D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define	D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define	D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define	D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define	D64_XS0_XS_SHIFT	28
#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define	D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define	D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define	D64_XS1_XE_SHIFT	28
#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define	D64_XS1_XE_COREE	0x50000000	/* core error */

/* receive channel control */
/* receive enable */
#define	D64_RC_RE		0x00000001
/* receive frame offset */
#define	D64_RC_RO_MASK		0x000000fe
#define	D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define	D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define	D64_RC_SH		0x00000200
/* overflow continue */
#define	D64_RC_OC		0x00000400
/* parity check disable */
#define	D64_RC_PD		0x00000800
/* address extension bits */
#define	D64_RC_AE		0x00030000
#define	D64_RC_AE_SHIFT		16

/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* unframed rx/tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)

/* receive descriptor table pointer */
#define	D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define	D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define	D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define	D64_RS0_RS_SHIFT	28
#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define	D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define	D64_RS1_RE_SHIFT	28
#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define	D64_RS1_RE_COREE	0x50000000	/* core error */

/* fifoaddr */
#define	D64_FA_OFF_MASK		0xffff	/* offset */
#define	D64_FA_SEL_MASK		0xf0000	/* select */
#define	D64_FA_SEL_SHIFT	16
#define	D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define	D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define	D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define	D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define	D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define	D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define	D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define	D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define	D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define	D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define	D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define	D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define	D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define	D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count; real data length must be <= 16 KB */
#define	D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define	D64_CTRL2_AE		0x00030000
#define	D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define	D64_CTRL_CORE_MASK	0x0ff00000

#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */

/*
 * Packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile-time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */

#define BCMEXTRAHDROOM 172

#define	MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))

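/*
 * Example (illustrative): the hardware reports ring positions as byte
 * offsets, e.g. in the status0 "current descriptor" field. B2I(offset,
 * struct dma64desc) divides by the 16-byte descriptor size to recover a
 * ring index, and I2B() converts an index back to a byte offset.
 */
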
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */

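/*
 * Worked example (illustrative): with 32-bit address extension, a physical
 * address of 0xC0001000 is split into ae = (pa & PCI32ADDR_HIGH) >>
 * PCI32ADDR_HIGH_SHIFT = 0x3, programmed into the AE control/ctrl2 bits,
 * while pa &= ~PCI32ADDR_HIGH leaves 0x00001000 for the address register.
 */
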
/*
 * DMA Descriptor
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;	/* misc control bits & bufcount */
	__le32 ctrl2;	/* buffer count and address extension */
	__le32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
};

/* dma engine software state */
struct dma_info {
	struct dma_pub dma; /* exported structure */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	struct bcma_device *core;
	struct device *dmadev;

	/* session information for AMPDU */
	struct brcms_ampdu_session ampdu_session;

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	uint d64txregbase;
	/* 64-bit dma rx engine registers */
	uint d64rxregbase;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.
			 */

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist the
				 * upper stack, e.g. some rx pkt buffers will
				 * be bridged to the tx side without byte
				 * copying. The extra headroom needs to be
				 * large enough to fit the tx header. Some
				 * dongle drivers may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsetlow;
	/*   high 32 bits */
	uint ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsetlow;
	/*   high 32 bits */
	uint dataoffsethigh;
	/* does the descriptor base need to be aligned or not */
	bool aligndesc_4k;
};

/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}

static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}

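/*
 * Example (illustrative): dma64_dd_parity() XOR-folds the four descriptor
 * words; if the result has an odd number of set bits, dma64_dd_upd() below
 * sets D64_CTRL2_PARITY so that the descriptor as a whole has even parity.
 */
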
/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1); /* faster than %, but n must be power of 2 */
}

static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}

static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}

static uint nextrxd(struct dma_info *di, uint i)
{
	return rxd(di, i + 1);
}

static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t - h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t - h);
}

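/*
 * Worked example (illustrative): with ntxd = 64, head txin = 60 and tail
 * txout = 4, ntxdactive(di, 60, 4) computes (4 - 60) & 63 = 8 descriptors
 * in flight; the power-of-two mask makes the unsigned subtraction wrap
 * correctly around the end of the ring.
 */
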
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL)
		return 0;

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
		bcma_write32(di->core, DMA64TXREGOFFS(di, control),
			     control | D64_XC_PD);
		if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
		    D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			bcma_write32(di->core, DMA64TXREGOFFS(di, control),
				     control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}

static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
	u32 w;

	bcma_set32(di->core, ctrl_offset, D64_XC_AE);
	w = bcma_read32(di->core, ctrl_offset);
	bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}

/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregbase != 0) {
		if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
			brcms_dbg_dma(di->core,
				      "%s: DMA64 tx doesn't have AE set\n",
				      di->name);
		return true;
	} else if (di->d64rxregbase != 0) {
		if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
			brcms_dbg_dma(di->core,
				      "%s: DMA64 rx doesn't have AE set\n",
				      di->name);
		return true;
	}

	return false;
}

static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregbase != 0) {
		bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	} else if (di->d64rxregbase != 0) {
		bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	}
	return true;
}

/*
 * The descriptor table must start at the DMA hardware-dictated alignment,
 * so the allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct dma_info *di, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);

		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}

static
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;

	while (size >>= 1)
		bitpos++;
	return bitpos;
}

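/*
 * Example (illustrative): dma_align_sizetobits(4096) returns 12, i.e. the
 * alignment exponent needed to realign a ring on its own size.
 */
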
/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page
 * boundary, it is freed and the allocation is redone at a location aligned
 * to the descriptor ring size, which guarantees that the ring cannot
 * cross a page boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		dma_free_coherent(di->dmadev, size, va, *descpa);
		va = dma_alloc_consistent(di, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}

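/*
 * Why the retry works (illustrative): the check above compares the boundary
 * bit of the first and last ring byte, so it fails only when the ring
 * straddles the boundary. Reallocating with alignbits =
 * dma_align_sizetobits(size) places the ring at a multiple of its own size,
 * and a size-aligned block cannot straddle a boundary that is a multiple of
 * that size.
 */
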
static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
					roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
					roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset)
{
	struct si_pub *sih = wlc->hw->sih;
	struct bcma_device *core = wlc->hw->d11core;
	struct dma_info *di;
	u8 rev = core->id.rev;
	uint size;
	struct si_info *sii = container_of(sih, struct si_info, pub);

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->dma64 =
		((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg info */
	di->core = core;
	di->d64txregbase = txregbase;
	di->d64rxregbase = rxregbase;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): for backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
		      "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		      "txregbase %u rxregbase %u\n", name, "DMA64",
		      di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		      rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->dmadev = core->dma_dev;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIe: they map silicon backplane addresses to zero-based
	 *     memory, need offset
	 *     Other bus: use zero. SI_BUS BIGENDIAN kludge: use the sdram
	 *     swapped region for data buffers, not descriptors
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
		/* add offset for pcie with DMA64 bus */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
	}
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;

	/* WAR64450: DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((core->id.id == BCMA_CORE_SDIO_DEV)
	    && ((rev > 0) && (rev <= 2)))
		di->addrext = false;
	else if ((core->id.id == BCMA_CORE_I2S) &&
		 ((rev == 0) || (rev == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for a smaller dd table, the HW relaxes the
			 * alignment requirement
			 */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
		      di->aligndesc_4k, di->dmadesc_align);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: txdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: rxdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	/* Initialize AMPDU session */
	brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);

	brcms_dbg_dma(di->core,
		      "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		      di->ddoffsetlow, di->ddoffsethigh,
		      di->dataoffsetlow, di->dataoffsethigh,
		      di->addrext);

	return (struct dma_pub *) di;

 fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}

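/*
 * Typical usage (illustrative sketch, not a verbatim caller): the wlc layer
 * attaches one engine per FIFO and drives the rx path roughly as
 *
 *	struct dma_pub *d = dma_attach("wl0", wlc, txregbase, rxregbase,
 *				       ntxd, nrxd, rxbufsize, -1,
 *				       nrxpost, rxoffset);
 *	dma_rxinit(d);
 *	dma_rxfill(d);		(post initial receive buffers)
 *	...
 *	dma_rx(d, &frames);	(harvest one completed frame)
 *	dma_rxfill(d);		(repost buffers)
 *	...
 *	dma_rxreclaim(d);	(on teardown)
 *	dma_detach(d);
 *
 * where rxextheadroom == -1 selects the BCMEXTRAHDROOM default.
 */
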
static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			     cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}

/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		dma_free_coherent(di->dmadev, di->txdalloc,
				  ((s8 *)di->txd64 - di->txdalign),
				  (di->txdpaorig));
	if (di->rxd64)
		dma_free_coherent(di->dmadev, di->rxdalloc,
				  ((s8 *)di->rxd64 - di->rxdalign),
				  (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		}
	} else {
		/* DMA64 32-bit address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}

static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	control = D64_RC_RE | (bcma_read32(di->core,
					   DMA64RXREGOFFS(di, control)) &
			       D64_RC_AE);

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
		((di->rxoffset << D64_RC_RO_SHIFT) | control));
}

void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* A DMA engine without an alignment requirement requires the table
	 * to be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((bcma_read32(di->core,
			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);

	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}

/*
 * !! rx entry routine
 * Returns the number of buffers in the next frame, or 0 if there are none.
 *   If DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 *   supported with a pkt chain; otherwise the frame is treated as a giant
 *   pkt and is tossed.
 *   DMA scattering starts with a normal DMA header followed by the first
 *   buffer's data. When the max buffer size is reached, the data continues
 *   in the next DMA descriptor buffer WITHOUT a DMA header.
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
 next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	len = le16_to_cpu(*(__le16 *) (p->data));
	brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur;

			cur =
			    B2I(((bcma_read32(di->core,
					      DMA64RXREGOFFS(di, status0)) &
				  D64_RS0_CD_MASK) - di->rcvptrbase) &
				D64_RS0_CD_MASK, struct dma64desc);
			brcms_dbg_dma(di->core,
				      "rxin %d rxout %d, hw_curr %d\n",
				      di->rxin, di->rxout, cur);
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
				      di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}

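/*
 * Worked example (illustrative): with rxbufsize = 2048 and rxoffset = 30,
 * a frame of len = 3000 fills the first buffer (trimmed to 2048 bytes) and
 * leaves resid = 3000 - (2048 - 30) = 982 bytes, so one more buffer is
 * dequeued and trimmed to 982 bytes, giving pktcnt = 2.
 */
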
static bool dma64_rxidle(struct dma_info *di)
{
	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
		 D64_RS0_CD_MASK));
}

static bool dma64_txidle(struct dma_info *di)
{
	if (di->ntxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
		 D64_XS0_CD_MASK));
}
101505491d2cSKalle Valo 
101605491d2cSKalle Valo /*
101705491d2cSKalle Valo  * post receive buffers
101805491d2cSKalle Valo  *  Return false if refill failed completely or dma mapping failed. The ring
101905491d2cSKalle Valo  *  is empty, which will stall the rx dma and user might want to call rxfill
102005491d2cSKalle Valo  *  again asap. This is unlikely to happen on a memory-rich NIC, but often on
102105491d2cSKalle Valo  *  memory-constrained dongle.
102205491d2cSKalle Valo  */
dma_rxfill(struct dma_pub * pub)102305491d2cSKalle Valo bool dma_rxfill(struct dma_pub *pub)
102405491d2cSKalle Valo {
102505491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
102605491d2cSKalle Valo 	struct sk_buff *p;
102705491d2cSKalle Valo 	u16 rxin, rxout;
102805491d2cSKalle Valo 	u32 flags = 0;
102905491d2cSKalle Valo 	uint n;
103005491d2cSKalle Valo 	uint i;
103105491d2cSKalle Valo 	dma_addr_t pa;
103205491d2cSKalle Valo 	uint extra_offset = 0;
103305491d2cSKalle Valo 	bool ring_empty;
103405491d2cSKalle Valo 
103505491d2cSKalle Valo 	ring_empty = false;
103605491d2cSKalle Valo 
103705491d2cSKalle Valo 	/*
103805491d2cSKalle Valo 	 * Determine how many receive buffers we're lacking
103905491d2cSKalle Valo 	 * from the full complement, allocate, initialize,
104005491d2cSKalle Valo 	 * and post them, then update the chip rx lastdscr.
104105491d2cSKalle Valo 	 */
104205491d2cSKalle Valo 
104305491d2cSKalle Valo 	rxin = di->rxin;
104405491d2cSKalle Valo 	rxout = di->rxout;
104505491d2cSKalle Valo 
104605491d2cSKalle Valo 	n = di->nrxpost - nrxdactive(di, rxin, rxout);
104705491d2cSKalle Valo 
104805491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);
104905491d2cSKalle Valo 
105005491d2cSKalle Valo 	if (di->rxbufsize > BCMEXTRAHDROOM)
105105491d2cSKalle Valo 		extra_offset = di->rxextrahdrroom;
105205491d2cSKalle Valo 
105305491d2cSKalle Valo 	for (i = 0; i < n; i++) {
105405491d2cSKalle Valo 		/*
105505491d2cSKalle Valo 		 * the di->rxbufsize doesn't include the extra headroom,
105605491d2cSKalle Valo 		 * we need to add it to the size to be allocated
105705491d2cSKalle Valo 		 */
105805491d2cSKalle Valo 		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
105905491d2cSKalle Valo 
106005491d2cSKalle Valo 		if (p == NULL) {
106105491d2cSKalle Valo 			brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
106205491d2cSKalle Valo 				      di->name);
106305491d2cSKalle Valo 			if (i == 0 && dma64_rxidle(di)) {
106405491d2cSKalle Valo 				brcms_dbg_dma(di->core, "%s: ring is empty!\n",
106505491d2cSKalle Valo 					      di->name);
106605491d2cSKalle Valo 				ring_empty = true;
106705491d2cSKalle Valo 			}
106805491d2cSKalle Valo 			di->dma.rxnobuf++;
106905491d2cSKalle Valo 			break;
107005491d2cSKalle Valo 		}
107105491d2cSKalle Valo 		/* reserve the extra headroom, if applicable */
107205491d2cSKalle Valo 		if (extra_offset)
107305491d2cSKalle Valo 			skb_pull(p, extra_offset);
107405491d2cSKalle Valo 
107505491d2cSKalle Valo 		/* Do a cached write instead of uncached write since DMA_MAP
107605491d2cSKalle Valo 		 * will flush the cache.
107705491d2cSKalle Valo 		 */
107805491d2cSKalle Valo 		*(u32 *) (p->data) = 0;
107905491d2cSKalle Valo 
108005491d2cSKalle Valo 		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
108105491d2cSKalle Valo 				    DMA_FROM_DEVICE);
10825c5fa1f4SFlorian Fainelli 		if (dma_mapping_error(di->dmadev, pa)) {
10835c5fa1f4SFlorian Fainelli 			brcmu_pkt_buf_free_skb(p);
108405491d2cSKalle Valo 			return false;
10855c5fa1f4SFlorian Fainelli 		}
108605491d2cSKalle Valo 
108705491d2cSKalle Valo 		/* save the free packet pointer */
108805491d2cSKalle Valo 		di->rxp[rxout] = p;
108905491d2cSKalle Valo 
109005491d2cSKalle Valo 		/* reset flags for each descriptor */
109105491d2cSKalle Valo 		flags = 0;
109205491d2cSKalle Valo 		if (rxout == (di->nrxd - 1))
109305491d2cSKalle Valo 			flags = D64_CTRL1_EOT;
109405491d2cSKalle Valo 
109505491d2cSKalle Valo 		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
109605491d2cSKalle Valo 			     di->rxbufsize);
109705491d2cSKalle Valo 		rxout = nextrxd(di, rxout);
109805491d2cSKalle Valo 	}
109905491d2cSKalle Valo 
110005491d2cSKalle Valo 	di->rxout = rxout;
110105491d2cSKalle Valo 
110205491d2cSKalle Valo 	/* update the chip lastdscr pointer */
110305491d2cSKalle Valo 	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
110405491d2cSKalle Valo 	      di->rcvptrbase + I2B(rxout, struct dma64desc));
110505491d2cSKalle Valo 
110605491d2cSKalle Valo 	return ring_empty;
110705491d2cSKalle Valo }
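
/*
 * Illustrative sketch only (not part of the driver): one way a caller might
 * act on dma_rxfill()'s return value. The helper name and the single
 * delayed retry are assumptions, not driver API.
 */
static void __maybe_unused example_rxfill_with_retry(struct dma_pub *pub)
{
	/* a true return means the ring is empty and rx is stalled */
	if (dma_rxfill(pub)) {
		udelay(100);	/* give the allocator a moment (arbitrary) */
		(void)dma_rxfill(pub);
	}
}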
110805491d2cSKalle Valo 
110905491d2cSKalle Valo void dma_rxreclaim(struct dma_pub *pub)
111005491d2cSKalle Valo {
111105491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
111205491d2cSKalle Valo 	struct sk_buff *p;
111305491d2cSKalle Valo 
111405491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s:\n", di->name);
111505491d2cSKalle Valo 
111605491d2cSKalle Valo 	while ((p = _dma_getnextrxp(di, true)))
111705491d2cSKalle Valo 		brcmu_pkt_buf_free_skb(p);
111805491d2cSKalle Valo }
111905491d2cSKalle Valo 
112005491d2cSKalle Valo void dma_counterreset(struct dma_pub *pub)
112105491d2cSKalle Valo {
112205491d2cSKalle Valo 	/* reset all software counters */
112305491d2cSKalle Valo 	pub->rxgiants = 0;
112405491d2cSKalle Valo 	pub->rxnobuf = 0;
112505491d2cSKalle Valo 	pub->txnobuf = 0;
112605491d2cSKalle Valo }
112705491d2cSKalle Valo 
112805491d2cSKalle Valo /* get the address of the variable so it can be read or changed later */
112905491d2cSKalle Valo unsigned long dma_getvar(struct dma_pub *pub, const char *name)
113005491d2cSKalle Valo {
113105491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
113205491d2cSKalle Valo 
113305491d2cSKalle Valo 	if (!strcmp(name, "&txavail"))
113405491d2cSKalle Valo 		return (unsigned long)&(di->dma.txavail);
113505491d2cSKalle Valo 	return 0;
113605491d2cSKalle Valo }
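
/*
 * Illustrative sketch only: reading the tx-availability counter through the
 * address returned by dma_getvar(). "&txavail" is the only name the routine
 * recognizes; the helper name is an assumption.
 */
static uint __maybe_unused example_read_txavail(struct dma_pub *pub)
{
	unsigned long addr = dma_getvar(pub, "&txavail");

	return addr ? *(uint *)addr : 0;
}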
113705491d2cSKalle Valo 
113805491d2cSKalle Valo /* 64-bit DMA functions */
113905491d2cSKalle Valo 
114005491d2cSKalle Valo void dma_txinit(struct dma_pub *pub)
114105491d2cSKalle Valo {
114205491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
114305491d2cSKalle Valo 	u32 control = D64_XC_XE;
114405491d2cSKalle Valo 
114505491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s:\n", di->name);
114605491d2cSKalle Valo 
114705491d2cSKalle Valo 	if (di->ntxd == 0)
114805491d2cSKalle Valo 		return;
114905491d2cSKalle Valo 
115005491d2cSKalle Valo 	di->txin = di->txout = 0;
115105491d2cSKalle Valo 	di->dma.txavail = di->ntxd - 1;
115205491d2cSKalle Valo 
115305491d2cSKalle Valo 	/* clear tx descriptor ring */
115405491d2cSKalle Valo 	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
115505491d2cSKalle Valo 
115605491d2cSKalle Valo 	/* A DMA engine without an alignment requirement requires the table
115705491d2cSKalle Valo 	 * to be initialized before the engine is enabled
115805491d2cSKalle Valo 	 */
115905491d2cSKalle Valo 	if (!di->aligndesc_4k)
116005491d2cSKalle Valo 		_dma_ddtable_init(di, DMA_TX, di->txdpa);
116105491d2cSKalle Valo 
116205491d2cSKalle Valo 	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
116305491d2cSKalle Valo 		control |= D64_XC_PD;
116405491d2cSKalle Valo 	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
116505491d2cSKalle Valo 
116605491d2cSKalle Valo 	/* A DMA engine with an alignment requirement requires the table
116705491d2cSKalle Valo 	 * to be initialized after the engine is enabled
116805491d2cSKalle Valo 	 */
116905491d2cSKalle Valo 	if (di->aligndesc_4k)
117005491d2cSKalle Valo 		_dma_ddtable_init(di, DMA_TX, di->txdpa);
117105491d2cSKalle Valo }
117205491d2cSKalle Valo 
117305491d2cSKalle Valo void dma_txsuspend(struct dma_pub *pub)
117405491d2cSKalle Valo {
117505491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
117605491d2cSKalle Valo 
117705491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s:\n", di->name);
117805491d2cSKalle Valo 
117905491d2cSKalle Valo 	if (di->ntxd == 0)
118005491d2cSKalle Valo 		return;
118105491d2cSKalle Valo 
118205491d2cSKalle Valo 	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
118305491d2cSKalle Valo }
118405491d2cSKalle Valo 
118505491d2cSKalle Valo void dma_txresume(struct dma_pub *pub)
118605491d2cSKalle Valo {
118705491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
118805491d2cSKalle Valo 
118905491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s:\n", di->name);
119005491d2cSKalle Valo 
119105491d2cSKalle Valo 	if (di->ntxd == 0)
119205491d2cSKalle Valo 		return;
119305491d2cSKalle Valo 
119405491d2cSKalle Valo 	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
119505491d2cSKalle Valo }
119605491d2cSKalle Valo 
119705491d2cSKalle Valo bool dma_txsuspended(struct dma_pub *pub)
119805491d2cSKalle Valo {
119905491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
120005491d2cSKalle Valo 
120105491d2cSKalle Valo 	return (di->ntxd == 0) ||
120205491d2cSKalle Valo 	       ((bcma_read32(di->core,
120305491d2cSKalle Valo 			     DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
120405491d2cSKalle Valo 		D64_XC_SE);
120505491d2cSKalle Valo }
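
/*
 * Illustrative sketch only: the suspend -> wait -> work -> resume pattern
 * that dma_txsuspend()/dma_txsuspended()/dma_txresume() support. The poll
 * count and delay are assumptions, not documented limits.
 */
static void __maybe_unused example_quiesce_tx(struct dma_pub *pub)
{
	int spins = 1000;

	dma_txsuspend(pub);
	while (!dma_txsuspended(pub) && spins-- > 0)
		udelay(10);

	/* ... safe to inspect or reclaim tx descriptors here ... */

	dma_txresume(pub);
}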
120605491d2cSKalle Valo 
120705491d2cSKalle Valo void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
120805491d2cSKalle Valo {
120905491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
121005491d2cSKalle Valo 	struct sk_buff *p;
121105491d2cSKalle Valo 
121205491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s: %s\n",
121305491d2cSKalle Valo 		      di->name,
121405491d2cSKalle Valo 		      range == DMA_RANGE_ALL ? "all" :
121505491d2cSKalle Valo 		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
121605491d2cSKalle Valo 		      "transferred");
121705491d2cSKalle Valo 
121805491d2cSKalle Valo 	if (di->txin == di->txout)
121905491d2cSKalle Valo 		return;
122005491d2cSKalle Valo 
122105491d2cSKalle Valo 	while ((p = dma_getnexttxp(pub, range))) {
122205491d2cSKalle Valo 		/* For unframed data, we don't have any packets to free */
122305491d2cSKalle Valo 		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
122405491d2cSKalle Valo 			brcmu_pkt_buf_free_skb(p);
122505491d2cSKalle Valo 	}
122605491d2cSKalle Valo }
122705491d2cSKalle Valo 
122805491d2cSKalle Valo bool dma_txreset(struct dma_pub *pub)
122905491d2cSKalle Valo {
123005491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
123105491d2cSKalle Valo 	u32 status;
123205491d2cSKalle Valo 
123305491d2cSKalle Valo 	if (di->ntxd == 0)
123405491d2cSKalle Valo 		return true;
123505491d2cSKalle Valo 
123605491d2cSKalle Valo 	/* suspend tx DMA first */
123705491d2cSKalle Valo 	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
123805491d2cSKalle Valo 	SPINWAIT(((status =
123905491d2cSKalle Valo 		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
124005491d2cSKalle Valo 		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
124105491d2cSKalle Valo 		  (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
124205491d2cSKalle Valo 		 10000);
124305491d2cSKalle Valo 
124405491d2cSKalle Valo 	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
124505491d2cSKalle Valo 	SPINWAIT(((status =
124605491d2cSKalle Valo 		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
124705491d2cSKalle Valo 		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
124805491d2cSKalle Valo 
124905491d2cSKalle Valo 	/* wait for the last transaction to complete */
125005491d2cSKalle Valo 	udelay(300);
125105491d2cSKalle Valo 
125205491d2cSKalle Valo 	return status == D64_XS0_XS_DISABLED;
125305491d2cSKalle Valo }
125405491d2cSKalle Valo 
125505491d2cSKalle Valo bool dma_rxreset(struct dma_pub *pub)
125605491d2cSKalle Valo {
125705491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
125805491d2cSKalle Valo 	u32 status;
125905491d2cSKalle Valo 
126005491d2cSKalle Valo 	if (di->nrxd == 0)
126105491d2cSKalle Valo 		return true;
126205491d2cSKalle Valo 
126305491d2cSKalle Valo 	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
126405491d2cSKalle Valo 	SPINWAIT(((status =
126505491d2cSKalle Valo 		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
126605491d2cSKalle Valo 		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
126705491d2cSKalle Valo 
126805491d2cSKalle Valo 	return status == D64_RS0_RS_DISABLED;
126905491d2cSKalle Valo }
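
/*
 * Illustrative sketch only: a teardown order built from the reset and
 * reclaim primitives above. Silently skipping reclaim when an engine
 * refuses to disable is an assumption made to keep the example short.
 */
static void __maybe_unused example_dma_stop(struct dma_pub *pub)
{
	if (dma_txreset(pub))
		dma_txreclaim(pub, DMA_RANGE_ALL);

	if (dma_rxreset(pub))
		dma_rxreclaim(pub);
}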
127005491d2cSKalle Valo 
127105491d2cSKalle Valo static void dma_txenq(struct dma_info *di, struct sk_buff *p)
127205491d2cSKalle Valo {
127305491d2cSKalle Valo 	unsigned char *data;
127405491d2cSKalle Valo 	uint len;
127505491d2cSKalle Valo 	u16 txout;
127605491d2cSKalle Valo 	u32 flags = 0;
127705491d2cSKalle Valo 	dma_addr_t pa;
127805491d2cSKalle Valo 
127905491d2cSKalle Valo 	txout = di->txout;
128005491d2cSKalle Valo 
128105491d2cSKalle Valo 	if (WARN_ON(nexttxd(di, txout) == di->txin))
128205491d2cSKalle Valo 		return;
128305491d2cSKalle Valo 
128405491d2cSKalle Valo 	/*
128505491d2cSKalle Valo 	 * obtain and initialize transmit descriptor entry.
128605491d2cSKalle Valo 	 */
128705491d2cSKalle Valo 	data = p->data;
128805491d2cSKalle Valo 	len = p->len;
128905491d2cSKalle Valo 
129005491d2cSKalle Valo 	/* get physical address of buffer start */
129105491d2cSKalle Valo 	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
129205491d2cSKalle Valo 	/* if mapping failed, free skb */
129305491d2cSKalle Valo 	if (dma_mapping_error(di->dmadev, pa)) {
129405491d2cSKalle Valo 		brcmu_pkt_buf_free_skb(p);
129505491d2cSKalle Valo 		return;
129605491d2cSKalle Valo 	}
129705491d2cSKalle Valo 	/* With a DMA segment list, the descriptor table is filled
129805491d2cSKalle Valo 	 * from the segment list instead of by looping over buffers
129905491d2cSKalle Valo 	 * in a multi-chain DMA. Therefore, for a segment list, EOF
130005491d2cSKalle Valo 	 * is set when the end of the segment list is reached.
130105491d2cSKalle Valo 	 */
130205491d2cSKalle Valo 	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
130305491d2cSKalle Valo 	if (txout == (di->ntxd - 1))
130405491d2cSKalle Valo 		flags |= D64_CTRL1_EOT;
130505491d2cSKalle Valo 
130605491d2cSKalle Valo 	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
130705491d2cSKalle Valo 
130805491d2cSKalle Valo 	txout = nexttxd(di, txout);
130905491d2cSKalle Valo 
131005491d2cSKalle Valo 	/* save the packet */
131105491d2cSKalle Valo 	di->txp[prevtxd(di, txout)] = p;
131205491d2cSKalle Valo 
131305491d2cSKalle Valo 	/* bump the tx descriptor index */
131405491d2cSKalle Valo 	di->txout = txout;
131505491d2cSKalle Valo }
131605491d2cSKalle Valo 
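/*
 * Close out the in-progress AMPDU session: let the AMPDU code fix up the
 * queued frames, post them all to the tx ring, kick the chip by writing
 * the new last-descriptor offset, and reset the session for reuse.
 */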
131705491d2cSKalle Valo static void ampdu_finalize(struct dma_info *di)
131805491d2cSKalle Valo {
131905491d2cSKalle Valo 	struct brcms_ampdu_session *session = &di->ampdu_session;
132005491d2cSKalle Valo 	struct sk_buff *p;
132105491d2cSKalle Valo 
132205491d2cSKalle Valo 	trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
132305491d2cSKalle Valo 				  session->max_ampdu_len,
132405491d2cSKalle Valo 				  session->max_ampdu_frames,
132505491d2cSKalle Valo 				  session->ampdu_len,
132605491d2cSKalle Valo 				  skb_queue_len(&session->skb_list),
132705491d2cSKalle Valo 				  session->dma_len);
132805491d2cSKalle Valo 
132905491d2cSKalle Valo 	if (WARN_ON(skb_queue_empty(&session->skb_list)))
133005491d2cSKalle Valo 		return;
133105491d2cSKalle Valo 
133205491d2cSKalle Valo 	brcms_c_ampdu_finalize(session);
133305491d2cSKalle Valo 
133405491d2cSKalle Valo 	while (!skb_queue_empty(&session->skb_list)) {
133505491d2cSKalle Valo 		p = skb_dequeue(&session->skb_list);
133605491d2cSKalle Valo 		dma_txenq(di, p);
133705491d2cSKalle Valo 	}
133805491d2cSKalle Valo 
133905491d2cSKalle Valo 	bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
134005491d2cSKalle Valo 		     di->xmtptrbase + I2B(di->txout, struct dma64desc));
134105491d2cSKalle Valo 	brcms_c_ampdu_reset_session(session, session->wlc);
134205491d2cSKalle Valo }
134305491d2cSKalle Valo 
134405491d2cSKalle Valo static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
134505491d2cSKalle Valo {
134605491d2cSKalle Valo 	struct brcms_ampdu_session *session = &di->ampdu_session;
134705491d2cSKalle Valo 	int ret;
134805491d2cSKalle Valo 
134905491d2cSKalle Valo 	ret = brcms_c_ampdu_add_frame(session, p);
135005491d2cSKalle Valo 	if (ret == -ENOSPC) {
135105491d2cSKalle Valo 		/*
135205491d2cSKalle Valo 		 * AMPDU cannot accommodate this frame. Close out the in-
135305491d2cSKalle Valo 		 * progress AMPDU session and start a new one.
135405491d2cSKalle Valo 		 */
135505491d2cSKalle Valo 		ampdu_finalize(di);
135605491d2cSKalle Valo 		ret = brcms_c_ampdu_add_frame(session, p);
135705491d2cSKalle Valo 	}
135805491d2cSKalle Valo 
135905491d2cSKalle Valo 	WARN_ON(ret);
136005491d2cSKalle Valo }
136105491d2cSKalle Valo 
136205491d2cSKalle Valo /* Update count of available tx descriptors based on current DMA state */
136305491d2cSKalle Valo static void dma_update_txavail(struct dma_info *di)
136405491d2cSKalle Valo {
136505491d2cSKalle Valo 	/*
136605491d2cSKalle Valo 	 * Available space is number of descriptors less the number of
136705491d2cSKalle Valo 	 * active descriptors and the number of queued AMPDU frames.
136805491d2cSKalle Valo 	 */
136905491d2cSKalle Valo 	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
137005491d2cSKalle Valo 			  skb_queue_len(&di->ampdu_session.skb_list) - 1;
137105491d2cSKalle Valo }
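
/*
 * Worked example: with ntxd = 256, txin = 10, txout = 250 and 3 frames
 * still queued in the AMPDU session, ntxdactive() = 240 and
 * txavail = 256 - 240 - 3 - 1 = 12. The final "- 1" matches the
 * ntxd - 1 that dma_txinit() starts from and keeps one descriptor
 * unused, so a full ring stays distinguishable from an empty one.
 */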
137205491d2cSKalle Valo 
137305491d2cSKalle Valo /*
137405491d2cSKalle Valo  * !! tx entry routine
137505491d2cSKalle Valo  * WARNING: the caller must check the return value for errors.
137605491d2cSKalle Valo  *   An error (a tossed frame) could be fatal and cause many subsequent
137705491d2cSKalle Valo  *   hard-to-debug problems.
137805491d2cSKalle Valo  */
137905491d2cSKalle Valo int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
138005491d2cSKalle Valo 	       struct sk_buff *p)
138105491d2cSKalle Valo {
138205491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
138305491d2cSKalle Valo 	struct brcms_ampdu_session *session = &di->ampdu_session;
138405491d2cSKalle Valo 	struct ieee80211_tx_info *tx_info;
138505491d2cSKalle Valo 	bool is_ampdu;
138605491d2cSKalle Valo 
138705491d2cSKalle Valo 	/* no point in transmitting a zero-length packet */
138805491d2cSKalle Valo 	if (p->len == 0)
138905491d2cSKalle Valo 		return 0;
139005491d2cSKalle Valo 
139105491d2cSKalle Valo 	/* return nonzero if out of tx descriptors */
139205491d2cSKalle Valo 	if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
139305491d2cSKalle Valo 		goto outoftxd;
139405491d2cSKalle Valo 
139505491d2cSKalle Valo 	tx_info = IEEE80211_SKB_CB(p);
139605491d2cSKalle Valo 	is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
139705491d2cSKalle Valo 	if (is_ampdu)
139805491d2cSKalle Valo 		prep_ampdu_frame(di, p);
139905491d2cSKalle Valo 	else
140005491d2cSKalle Valo 		dma_txenq(di, p);
140105491d2cSKalle Valo 
140205491d2cSKalle Valo 	/* tx flow control */
140305491d2cSKalle Valo 	dma_update_txavail(di);
140405491d2cSKalle Valo 
140505491d2cSKalle Valo 	/* kick the chip */
140605491d2cSKalle Valo 	if (is_ampdu) {
140705491d2cSKalle Valo 		/*
140805491d2cSKalle Valo 		 * Start sending data if we've got a full AMPDU, there's
140905491d2cSKalle Valo 		 * no more space in the DMA ring, or the ring isn't
141005491d2cSKalle Valo 		 * currently transmitting.
141105491d2cSKalle Valo 		 */
141205491d2cSKalle Valo 		if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
141305491d2cSKalle Valo 		    di->dma.txavail == 0 || dma64_txidle(di))
141405491d2cSKalle Valo 			ampdu_finalize(di);
141505491d2cSKalle Valo 	} else {
141605491d2cSKalle Valo 		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
141705491d2cSKalle Valo 			     di->xmtptrbase + I2B(di->txout, struct dma64desc));
141805491d2cSKalle Valo 	}
141905491d2cSKalle Valo 
142005491d2cSKalle Valo 	return 0;
142105491d2cSKalle Valo 
142205491d2cSKalle Valo  outoftxd:
142305491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s: out of txds!!!\n", di->name);
142405491d2cSKalle Valo 	brcmu_pkt_buf_free_skb(p);
142505491d2cSKalle Valo 	di->dma.txavail = 0;
142605491d2cSKalle Valo 	di->dma.txnobuf++;
142705491d2cSKalle Valo 	return -ENOSPC;
142805491d2cSKalle Valo }
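
/*
 * Illustrative sketch only: the error check the WARNING above demands.
 * dma_txfast() frees the frame on failure, so the caller must not touch
 * p afterwards; the helper name and the debug print are assumptions.
 */
static void __maybe_unused example_tx_one(struct brcms_c_info *wlc,
					  struct dma_pub *pub,
					  struct sk_buff *p)
{
	if (dma_txfast(wlc, pub, p))
		pr_debug("tx ring full, frame tossed (txnobuf=%u)\n",
			 pub->txnobuf);
}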
142905491d2cSKalle Valo 
143005491d2cSKalle Valo void dma_txflush(struct dma_pub *pub)
143105491d2cSKalle Valo {
143205491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
143305491d2cSKalle Valo 	struct brcms_ampdu_session *session = &di->ampdu_session;
143405491d2cSKalle Valo 
143505491d2cSKalle Valo 	if (!skb_queue_empty(&session->skb_list))
143605491d2cSKalle Valo 		ampdu_finalize(di);
143705491d2cSKalle Valo }
143805491d2cSKalle Valo 
143905491d2cSKalle Valo int dma_txpending(struct dma_pub *pub)
144005491d2cSKalle Valo {
144105491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
144205491d2cSKalle Valo 	return ntxdactive(di, di->txin, di->txout);
144305491d2cSKalle Valo }
144405491d2cSKalle Valo 
144505491d2cSKalle Valo /*
144605491d2cSKalle Valo  * If we have an active AMPDU session and are not transmitting,
144705491d2cSKalle Valo  * this function will force tx to start.
144805491d2cSKalle Valo  */
144905491d2cSKalle Valo void dma_kick_tx(struct dma_pub *pub)
145005491d2cSKalle Valo {
145105491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
145205491d2cSKalle Valo 	struct brcms_ampdu_session *session = &di->ampdu_session;
145305491d2cSKalle Valo 
145405491d2cSKalle Valo 	if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
145505491d2cSKalle Valo 		ampdu_finalize(di);
145605491d2cSKalle Valo }
145705491d2cSKalle Valo 
145805491d2cSKalle Valo /*
145905491d2cSKalle Valo  * Reclaim the next completed txd (txds if using chained buffers) in the
146005491d2cSKalle Valo  * range specified and return the associated packet.
146105491d2cSKalle Valo  * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
146205491d2cSKalle Valo  * transmitted as noted by the hardware "CurrDescr" pointer.
146305491d2cSKalle Valo  * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
146405491d2cSKalle Valo  * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
146505491d2cSKalle Valo  * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
146605491d2cSKalle Valo  * return the associated packets regardless of the hardware pointers.
146705491d2cSKalle Valo  */
146805491d2cSKalle Valo struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
146905491d2cSKalle Valo {
147005491d2cSKalle Valo 	struct dma_info *di = container_of(pub, struct dma_info, dma);
147105491d2cSKalle Valo 	u16 start, end, i;
147205491d2cSKalle Valo 	u16 active_desc;
147305491d2cSKalle Valo 	struct sk_buff *txp;
147405491d2cSKalle Valo 
147505491d2cSKalle Valo 	brcms_dbg_dma(di->core, "%s: %s\n",
147605491d2cSKalle Valo 		      di->name,
147705491d2cSKalle Valo 		      range == DMA_RANGE_ALL ? "all" :
147805491d2cSKalle Valo 		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
147905491d2cSKalle Valo 		      "transferred");
148005491d2cSKalle Valo 
148105491d2cSKalle Valo 	if (di->ntxd == 0)
148205491d2cSKalle Valo 		return NULL;
148305491d2cSKalle Valo 
148405491d2cSKalle Valo 	txp = NULL;
148505491d2cSKalle Valo 
148605491d2cSKalle Valo 	start = di->txin;
148705491d2cSKalle Valo 	if (range == DMA_RANGE_ALL)
148805491d2cSKalle Valo 		end = di->txout;
148905491d2cSKalle Valo 	else {
149005491d2cSKalle Valo 		end = (u16) (B2I(((bcma_read32(di->core,
149105491d2cSKalle Valo 					       DMA64TXREGOFFS(di, status0)) &
149205491d2cSKalle Valo 				   D64_XS0_CD_MASK) - di->xmtptrbase) &
149305491d2cSKalle Valo 				 D64_XS0_CD_MASK, struct dma64desc));
149405491d2cSKalle Valo 
149505491d2cSKalle Valo 		if (range == DMA_RANGE_TRANSFERED) {
149605491d2cSKalle Valo 			active_desc =
149705491d2cSKalle Valo 				(u16)(bcma_read32(di->core,
149805491d2cSKalle Valo 						  DMA64TXREGOFFS(di, status1)) &
149905491d2cSKalle Valo 				      D64_XS1_AD_MASK);
150005491d2cSKalle Valo 			active_desc =
150105491d2cSKalle Valo 			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
150205491d2cSKalle Valo 			active_desc = B2I(active_desc, struct dma64desc);
150305491d2cSKalle Valo 			if (end != active_desc)
150405491d2cSKalle Valo 				end = prevtxd(di, active_desc);
150505491d2cSKalle Valo 		}
150605491d2cSKalle Valo 	}
150705491d2cSKalle Valo 
150805491d2cSKalle Valo 	if ((start == 0) && (end > di->txout))
150905491d2cSKalle Valo 		goto bogus;
151005491d2cSKalle Valo 
151105491d2cSKalle Valo 	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
151205491d2cSKalle Valo 		dma_addr_t pa;
151305491d2cSKalle Valo 		uint size;
151405491d2cSKalle Valo 
151505491d2cSKalle Valo 		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;
151605491d2cSKalle Valo 
151705491d2cSKalle Valo 		size =
151805491d2cSKalle Valo 		    (le32_to_cpu(di->txd64[i].ctrl2) &
151905491d2cSKalle Valo 		     D64_CTRL2_BC_MASK);
152005491d2cSKalle Valo 
152105491d2cSKalle Valo 		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
152205491d2cSKalle Valo 		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
152305491d2cSKalle Valo 
152405491d2cSKalle Valo 		txp = di->txp[i];
152505491d2cSKalle Valo 		di->txp[i] = NULL;
152605491d2cSKalle Valo 
152705491d2cSKalle Valo 		dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
152805491d2cSKalle Valo 	}
152905491d2cSKalle Valo 
153005491d2cSKalle Valo 	di->txin = i;
153105491d2cSKalle Valo 
153205491d2cSKalle Valo 	/* tx flow control */
153305491d2cSKalle Valo 	dma_update_txavail(di);
153405491d2cSKalle Valo 
153505491d2cSKalle Valo 	return txp;
153605491d2cSKalle Valo 
153705491d2cSKalle Valo  bogus:
153805491d2cSKalle Valo 	brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
153905491d2cSKalle Valo 		      start, end, di->txout);
154005491d2cSKalle Valo 	return NULL;
154105491d2cSKalle Valo }
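
/*
 * Illustrative sketch only: draining completed frames after a tx
 * interrupt, using DMA_RANGE_TRANSMITTED just as dma_txreclaim() does.
 * The helper name is an assumption.
 */
static void __maybe_unused example_tx_complete(struct dma_pub *pub)
{
	struct sk_buff *p;

	while ((p = dma_getnexttxp(pub, DMA_RANGE_TRANSMITTED)))
		brcmu_pkt_buf_free_skb(p);
}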
154205491d2cSKalle Valo 
154305491d2cSKalle Valo /*
154405491d2cSKalle Valo  * mac80211-initiated actions sometimes require packets in the DMA queue to
154505491d2cSKalle Valo  * be modified. The modified portion of the packet is not under control of
154605491d2cSKalle Valo  * the DMA engine. This function calls a caller-supplied function for each
154705491d2cSKalle Valo  * packet in the caller-specified dma chain.
154805491d2cSKalle Valo  */
154905491d2cSKalle Valo void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
155005491d2cSKalle Valo 		      (void *pkt, void *arg_a), void *arg_a)
155105491d2cSKalle Valo {
155205491d2cSKalle Valo 	struct dma_info *di = container_of(dmah, struct dma_info, dma);
155305491d2cSKalle Valo 	uint i =   di->txin;
155405491d2cSKalle Valo 	uint end = di->txout;
155505491d2cSKalle Valo 	struct sk_buff *skb;
155605491d2cSKalle Valo 	struct ieee80211_tx_info *tx_info;
155705491d2cSKalle Valo 
155805491d2cSKalle Valo 	while (i != end) {
155905491d2cSKalle Valo 		skb = di->txp[i];
156005491d2cSKalle Valo 		if (skb != NULL) {
156105491d2cSKalle Valo 			tx_info = (struct ieee80211_tx_info *)skb->cb;
156205491d2cSKalle Valo 			(callback_fnc)(tx_info, arg_a);
156305491d2cSKalle Valo 		}
156405491d2cSKalle Valo 		i = nexttxd(di, i);
156505491d2cSKalle Valo 	}
156605491d2cSKalle Valo }
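
/*
 * Illustrative sketch only: a callback for dma_walk_packets(). The walker
 * passes each queued packet's ieee80211_tx_info as 'pkt'; clearing the
 * AMPDU flag here is just an example of the kind of in-queue fixup the
 * comment above describes. The helper name is an assumption.
 */
static void __maybe_unused example_clear_ampdu_flag(void *pkt, void *arg_a)
{
	struct ieee80211_tx_info *tx_info = pkt;

	tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
}
/* usage: dma_walk_packets(pub, example_clear_ampdu_flag, NULL); */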