/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _BRCM_DMA_H_
#define _BRCM_DMA_H_

#include <linux/delay.h>
#include <linux/skbuff.h>
#include "types.h"		/* forward structure declarations */

/* map/unmap direction */
#define DMA_TX	1		/* TX direction for DMA */
#define DMA_RX	2		/* RX direction for DMA */

/* DMA structure:
 *  supports two DMA engines: 32-bit or 64-bit addressing
 *  the basic DMA register set is per channel (transmit or receive)
 *  a pair of channels is defined for convenience
 */

/* 32 bits addressing */

struct dma32diag {		/* diag access */
	u32 fifoaddr;		/* diag address */
	u32 fifodatalow;	/* low 32 bits of data */
	u32 fifodatahigh;	/* high 32 bits of data */
	u32 pad;		/* reserved */
};

/* 64 bits addressing */

/* dma registers per channel (xmt or rcv) */
struct dma64regs {
	u32 control;		/* enable, et al */
	u32 ptr;		/* last descriptor posted to chip */
	u32 addrlow;		/* desc ring base address low 32-bits (8K aligned) */
	u32 addrhigh;		/* desc ring base address bits 63:32 (8K aligned) */
	u32 status0;		/* current descriptor, xmt state */
	u32 status1;		/* active descriptor, xmt error */
};

/* range param for dma_getnexttxp() and dma_txreclaim */
enum txd_range {
	DMA_RANGE_ALL = 1,
	DMA_RANGE_TRANSMITTED,
	DMA_RANGE_TRANSFERED
};

/*
 * Exported data structure (read-only)
 */
struct dma_pub {
	uint txavail;		/* # free tx descriptors */
	uint dmactrlflags;	/* dma control flags */

	/* rx error counters */
	uint rxgiants;		/* rx giant frames */
	uint rxnobuf;		/* rx out of dma descriptors */
	/* tx error counters */
	uint txnobuf;		/* tx out of dma descriptors */
};

extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
				  uint txregbase, uint rxregbase,
				  uint ntxd, uint nrxd,
				  uint rxbufsize, int rxextheadroom,
				  uint nrxpost, uint rxoffset);
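
/*
 * Example (illustrative sketch, not code taken from the driver): a caller
 * could bring up one TX/RX channel pair roughly as below.  The FIFO name,
 * 'wlc' context, register bases, ring sizes and buffer geometry are all
 * hypothetical placeholder values.
 *
 *	struct dma_pub *di;
 *
 *	di = dma_attach("wl0_fifo0", wlc,	// hypothetical name/context
 *			txregbase, rxregbase,	// per-channel register bases
 *			256, 256,		// ntxd, nrxd descriptor counts
 *			2048, 0,		// rxbufsize, rxextheadroom
 *			32, 0);			// nrxpost, rxoffset
 *	if (di == NULL)
 *		return -ENOMEM;			// attach failed, no DMA state
 */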

void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p0);
void dma_txflush(struct dma_pub *pub);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub);
void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
void dma_rxreclaim(struct dma_pub *pub);
void dma_detach(struct dma_pub *pub);
unsigned long dma_getvar(struct dma_pub *pub, const char *name);
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
void dma_counterreset(struct dma_pub *pub);
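
/*
 * Example (illustrative sketch): how the calls above typically compose.
 * 'di', 'wlc', 'skb', 'p' and 'frames' are assumed to be provided by the
 * caller; this is not code lifted from the driver.
 *
 *	// post a frame only while free descriptors remain (dma_pub is read-only)
 *	if (di->txavail == 0)
 *		return -EBUSY;
 *	err = dma_txfast(wlc, di, skb);
 *
 *	// later, on tx completion, free what the hardware has transmitted
 *	while ((p = dma_getnexttxp(di, DMA_RANGE_TRANSMITTED)) != NULL)
 *		dev_kfree_skb(p);
 *
 *	// on the receive side, harvest frames and repost fresh buffers
 *	skb_queue_head_init(&frames);
 *	dma_rx(di, &frames);
 *	dma_rxfill(di);
 */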

void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a);
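
/*
 * Example (illustrative sketch): judging from the prototype, dma_walk_packets()
 * applies the supplied callback to each packet still posted to the ring, with
 * arg_a passed through.  A hypothetical callback that merely counts packets:
 *
 *	static void count_pkt(void *pkt, void *arg_a)
 *	{
 *		(*(int *)arg_a)++;	// pkt treated as opaque here
 *	}
 *
 *	int npkts = 0;
 *	dma_walk_packets(di, count_pkt, &npkts);
 */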

/*
 * A DMA bug on bcm47xx chips: the engine can signal that a received packet is
 * ready before it has written the packet length into the buffer.  The
 * workaround is to spin until the DMA engine has updated the length, staying
 * off the bus so the update can complete.
 */
static inline void dma_spin_for_len(uint len, struct sk_buff *head)
{
#if defined(CONFIG_BCM47XX)
	if (!len) {
		while (!(len = *(u16 *) KSEG1ADDR(head->data)))
			udelay(1);

		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(CONFIG_BCM47XX) */
}

#endif				/* _BRCM_DMA_H_ */