xref: /openbmc/linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c (revision 7ae9fb1b7ecbb5d85d07857943f677fd1a559b18)
1  // SPDX-License-Identifier: ISC
2  /*
3   * Copyright (c) 2010 Broadcom Corporation
4   */
5  
6  #include <linux/types.h>
7  #include <linux/atomic.h>
8  #include <linux/kernel.h>
9  #include <linux/kthread.h>
10  #include <linux/printk.h>
11  #include <linux/pci_ids.h>
12  #include <linux/netdevice.h>
13  #include <linux/interrupt.h>
14  #include <linux/sched/signal.h>
15  #include <linux/mmc/sdio.h>
16  #include <linux/mmc/sdio_ids.h>
17  #include <linux/mmc/sdio_func.h>
18  #include <linux/mmc/card.h>
19  #include <linux/mmc/core.h>
20  #include <linux/semaphore.h>
21  #include <linux/firmware.h>
22  #include <linux/module.h>
23  #include <linux/bcma/bcma.h>
24  #include <linux/debugfs.h>
25  #include <linux/vmalloc.h>
26  #include <asm/unaligned.h>
27  #include <defs.h>
28  #include <brcmu_wifi.h>
29  #include <brcmu_utils.h>
30  #include <brcm_hw_ids.h>
31  #include <soc.h>
32  #include "sdio.h"
33  #include "chip.h"
34  #include "firmware.h"
35  #include "core.h"
36  #include "common.h"
37  #include "bcdc.h"
38  
39  #define DCMD_RESP_TIMEOUT	msecs_to_jiffies(2500)
40  #define CTL_DONE_TIMEOUT	msecs_to_jiffies(2500)
41  
42  /* watermark expressed in number of words */
43  #define DEFAULT_F2_WATERMARK    0x8
44  #define CY_4373_F2_WATERMARK    0x40
45  #define CY_4373_F1_MESBUSYCTRL  (CY_4373_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
46  #define CY_43012_F2_WATERMARK    0x60
47  #define CY_43012_MES_WATERMARK  0x50
48  #define CY_43012_MESBUSYCTRL    (CY_43012_MES_WATERMARK | \
49  				 SBSDIO_MESBUSYCTRL_ENAB)
50  #define CY_4339_F2_WATERMARK    48
51  #define CY_4339_MES_WATERMARK	80
52  #define CY_4339_MESBUSYCTRL	(CY_4339_MES_WATERMARK | \
53  				 SBSDIO_MESBUSYCTRL_ENAB)
54  #define CY_43455_F2_WATERMARK	0x60
55  #define CY_43455_MES_WATERMARK	0x50
56  #define CY_43455_MESBUSYCTRL	(CY_43455_MES_WATERMARK | \
57  				 SBSDIO_MESBUSYCTRL_ENAB)
58  #define CY_435X_F2_WATERMARK	0x40
59  #define CY_435X_F1_MESBUSYCTRL	(CY_435X_F2_WATERMARK | \
60  				 SBSDIO_MESBUSYCTRL_ENAB)
61  
62  #ifdef DEBUG
63  
64  #define BRCMF_TRAP_INFO_SIZE	80
65  
66  #define CBUF_LEN	(128)
67  
68  /* Device console log buffer state */
69  #define CONSOLE_BUFFER_MAX	2024
70  
71  struct rte_log_le {
72  	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
73  	__le32 buf_size;
74  	__le32 idx;
75  	char *_buf_compat;	/* Redundant pointer for backward compat. */
76  };
77  
78  struct rte_console {
79  	/* Virtual UART
80  	 * When there is no UART (e.g. Quickturn),
81  	 * the host should write a complete
82  	 * input line directly into cbuf and then write
83  	 * the length into vcons_in.
84  	 * This may also be used when there is a real UART
85  	 * (at risk of conflicting with
86  	 * the real UART).  vcons_out is currently unused.
87  	 */
88  	uint vcons_in;
89  	uint vcons_out;
90  
91  	/* Output (logging) buffer
92  	 * Console output is written to a ring buffer log_buf at index log_idx.
93  	 * The host may read the output when it sees log_idx advance.
94  	 * Output will be lost if the output wraps around faster than the host
95  	 * polls.
96  	 */
97  	struct rte_log_le log_le;
98  
99  	/* Console input line buffer
100  	 * Characters are read one at a time into cbuf
101  	 * until <CR> is received, then
102  	 * the buffer is processed as a command line.
103  	 * Also used for virtual UART.
104  	 */
105  	uint cbuf_idx;
106  	char cbuf[CBUF_LEN];
107  };
108  
109  #endif				/* DEBUG */
110  #include <chipcommon.h>
111  
112  #include "bus.h"
113  #include "debug.h"
114  #include "tracepoint.h"
115  
116  #define TXQLEN		2048	/* bulk tx queue length */
117  #define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
118  #define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */
119  #define PRIOMASK	7
120  
121  #define TXRETRIES	2	/* # of retries for tx frames */
122  
123  #define BRCMF_RXBOUND	50	/* Default for max rx frames in
124  				 one scheduling */
125  
126  #define BRCMF_TXBOUND	20	/* Default for max tx frames in
127  				 one scheduling */
128  
129  #define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */
130  
131  #define MEMBLOCK	2048	/* Block size used for downloading
132  				 of dongle image */
133  #define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
134  				 biggest possible glom */
135  
136  #define BRCMF_FIRSTREAD	(1 << 6)
137  
138  /* SBSDIO_DEVICE_CTL */
139  
140  /* 1: device will assert busy signal when receiving CMD53 */
141  #define SBSDIO_DEVCTL_SETBUSY		0x01
142  /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
143  #define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
144  /* 1: mask all interrupts to host except the chipActive (rev 8) */
145  #define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
146  /* 1: isolate internal sdio signals, put external pads in tri-state; requires
147   * sdio bus power cycle to clear (rev 9) */
148  #define SBSDIO_DEVCTL_PADS_ISO		0x08
149  /* 1: enable F2 Watermark */
150  #define SBSDIO_DEVCTL_F2WM_ENAB		0x10
151  /* Force SD->SB reset mapping (rev 11) */
152  #define SBSDIO_DEVCTL_SB_RST_CTL	0x30
153  /*   Determined by CoreControl bit */
154  #define SBSDIO_DEVCTL_RST_CORECTL	0x00
155  /*   Force backplane reset */
156  #define SBSDIO_DEVCTL_RST_BPRESET	0x10
157  /*   Force no backplane reset */
158  #define SBSDIO_DEVCTL_RST_NOBPRESET	0x20
159  
160  /* direct(mapped) cis space */
161  
162  /* MAPPED common CIS address */
163  #define SBSDIO_CIS_BASE_COMMON		0x1000
164  /* maximum bytes in one CIS */
165  #define SBSDIO_CIS_SIZE_LIMIT		0x200
166  /* cis offset addr is < 17 bits */
167  #define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF
168  
169  /* manfid tuple length, include tuple, link bytes */
170  #define SBSDIO_CIS_MANFID_TUPLE_LEN	6
171  
172  #define SD_REG(field) \
173  		(offsetof(struct sdpcmd_regs, field))
174  
175  /* SDIO function 1 register CHIPCLKCSR */
176  /* Force ALP request to backplane */
177  #define SBSDIO_FORCE_ALP		0x01
178  /* Force HT request to backplane */
179  #define SBSDIO_FORCE_HT			0x02
180  /* Force ILP request to backplane */
181  #define SBSDIO_FORCE_ILP		0x04
182  /* Make ALP ready (power up xtal) */
183  #define SBSDIO_ALP_AVAIL_REQ		0x08
184  /* Make HT ready (power up PLL) */
185  #define SBSDIO_HT_AVAIL_REQ		0x10
186  /* Squelch clock requests from HW */
187  #define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
188  /* Status: ALP is ready */
189  #define SBSDIO_ALP_AVAIL		0x40
190  /* Status: HT is ready */
191  #define SBSDIO_HT_AVAIL			0x80
192  #define SBSDIO_CSR_MASK			0x1F
193  #define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
194  #define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
195  #define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
196  #define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
197  #define SBSDIO_CLKAV(regval, alponly) \
198  	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
199  
200  /* intstatus */
201  #define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
202  #define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
203  #define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
204  #define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
205  #define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
206  #define I_SMB_SW_SHIFT	0	/* To SB Mail S/W interrupts shift */
207  #define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
208  #define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
209  #define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
210  #define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
211  #define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
212  #define I_HMB_SW_SHIFT	4	/* To Host Mail S/W interrupts shift */
213  #define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
214  #define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
215  #define	I_PC		(1 << 10)	/* descriptor error */
216  #define	I_PD		(1 << 11)	/* data error */
217  #define	I_DE		(1 << 12)	/* Descriptor protocol Error */
218  #define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
219  #define	I_RO		(1 << 14)	/* Receive fifo Overflow */
220  #define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
221  #define	I_RI		(1 << 16)	/* Receive Interrupt */
222  #define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
223  #define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
224  #define	I_XI		(1 << 24)	/* Transmit Interrupt */
225  #define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
226  #define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
227  #define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
228  #define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
229  #define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
230  #define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
231  #define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
232  #define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
233  #define I_DMA		(I_RI | I_XI | I_ERRORS)
234  
235  /* corecontrol */
236  #define CC_CISRDY		(1 << 0)	/* CIS Ready */
237  #define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
238  #define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
239  #define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
240  #define CC_XMTDATAAVAIL_MODE	(1 << 4)
241  #define CC_XMTDATAAVAIL_CTRL	(1 << 5)
242  
243  /* SDA_FRAMECTRL */
244  #define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
245  #define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
246  #define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
247  #define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
248  
249  /*
250   * Software allocation of To SB Mailbox resources
251   */
252  
253  /* tosbmailbox bits corresponding to intstatus bits */
254  #define SMB_NAK		(1 << 0)	/* Frame NAK */
255  #define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
256  #define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
257  #define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */
258  
259  /* tosbmailboxdata */
260  #define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */
261  
262  /*
263   * Software allocation of To Host Mailbox resources
264   */
265  
266  /* intstatus bits */
267  #define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
268  #define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
269  #define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
270  #define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */
271  
272  /* tohostmailboxdata */
273  #define HMB_DATA_NAKHANDLED	0x0001	/* retransmit NAK'd frame */
274  #define HMB_DATA_DEVREADY	0x0002	/* talk to host after enable */
275  #define HMB_DATA_FC		0x0004	/* per prio flowcontrol update flag */
276  #define HMB_DATA_FWREADY	0x0008	/* fw ready for protocol activity */
277  #define HMB_DATA_FWHALT		0x0010	/* firmware halted */
278  
279  #define HMB_DATA_FCDATA_MASK	0xff000000
280  #define HMB_DATA_FCDATA_SHIFT	24
281  
282  #define HMB_DATA_VERSION_MASK	0x00ff0000
283  #define HMB_DATA_VERSION_SHIFT	16
284  
285  /*
286   * Software-defined protocol header
287   */
288  
289  /* Current protocol version */
290  #define SDPCM_PROT_VERSION	4
291  
292  /*
293   * Shared structure between dongle and the host.
294   * The structure contains pointers to trap or assert information.
295   */
296  #define SDPCM_SHARED_VERSION       0x0003
297  #define SDPCM_SHARED_VERSION_MASK  0x00FF
298  #define SDPCM_SHARED_ASSERT_BUILT  0x0100
299  #define SDPCM_SHARED_ASSERT        0x0200
300  #define SDPCM_SHARED_TRAP          0x0400
301  
302  /* Space for header read, limit for data packets */
303  #define MAX_HDR_READ	(1 << 6)
304  #define MAX_RX_DATASZ	2048
305  
306  /* Bump up limit on waiting for HT to account for first startup;
307   * if the image is doing a CRC calculation before programming the PMU
308   * for HT availability, it could take a couple hundred ms more, so
309   * max out at a 1 second (1000000us).
310   * max out at 1 second (1000000 us).
311  #undef PMU_MAX_TRANSITION_DLY
312  #define PMU_MAX_TRANSITION_DLY 1000000
313  
314  /* Value for ChipClockCSR during initial setup */
315  #define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
316  					SBSDIO_ALP_AVAIL_REQ)
317  
318  /* Flags for SDH calls */
319  #define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
320  
321  #define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
322  					 * when idle
323  					 */
324  #define BRCMF_IDLE_INTERVAL	1
325  
326  #define KSO_WAIT_US 50
327  #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
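/* With KSO_WAIT_US = 50 this gives 1000000 / 50 = 20000 polls, i.e. roughly
 * one second of KSO polling in the worst case.
 */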
328  #define BRCMF_SDIO_MAX_ACCESS_ERRORS	5
329  
330  #ifdef DEBUG
331  /* Device console log buffer state */
332  struct brcmf_console {
333  	uint count;		/* Poll interval msec counter */
334  	uint log_addr;		/* Log struct address (fixed) */
335  	struct rte_log_le log_le;	/* Log struct (host copy) */
336  	uint bufsize;		/* Size of log buffer */
337  	u8 *buf;		/* Log buffer (host copy) */
338  	uint last;		/* Last buffer read index */
339  };
340  
341  struct brcmf_trap_info {
342  	__le32		type;
343  	__le32		epc;
344  	__le32		cpsr;
345  	__le32		spsr;
346  	__le32		r0;	/* a1 */
347  	__le32		r1;	/* a2 */
348  	__le32		r2;	/* a3 */
349  	__le32		r3;	/* a4 */
350  	__le32		r4;	/* v1 */
351  	__le32		r5;	/* v2 */
352  	__le32		r6;	/* v3 */
353  	__le32		r7;	/* v4 */
354  	__le32		r8;	/* v5 */
355  	__le32		r9;	/* sb/v6 */
356  	__le32		r10;	/* sl/v7 */
357  	__le32		r11;	/* fp/v8 */
358  	__le32		r12;	/* ip */
359  	__le32		r13;	/* sp */
360  	__le32		r14;	/* lr */
361  	__le32		pc;	/* r15 */
362  };
363  #endif				/* DEBUG */
364  
365  struct sdpcm_shared {
366  	u32 flags;
367  	u32 trap_addr;
368  	u32 assert_exp_addr;
369  	u32 assert_file_addr;
370  	u32 assert_line;
371  	u32 console_addr;	/* Address of struct rte_console */
372  	u32 msgtrace_addr;
373  	u8 tag[32];
374  	u32 brpt_addr;
375  };
376  
377  struct sdpcm_shared_le {
378  	__le32 flags;
379  	__le32 trap_addr;
380  	__le32 assert_exp_addr;
381  	__le32 assert_file_addr;
382  	__le32 assert_line;
383  	__le32 console_addr;	/* Address of struct rte_console */
384  	__le32 msgtrace_addr;
385  	u8 tag[32];
386  	__le32 brpt_addr;
387  };
388  
389  /* dongle SDIO bus specific header info */
390  struct brcmf_sdio_hdrinfo {
391  	u8 seq_num;
392  	u8 channel;
393  	u16 len;
394  	u16 len_left;
395  	u16 len_nxtfrm;
396  	u8 dat_offset;
397  	bool lastfrm;
398  	u16 tail_pad;
399  };
400  
401  /*
402   * hold counter variables
403   */
404  struct brcmf_sdio_count {
405  	uint intrcount;		/* Count of device interrupt callbacks */
406  	uint lastintrs;		/* Count as of last watchdog timer */
407  	uint pollcnt;		/* Count of active polls */
408  	uint regfails;		/* Count of R_REG failures */
409  	uint tx_sderrs;		/* Count of tx attempts with sd errors */
410  	uint fcqueued;		/* Tx packets that got queued */
411  	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
412  	uint rx_toolong;	/* Received frames exceeding max rx length */
413  	uint rxc_errors;	/* SDIO errors when reading control frames */
414  	uint rx_hdrfail;	/* SDIO errors on header reads */
415  	uint rx_badhdr;		/* Bad received headers (roosync?) */
416  	uint rx_badseq;		/* Mismatched rx sequence number */
417  	uint fc_rcvd;		/* Number of flow-control events received */
418  	uint fc_xoff;		/* Number which turned on flow-control */
419  	uint fc_xon;		/* Number which turned off flow-control */
420  	uint rxglomfail;	/* Failed deglom attempts */
421  	uint rxglomframes;	/* Number of glom frames (superframes) */
422  	uint rxglompkts;	/* Number of packets from glom frames */
423  	uint f2rxhdrs;		/* Number of header reads */
424  	uint f2rxdata;		/* Number of frame data reads */
425  	uint f2txdata;		/* Number of f2 frame writes */
426  	uint f1regdata;		/* Number of f1 register accesses */
427  	uint tickcnt;		/* Number of times the watchdog has been scheduled */
428  	ulong tx_ctlerrs;	/* Errors sending ctrl frames */
429  	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
430  	ulong rx_ctlerrs;	/* Errors processing rx ctrl frames */
431  	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
432  	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
433  };
434  
435  /* misc chip info needed by some of the routines */
436  /* Private data for SDIO bus interaction */
437  struct brcmf_sdio {
438  	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
439  	struct brcmf_chip *ci;	/* Chip info struct */
440  	struct brcmf_core *sdio_core; /* sdio core info struct */
441  
442  	u32 hostintmask;	/* Copy of Host Interrupt Mask */
443  	atomic_t intstatus;	/* Intstatus bits (events) pending */
444  	atomic_t fcstate;	/* State of dongle flow-control */
445  
446  	uint blocksize;		/* Block size of SDIO transfers */
447  	uint roundup;		/* Max roundup limit */
448  
449  	struct pktq txq;	/* Queue length used for flow-control */
450  	u8 flowcontrol;	/* per prio flow control bitmask */
451  	u8 tx_seq;		/* Transmit sequence number (next) */
452  	u8 tx_max;		/* Maximum transmit sequence allowed */
453  
454  	u8 *hdrbuf;		/* buffer for handling rx frame */
455  	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
456  	u8 rx_seq;		/* Receive sequence number (expected) */
457  	struct brcmf_sdio_hdrinfo cur_read;
458  				/* info of current read frame */
459  	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
460  	bool rxpending;		/* Data frame pending in dongle */
461  
462  	uint rxbound;		/* Rx frames to read before resched */
463  	uint txbound;		/* Tx frames to send before resched */
464  	uint txminmax;
465  
466  	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
467  	struct sk_buff_head glom; /* Packet list for glommed superframe */
468  
469  	u8 *rxbuf;		/* Buffer for receiving control packets */
470  	uint rxblen;		/* Allocated length of rxbuf */
471  	u8 *rxctl;		/* Aligned pointer into rxbuf */
472  	u8 *rxctl_orig;		/* pointer for freeing rxctl */
473  	uint rxlen;		/* Length of valid data in buffer */
474  	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */
475  
476  	u8 sdpcm_ver;	/* Bus protocol reported by dongle */
477  
478  	bool intr;		/* Use interrupts */
479  	bool poll;		/* Use polling */
480  	atomic_t ipend;		/* Device interrupt is pending */
481  	uint spurious;		/* Count of spurious interrupts */
482  	uint pollrate;		/* Ticks between device polls */
483  	uint polltick;		/* Tick counter */
484  
485  #ifdef DEBUG
486  	uint console_interval;
487  	struct brcmf_console console;	/* Console output polling support */
488  	uint console_addr;	/* Console address from shared struct */
489  #endif				/* DEBUG */
490  
491  	uint clkstate;		/* State of sd and backplane clock(s) */
492  	s32 idletime;		/* Control for activity timeout */
493  	s32 idlecount;		/* Activity timeout counter */
494  	s32 idleclock;		/* How to set bus driver when idle */
495  	bool rxflow_mode;	/* Rx flow control mode */
496  	bool rxflow;		/* Is rx flow control on */
497  	bool alp_only;		/* Don't use HT clock (ALP only) */
498  
499  	u8 *ctrl_frame_buf;
500  	u16 ctrl_frame_len;
501  	bool ctrl_frame_stat;
502  	int ctrl_frame_err;
503  
504  	spinlock_t txq_lock;		/* protect bus->txq */
505  	wait_queue_head_t ctrl_wait;
506  	wait_queue_head_t dcmd_resp_wait;
507  
508  	struct timer_list timer;
509  	struct completion watchdog_wait;
510  	struct task_struct *watchdog_tsk;
511  	bool wd_active;
512  
513  	struct workqueue_struct *brcmf_wq;
514  	struct work_struct datawork;
515  	bool dpc_triggered;
516  	bool dpc_running;
517  
518  	bool txoff;		/* Transmit flow-controlled */
519  	struct brcmf_sdio_count sdcnt;
520  	bool sr_enabled; /* SaveRestore enabled */
521  	bool sleeping;
522  
523  	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
524  	bool txglom;		/* host tx glomming enable flag */
525  	u16 head_align;		/* buffer pointer alignment */
526  	u16 sgentry_align;	/* scatter-gather buffer alignment */
527  };
528  
529  /* clkstate */
530  #define CLK_NONE	0
531  #define CLK_SDONLY	1
532  #define CLK_PENDING	2
533  #define CLK_AVAIL	3
534  
535  #ifdef DEBUG
536  static int qcount[NUMPRIO];
537  #endif				/* DEBUG */
538  
539  #define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */
540  
541  #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
542  
543  /* Limit on rounding up frames */
544  static const uint max_roundup = 512;
545  
546  #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
547  #define ALIGNMENT  8
548  #else
549  #define ALIGNMENT  4
550  #endif
551  
552  enum brcmf_sdio_frmtype {
553  	BRCMF_SDIO_FT_NORMAL,
554  	BRCMF_SDIO_FT_SUPER,
555  	BRCMF_SDIO_FT_SUB,
556  };
557  
558  #define SDIOD_DRVSTR_KEY(chip, pmu)     (((unsigned int)(chip) << 16) | (pmu))
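/* Example: SDIOD_DRVSTR_KEY(0x4334, 11) == 0x4334000b, i.e. the chip id in
 * the upper 16 bits and the PMU revision in the lower 16 bits.
 */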
559  
560  /* SDIO Pad drive strength to select value mappings */
561  struct sdiod_drive_str {
562  	u8 strength;	/* Pad Drive Strength in mA */
563  	u8 sel;		/* Chip-specific select value */
564  };
565  
566  /* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
567  static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
568  	{32, 0x6},
569  	{26, 0x7},
570  	{22, 0x4},
571  	{16, 0x5},
572  	{12, 0x2},
573  	{8, 0x3},
574  	{4, 0x0},
575  	{0, 0x1}
576  };
577  
578  /* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
579  static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
580  	{6, 0x7},
581  	{5, 0x6},
582  	{4, 0x5},
583  	{3, 0x4},
584  	{2, 0x2},
585  	{1, 0x1},
586  	{0, 0x0}
587  };
588  
589  /* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
590  static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
591  	{3, 0x3},
592  	{2, 0x2},
593  	{1, 0x1},
594  	{0, 0x0} };
595  
596  /* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
597  static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
598  	{16, 0x7},
599  	{12, 0x5},
600  	{8,  0x3},
601  	{4,  0x1}
602  };
603  
604  BRCMF_FW_DEF(43143, "brcmfmac43143-sdio");
605  BRCMF_FW_DEF(43241B0, "brcmfmac43241b0-sdio");
606  BRCMF_FW_DEF(43241B4, "brcmfmac43241b4-sdio");
607  BRCMF_FW_DEF(43241B5, "brcmfmac43241b5-sdio");
608  BRCMF_FW_DEF(4329, "brcmfmac4329-sdio");
609  BRCMF_FW_DEF(4330, "brcmfmac4330-sdio");
610  BRCMF_FW_DEF(4334, "brcmfmac4334-sdio");
611  BRCMF_FW_DEF(43340, "brcmfmac43340-sdio");
612  BRCMF_FW_DEF(4335, "brcmfmac4335-sdio");
613  BRCMF_FW_DEF(43362, "brcmfmac43362-sdio");
614  BRCMF_FW_DEF(4339, "brcmfmac4339-sdio");
615  BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
616  /* Note the names are not postfixed with a1 for backward compatibility */
617  BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio");
618  BRCMF_FW_DEF(43430B0, "brcmfmac43430b0-sdio");
619  BRCMF_FW_CLM_DEF(43439, "brcmfmac43439-sdio");
620  BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio");
621  BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
622  BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio");
623  BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-sdio");
624  BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
625  BRCMF_FW_CLM_DEF(4373, "brcmfmac4373-sdio");
626  BRCMF_FW_CLM_DEF(43012, "brcmfmac43012-sdio");
627  BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio");
628  
629  /* firmware config files */
630  MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
631  
632  /* per-board firmware binaries */
633  MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin");
634  
635  static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
636  	BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
637  	BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0),
638  	BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4),
639  	BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, 43241B5),
640  	BRCMF_FW_ENTRY(BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, 4329),
641  	BRCMF_FW_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330),
642  	BRCMF_FW_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334),
643  	BRCMF_FW_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340),
644  	BRCMF_FW_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340),
645  	BRCMF_FW_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335),
646  	BRCMF_FW_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362),
647  	BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
648  	BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
649  	BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000002, 43430A1),
650  	BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0),
651  	BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
652  	BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
653  	BRCMF_FW_ENTRY(BRCM_CC_43454_CHIP_ID, 0x00000040, 43455),
654  	BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
655  	BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
656  	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
657  	BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
658  	BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
659  	BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
660  	BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
661  };
662  
663  #define TXCTL_CREDITS	2
664  
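/* Align the packet data pointer to an "align"-byte boundary and trim the
 * packet to "len" bytes.
 */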
665  static void pkt_align(struct sk_buff *p, int len, int align)
666  {
667  	uint datalign;
668  	datalign = (unsigned long)(p->data);
669  	datalign = roundup(datalign, (align)) - datalign;
670  	if (datalign)
671  		skb_pull(p, datalign);
672  	__skb_trim(p, len);
673  }
674  
675  /* To check if there's window offered */
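/* The firmware advertises the highest allowed tx sequence number (tx_max)
 * in the SDPCM software header; (tx_max - tx_seq) modulo 256 is the number
 * of frames that may still be sent. The 0x80 test guards against a window
 * that has wrapped and would otherwise look like free credits.
 */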
676  static bool data_ok(struct brcmf_sdio *bus)
677  {
678  	u8 tx_rsv = 0;
679  
680  	/* Reserve TXCTL_CREDITS credits for txctl when it is ready to send */
681  	if (bus->ctrl_frame_stat)
682  		tx_rsv = TXCTL_CREDITS;
683  
684  	return (bus->tx_max - bus->tx_seq - tx_rsv) != 0 &&
685  	       ((bus->tx_max - bus->tx_seq - tx_rsv) & 0x80) == 0;
686  
687  }
688  
689  /* To check if there's window offered */
690  static bool txctl_ok(struct brcmf_sdio *bus)
691  {
692  	return (bus->tx_max - bus->tx_seq) != 0 &&
693  	       ((bus->tx_max - bus->tx_seq) & 0x80) == 0;
694  }
695  
696  static int
697  brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
698  {
699  	u8 wr_val = 0, rd_val, cmp_val, bmask;
700  	int err = 0;
701  	int err_cnt = 0;
702  	int try_cnt = 0;
703  
704  	brcmf_dbg(TRACE, "Enter: on=%d\n", on);
705  
706  	sdio_retune_crc_disable(bus->sdiodev->func1);
707  
708  	/* Cannot re-tune if device is asleep; defer till we're awake */
709  	if (on)
710  		sdio_retune_hold_now(bus->sdiodev->func1);
711  
712  	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
713  	/* 1st KSO write goes to the AOS to wake up the core if the device is asleep */
714  	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
715  
716  	/* In case of 43012 chip, the chip could go down immediately after
717  	 * KSO bit is cleared. So the further reads of KSO register could
718  	 * fail. Thereby just bailing out immediately after clearing KSO
719  	 * bit, to avoid polling of KSO bit.
720  	 */
721  	if (!on && bus->ci->chip == CY_CC_43012_CHIP_ID)
722  		return err;
723  
724  	if (on) {
725  		/* device WAKEUP through KSO:
726  		 * write bit 0 & read back until
727  		 * both bits 0 (kso bit) & 1 (dev on status) are set
728  		 */
729  		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
730  			  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
731  		bmask = cmp_val;
732  		usleep_range(2000, 3000);
733  	} else {
734  		/* Put device to sleep, turn off KSO */
735  		cmp_val = 0;
736  		/* only check for bit0, bit1(dev on status) may not
737  		 * get cleared right away
738  		 */
739  		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
740  	}
741  
742  	do {
743  		/* reliable KSO bit set/clr:
744  		 * the sdiod sleep write access is synced to PMU 32khz clk
745  		 * just one write attempt may fail,
746  		 * read it back until it matches written value
747  		 */
748  		rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
749  					   &err);
750  		if (!err) {
751  			if ((rd_val & bmask) == cmp_val)
752  				break;
753  			err_cnt = 0;
754  		}
755  		/* bail out upon subsequent access errors */
756  		if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
757  			break;
758  
759  		udelay(KSO_WAIT_US);
760  		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
761  				   &err);
762  
763  	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
764  
765  	if (try_cnt > 2)
766  		brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
767  			  rd_val, err);
768  
769  	if (try_cnt > MAX_KSO_ATTEMPTS)
770  		brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
771  
772  	if (on)
773  		sdio_retune_release(bus->sdiodev->func1);
774  
775  	sdio_retune_crc_enable(bus->sdiodev->func1);
776  
777  	return err;
778  }
779  
780  #define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
781  
782  /* Turn backplane clock on or off */
783  static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
784  {
785  	int err;
786  	u8 clkctl, clkreq, devctl;
787  	unsigned long timeout;
788  
789  	brcmf_dbg(SDIO, "Enter\n");
790  
791  	clkctl = 0;
792  
793  	if (bus->sr_enabled) {
794  		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
795  		return 0;
796  	}
797  
798  	if (on) {
799  		/* Request HT Avail */
800  		clkreq =
801  		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
802  
803  		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
804  				   clkreq, &err);
805  		if (err) {
806  			brcmf_err("HT Avail request error: %d\n", err);
807  			return -EBADE;
808  		}
809  
810  		/* Check current status */
811  		clkctl = brcmf_sdiod_readb(bus->sdiodev,
812  					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
813  		if (err) {
814  			brcmf_err("HT Avail read error: %d\n", err);
815  			return -EBADE;
816  		}
817  
818  		/* Go to pending and await interrupt if appropriate */
819  		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
820  			/* Allow only clock-available interrupt */
821  			devctl = brcmf_sdiod_readb(bus->sdiodev,
822  						   SBSDIO_DEVICE_CTL, &err);
823  			if (err) {
824  				brcmf_err("Devctl error setting CA: %d\n", err);
825  				return -EBADE;
826  			}
827  
828  			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
829  			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
830  					   devctl, &err);
831  			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
832  			bus->clkstate = CLK_PENDING;
833  
834  			return 0;
835  		} else if (bus->clkstate == CLK_PENDING) {
836  			/* Cancel CA-only interrupt filter */
837  			devctl = brcmf_sdiod_readb(bus->sdiodev,
838  						   SBSDIO_DEVICE_CTL, &err);
839  			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
840  			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
841  					   devctl, &err);
842  		}
843  
844  		/* Otherwise, wait here (polling) for HT Avail */
845  		timeout = jiffies +
846  			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
847  		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
848  			clkctl = brcmf_sdiod_readb(bus->sdiodev,
849  						   SBSDIO_FUNC1_CHIPCLKCSR,
850  						   &err);
851  			if (time_after(jiffies, timeout))
852  				break;
853  			else
854  				usleep_range(5000, 10000);
855  		}
856  		if (err) {
857  			brcmf_err("HT Avail request error: %d\n", err);
858  			return -EBADE;
859  		}
860  		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
861  			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
862  				  PMU_MAX_TRANSITION_DLY, clkctl);
863  			return -EBADE;
864  		}
865  
866  		/* Mark clock available */
867  		bus->clkstate = CLK_AVAIL;
868  		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
869  
870  #if defined(DEBUG)
871  		if (!bus->alp_only) {
872  			if (SBSDIO_ALPONLY(clkctl))
873  				brcmf_err("HT Clock should be on\n");
874  		}
875  #endif				/* defined (DEBUG) */
876  
877  	} else {
878  		clkreq = 0;
879  
880  		if (bus->clkstate == CLK_PENDING) {
881  			/* Cancel CA-only interrupt filter */
882  			devctl = brcmf_sdiod_readb(bus->sdiodev,
883  						   SBSDIO_DEVICE_CTL, &err);
884  			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
885  			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
886  					   devctl, &err);
887  		}
888  
889  		bus->clkstate = CLK_SDONLY;
890  		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
891  				   clkreq, &err);
892  		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
893  		if (err) {
894  			brcmf_err("Failed access turning clock off: %d\n",
895  				  err);
896  			return -EBADE;
897  		}
898  	}
899  	return 0;
900  }
901  
902  /* Change idle/active SD state */
903  static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
904  {
905  	brcmf_dbg(SDIO, "Enter\n");
906  
907  	if (on)
908  		bus->clkstate = CLK_SDONLY;
909  	else
910  		bus->clkstate = CLK_NONE;
911  
912  	return 0;
913  }
914  
915  /* Transition SD and backplane clock readiness */
916  static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
917  {
918  #ifdef DEBUG
919  	uint oldstate = bus->clkstate;
920  #endif				/* DEBUG */
921  
922  	brcmf_dbg(SDIO, "Enter\n");
923  
924  	/* Early exit if we're already there */
925  	if (bus->clkstate == target)
926  		return 0;
927  
928  	switch (target) {
929  	case CLK_AVAIL:
930  		/* Make sure SD clock is available */
931  		if (bus->clkstate == CLK_NONE)
932  			brcmf_sdio_sdclk(bus, true);
933  		/* Now request HT Avail on the backplane */
934  		brcmf_sdio_htclk(bus, true, pendok);
935  		break;
936  
937  	case CLK_SDONLY:
938  		/* Remove HT request, or bring up SD clock */
939  		if (bus->clkstate == CLK_NONE)
940  			brcmf_sdio_sdclk(bus, true);
941  		else if (bus->clkstate == CLK_AVAIL)
942  			brcmf_sdio_htclk(bus, false, false);
943  		else
944  			brcmf_err("request for %d -> %d\n",
945  				  bus->clkstate, target);
946  		break;
947  
948  	case CLK_NONE:
949  		/* Make sure to remove HT request */
950  		if (bus->clkstate == CLK_AVAIL)
951  			brcmf_sdio_htclk(bus, false, false);
952  		/* Now remove the SD clock */
953  		brcmf_sdio_sdclk(bus, false);
954  		break;
955  	}
956  #ifdef DEBUG
957  	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
958  #endif				/* DEBUG */
959  
960  	return 0;
961  }
962  
963  static int
964  brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
965  {
966  	int err = 0;
967  	u8 clkcsr;
968  
969  	brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
970  		  (sleep ? "SLEEP" : "WAKE"),
971  		  (bus->sleeping ? "SLEEP" : "WAKE"));
972  
973  	/* If SR is enabled control bus state with KSO */
974  	if (bus->sr_enabled) {
975  		/* Done if we're already in the requested state */
976  		if (sleep == bus->sleeping)
977  			goto end;
978  
979  		/* Going to sleep */
980  		if (sleep) {
981  			clkcsr = brcmf_sdiod_readb(bus->sdiodev,
982  						   SBSDIO_FUNC1_CHIPCLKCSR,
983  						   &err);
984  			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
985  				brcmf_dbg(SDIO, "no clock, set ALP\n");
986  				brcmf_sdiod_writeb(bus->sdiodev,
987  						   SBSDIO_FUNC1_CHIPCLKCSR,
988  						   SBSDIO_ALP_AVAIL_REQ, &err);
989  			}
990  			err = brcmf_sdio_kso_control(bus, false);
991  		} else {
992  			err = brcmf_sdio_kso_control(bus, true);
993  		}
994  		if (err) {
995  			brcmf_err("error while changing bus sleep state %d\n",
996  				  err);
997  			goto done;
998  		}
999  	}
1000  
1001  end:
1002  	/* control clocks */
1003  	if (sleep) {
1004  		if (!bus->sr_enabled)
1005  			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
1006  	} else {
1007  		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
1008  		brcmf_sdio_wd_timer(bus, true);
1009  	}
1010  	bus->sleeping = sleep;
1011  	brcmf_dbg(SDIO, "new state %s\n",
1012  		  (sleep ? "SLEEP" : "WAKE"));
1013  done:
1014  	brcmf_dbg(SDIO, "Exit: err=%d\n", err);
1015  	return err;
1016  
1017  }
1018  
1019  #ifdef DEBUG
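/* The last word of socram holds an NVRAM length token (a 16-bit value and
 * its bitwise inverse packed into the two halves) until the firmware
 * overwrites it with the sdpcm_shared address, so zero or any value still
 * matching that inverse pattern is rejected as "not yet written".
 */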
1020  static inline bool brcmf_sdio_valid_shared_address(u32 addr)
1021  {
1022  	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
1023  }
1024  
1025  static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1026  				 struct sdpcm_shared *sh)
1027  {
1028  	u32 addr = 0;
1029  	int rv;
1030  	u32 shaddr = 0;
1031  	struct sdpcm_shared_le sh_le;
1032  	__le32 addr_le;
1033  
1034  	sdio_claim_host(bus->sdiodev->func1);
1035  	brcmf_sdio_bus_sleep(bus, false, false);
1036  
1037  	/*
1038  	 * Read last word in socram to determine
1039  	 * address of sdpcm_shared structure
1040  	 */
1041  	shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
1042  	if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
1043  		shaddr -= bus->ci->srsize;
1044  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
1045  			       (u8 *)&addr_le, 4);
1046  	if (rv < 0)
1047  		goto fail;
1048  
1049  	/*
1050  	 * Check if addr is valid.
1051  	 * NVRAM length at the end of memory should have been overwritten.
1052  	 */
1053  	addr = le32_to_cpu(addr_le);
1054  	if (!brcmf_sdio_valid_shared_address(addr)) {
1055  		brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
1056  		rv = -EINVAL;
1057  		goto fail;
1058  	}
1059  
1060  	brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
1061  
1062  	/* Read hndrte_shared structure */
1063  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
1064  			       sizeof(struct sdpcm_shared_le));
1065  	if (rv < 0)
1066  		goto fail;
1067  
1068  	sdio_release_host(bus->sdiodev->func1);
1069  
1070  	/* Endianness */
1071  	sh->flags = le32_to_cpu(sh_le.flags);
1072  	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
1073  	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
1074  	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
1075  	sh->assert_line = le32_to_cpu(sh_le.assert_line);
1076  	sh->console_addr = le32_to_cpu(sh_le.console_addr);
1077  	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
1078  
1079  	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
1080  		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
1081  			  SDPCM_SHARED_VERSION,
1082  			  sh->flags & SDPCM_SHARED_VERSION_MASK);
1083  		return -EPROTO;
1084  	}
1085  	return 0;
1086  
1087  fail:
1088  	brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
1089  		  rv, addr);
1090  	sdio_release_host(bus->sdiodev->func1);
1091  	return rv;
1092  }
1093  
1094  static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1095  {
1096  	struct sdpcm_shared sh;
1097  
1098  	if (brcmf_sdio_readshared(bus, &sh) == 0)
1099  		bus->console_addr = sh.console_addr;
1100  }
1101  #else
1102  static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1103  {
1104  }
1105  #endif /* DEBUG */
1106  
1107  static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
1108  {
1109  	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
1110  	struct brcmf_core *core = bus->sdio_core;
1111  	u32 intstatus = 0;
1112  	u32 hmb_data;
1113  	u8 fcbits;
1114  	int ret;
1115  
1116  	brcmf_dbg(SDIO, "Enter\n");
1117  
1118  	/* Read mailbox data and ack that we did so */
1119  	hmb_data = brcmf_sdiod_readl(sdiod,
1120  				     core->base + SD_REG(tohostmailboxdata),
1121  				     &ret);
1122  
1123  	if (!ret)
1124  		brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
1125  				   SMB_INT_ACK, &ret);
1126  
1127  	bus->sdcnt.f1regdata += 2;
1128  
1129  	/* dongle indicates the firmware has halted/crashed */
1130  	if (hmb_data & HMB_DATA_FWHALT) {
1131  		brcmf_dbg(SDIO, "mailbox indicates firmware halted\n");
1132  		brcmf_fw_crashed(&sdiod->func1->dev);
1133  	}
1134  
1135  	/* Dongle recomposed rx frames, accept them again */
1136  	if (hmb_data & HMB_DATA_NAKHANDLED) {
1137  		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
1138  			  bus->rx_seq);
1139  		if (!bus->rxskip)
1140  			brcmf_err("unexpected NAKHANDLED!\n");
1141  
1142  		bus->rxskip = false;
1143  		intstatus |= I_HMB_FRAME_IND;
1144  	}
1145  
1146  	/*
1147  	 * DEVREADY does not occur with gSPI.
1148  	 */
1149  	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
1150  		bus->sdpcm_ver =
1151  		    (hmb_data & HMB_DATA_VERSION_MASK) >>
1152  		    HMB_DATA_VERSION_SHIFT;
1153  		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
1154  			brcmf_err("Version mismatch, dongle reports %d, "
1155  				  "expecting %d\n",
1156  				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
1157  		else
1158  			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
1159  				  bus->sdpcm_ver);
1160  
1161  		/*
1162  		 * Retrieve console state address now that firmware should have
1163  		 * updated it.
1164  		 */
1165  		brcmf_sdio_get_console_addr(bus);
1166  	}
1167  
1168  	/*
1169  	 * Flow Control has been moved into the RX headers and this out of band
1170  	 * method isn't used any more.
1171  	 * It is handled here only to remain backward compatible with older dongles.
1172  	 */
1173  	if (hmb_data & HMB_DATA_FC) {
1174  		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
1175  							HMB_DATA_FCDATA_SHIFT;
1176  
1177  		if (fcbits & ~bus->flowcontrol)
1178  			bus->sdcnt.fc_xoff++;
1179  
1180  		if (bus->flowcontrol & ~fcbits)
1181  			bus->sdcnt.fc_xon++;
1182  
1183  		bus->sdcnt.fc_rcvd++;
1184  		bus->flowcontrol = fcbits;
1185  	}
1186  
1187  	/* Shouldn't be any others */
1188  	if (hmb_data & ~(HMB_DATA_DEVREADY |
1189  			 HMB_DATA_NAKHANDLED |
1190  			 HMB_DATA_FC |
1191  			 HMB_DATA_FWREADY |
1192  			 HMB_DATA_FWHALT |
1193  			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
1194  		brcmf_err("Unknown mailbox data content: 0x%02x\n",
1195  			  hmb_data);
1196  
1197  	return intstatus;
1198  }
1199  
1200  static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1201  {
1202  	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
1203  	struct brcmf_core *core = bus->sdio_core;
1204  	uint retries = 0;
1205  	u16 lastrbc;
1206  	u8 hi, lo;
1207  	int err;
1208  
1209  	brcmf_err("%sterminate frame%s\n",
1210  		  abort ? "abort command, " : "",
1211  		  rtx ? ", send NAK" : "");
1212  
1213  	if (abort)
1214  		brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2);
1215  
1216  	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
1217  			   &err);
1218  	bus->sdcnt.f1regdata++;
1219  
1220  	/* Wait until the packet has been flushed (device/FIFO stable) */
1221  	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
1222  		hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
1223  				       &err);
1224  		lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
1225  				       &err);
1226  		bus->sdcnt.f1regdata += 2;
1227  
1228  		if ((hi == 0) && (lo == 0))
1229  			break;
1230  
1231  		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1232  			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1233  				  lastrbc, (hi << 8) + lo);
1234  		}
1235  		lastrbc = (hi << 8) + lo;
1236  	}
1237  
1238  	if (!retries)
1239  		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1240  	else
1241  		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1242  
1243  	if (rtx) {
1244  		bus->sdcnt.rxrtx++;
1245  		brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
1246  				   SMB_NAK, &err);
1247  
1248  		bus->sdcnt.f1regdata++;
1249  		if (err == 0)
1250  			bus->rxskip = true;
1251  	}
1252  
1253  	/* Clear partial in any case */
1254  	bus->cur_read.len = 0;
1255  }
1256  
1257  static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
1258  {
1259  	struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
1260  	u8 i, hi, lo;
1261  
1262  	/* On failure, abort the command and terminate the frame */
1263  	brcmf_err("sdio error, abort command and terminate frame\n");
1264  	bus->sdcnt.tx_sderrs++;
1265  
1266  	brcmf_sdiod_abort(sdiodev, sdiodev->func2);
1267  	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
1268  	bus->sdcnt.f1regdata++;
1269  
1270  	for (i = 0; i < 3; i++) {
1271  		hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1272  		lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1273  		bus->sdcnt.f1regdata += 2;
1274  		if ((hi == 0) && (lo == 0))
1275  			break;
1276  	}
1277  }
1278  
1279  /* return total length of buffer chain */
1280  static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
1281  {
1282  	struct sk_buff *p;
1283  	uint total;
1284  
1285  	total = 0;
1286  	skb_queue_walk(&bus->glom, p)
1287  		total += p->len;
1288  	return total;
1289  }
1290  
1291  static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
1292  {
1293  	struct sk_buff *cur, *next;
1294  
1295  	skb_queue_walk_safe(&bus->glom, cur, next) {
1296  		skb_unlink(cur, &bus->glom);
1297  		brcmu_pkt_buf_free_skb(cur);
1298  	}
1299  }
1300  
1301  /*
1302   * brcmfmac sdio bus specific header
1303   * This is the lowest layer header wrapped on the packets transmitted between
1304   * host and WiFi dongle which contains information needed for SDIO core and
1305   * firmware
1306   *
1307   * It consists of 3 parts: hardware header, hardware extension header and
1308   * software header
1309   * hardware header (frame tag) - 4 bytes
1310   * Byte 0~1: Frame length
1311   * Byte 2~3: Checksum, bit-wise inverse of frame length
1312   * hardware extension header - 8 bytes
1313   * Tx glom mode only, N/A for Rx or normal Tx
1314   * Byte 0~1: Packet length excluding hw frame tag
1315   * Byte 2: Reserved
1316   * Byte 3: Frame flags, bit 0: last frame indication
1317   * Byte 4~5: Reserved
1318   * Byte 6~7: Tail padding length
1319   * software header - 8 bytes
1320   * Byte 0: Rx/Tx sequence number
1321   * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1322   * Byte 2: Length of next data frame, reserved for Tx
1323   * Byte 3: Data offset
1324   * Byte 4: Flow control bits, reserved for Tx
1325   * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet
1326   * Byte 6~7: Reserved
1327   */
1328  #define SDPCM_HWHDR_LEN			4
1329  #define SDPCM_HWEXT_LEN			8
1330  #define SDPCM_SWHDR_LEN			8
1331  #define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1332  /* software header */
1333  #define SDPCM_SEQ_MASK			0x000000ff
1334  #define SDPCM_SEQ_WRAP			256
1335  #define SDPCM_CHANNEL_MASK		0x00000f00
1336  #define SDPCM_CHANNEL_SHIFT		8
1337  #define SDPCM_CONTROL_CHANNEL		0	/* Control */
1338  #define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
1339  #define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
1340  #define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
1341  #define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
1342  #define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
1343  #define SDPCM_NEXTLEN_MASK		0x00ff0000
1344  #define SDPCM_NEXTLEN_SHIFT		16
1345  #define SDPCM_DOFFSET_MASK		0xff000000
1346  #define SDPCM_DOFFSET_SHIFT		24
1347  #define SDPCM_FCMASK_MASK		0x000000ff
1348  #define SDPCM_WINDOW_MASK		0x0000ff00
1349  #define SDPCM_WINDOW_SHIFT		8
1350  
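/* Field extraction example (illustrative value): for a software header word
 * of 0x1204003f the data offset is
 * (0x1204003f & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT == 0x12.
 */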
1351  static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1352  {
1353  	u32 hdrvalue;
1354  	hdrvalue = le32_to_cpu(*(__le32 *)swheader);
1355  	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1356  }
1357  
1358  static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
1359  {
1360  	u32 hdrvalue;
1361  	u8 ret;
1362  
1363  	hdrvalue = le32_to_cpu(*(__le32 *)swheader);
1364  	ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);
1365  
1366  	return (ret == SDPCM_EVENT_CHANNEL);
1367  }
1368  
1369  static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1370  			      struct brcmf_sdio_hdrinfo *rd,
1371  			      enum brcmf_sdio_frmtype type)
1372  {
1373  	u16 len, checksum;
1374  	u8 rx_seq, fc, tx_seq_max;
1375  	u32 swheader;
1376  
1377  	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
1378  
1379  	/* hw header */
1380  	len = get_unaligned_le16(header);
1381  	checksum = get_unaligned_le16(header + sizeof(u16));
1382  	/* All zero means no more to read */
1383  	if (!(len | checksum)) {
1384  		bus->rxpending = false;
1385  		return -ENODATA;
1386  	}
1387  	if ((u16)(~(len ^ checksum))) {
1388  		brcmf_err("HW header checksum error\n");
1389  		bus->sdcnt.rx_badhdr++;
1390  		brcmf_sdio_rxfail(bus, false, false);
1391  		return -EIO;
1392  	}
1393  	if (len < SDPCM_HDRLEN) {
1394  		brcmf_err("HW header length error\n");
1395  		return -EPROTO;
1396  	}
1397  	if (type == BRCMF_SDIO_FT_SUPER &&
1398  	    (roundup(len, bus->blocksize) != rd->len)) {
1399  		brcmf_err("HW superframe header length error\n");
1400  		return -EPROTO;
1401  	}
1402  	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1403  		brcmf_err("HW subframe header length error\n");
1404  		return -EPROTO;
1405  	}
1406  	rd->len = len;
1407  
1408  	/* software header */
1409  	header += SDPCM_HWHDR_LEN;
1410  	swheader = le32_to_cpu(*(__le32 *)header);
1411  	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1412  		brcmf_err("Glom descriptor found in superframe head\n");
1413  		rd->len = 0;
1414  		return -EINVAL;
1415  	}
1416  	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1417  	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1418  	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1419  	    type != BRCMF_SDIO_FT_SUPER) {
1420  		brcmf_err("HW header length too long\n");
1421  		bus->sdcnt.rx_toolong++;
1422  		brcmf_sdio_rxfail(bus, false, false);
1423  		rd->len = 0;
1424  		return -EPROTO;
1425  	}
1426  	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1427  		brcmf_err("Wrong channel for superframe\n");
1428  		rd->len = 0;
1429  		return -EINVAL;
1430  	}
1431  	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1432  	    rd->channel != SDPCM_EVENT_CHANNEL) {
1433  		brcmf_err("Wrong channel for subframe\n");
1434  		rd->len = 0;
1435  		return -EINVAL;
1436  	}
1437  	rd->dat_offset = brcmf_sdio_getdatoffset(header);
1438  	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1439  		brcmf_err("seq %d: bad data offset\n", rx_seq);
1440  		bus->sdcnt.rx_badhdr++;
1441  		brcmf_sdio_rxfail(bus, false, false);
1442  		rd->len = 0;
1443  		return -ENXIO;
1444  	}
1445  	if (rd->seq_num != rx_seq) {
1446  		brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num);
1447  		bus->sdcnt.rx_badseq++;
1448  		rd->seq_num = rx_seq;
1449  	}
1450  	/* no need to check the reset for subframe */
1451  	if (type == BRCMF_SDIO_FT_SUB)
1452  		return 0;
1453  	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1454  	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1455  		/* only warn for non-glom packets */
1456  		if (rd->channel != SDPCM_GLOM_CHANNEL)
1457  			brcmf_err("seq %d: next length error\n", rx_seq);
1458  		rd->len_nxtfrm = 0;
1459  	}
1460  	swheader = le32_to_cpu(*(__le32 *)(header + 4));
1461  	fc = swheader & SDPCM_FCMASK_MASK;
1462  	if (bus->flowcontrol != fc) {
1463  		if (~bus->flowcontrol & fc)
1464  			bus->sdcnt.fc_xoff++;
1465  		if (bus->flowcontrol & ~fc)
1466  			bus->sdcnt.fc_xon++;
1467  		bus->sdcnt.fc_rcvd++;
1468  		bus->flowcontrol = fc;
1469  	}
1470  	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1471  	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1472  		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1473  		tx_seq_max = bus->tx_seq + 2;
1474  	}
1475  	bus->tx_max = tx_seq_max;
1476  
1477  	return 0;
1478  }
1479  
1480  static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1481  {
1482  	*(__le16 *)header = cpu_to_le16(frm_length);
1483  	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1484  }
1485  
1486  static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1487  			      struct brcmf_sdio_hdrinfo *hd_info)
1488  {
1489  	u32 hdrval;
1490  	u8 hdr_offset;
1491  
1492  	brcmf_sdio_update_hwhdr(header, hd_info->len);
1493  	hdr_offset = SDPCM_HWHDR_LEN;
1494  
1495  	if (bus->txglom) {
1496  		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
1497  		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1498  		hdrval = (u16)hd_info->tail_pad << 16;
1499  		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
1500  		hdr_offset += SDPCM_HWEXT_LEN;
1501  	}
1502  
1503  	hdrval = hd_info->seq_num;
1504  	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1505  		  SDPCM_CHANNEL_MASK;
1506  	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1507  		  SDPCM_DOFFSET_MASK;
1508  	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1509  	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
1510  	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
1511  }
1512  
1513  static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1514  {
1515  	u16 dlen, totlen;
1516  	u8 *dptr, num = 0;
1517  	u16 sublen;
1518  	struct sk_buff *pfirst, *pnext;
1519  
1520  	int errcode;
1521  	u8 doff;
1522  
1523  	struct brcmf_sdio_hdrinfo rd_new;
1524  
1525  	/* If packets, issue read(s) and send up packet chain */
1526  	/* Return sequence numbers consumed? */
1527  
1528  	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1529  		  bus->glomd, skb_peek(&bus->glom));
1530  
1531  	/* If there's a descriptor, generate the packet chain */
1532  	if (bus->glomd) {
1533  		pfirst = pnext = NULL;
1534  		dlen = (u16) (bus->glomd->len);
1535  		dptr = bus->glomd->data;
1536  		if (!dlen || (dlen & 1)) {
1537  			brcmf_err("bad glomd len(%d), ignore descriptor\n",
1538  				  dlen);
1539  			dlen = 0;
1540  		}
1541  
1542  		for (totlen = num = 0; dlen; num++) {
1543  			/* Get (and move past) next length */
1544  			sublen = get_unaligned_le16(dptr);
1545  			dlen -= sizeof(u16);
1546  			dptr += sizeof(u16);
1547  			if ((sublen < SDPCM_HDRLEN) ||
1548  			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1549  				brcmf_err("descriptor len %d bad: %d\n",
1550  					  num, sublen);
1551  				pnext = NULL;
1552  				break;
1553  			}
1554  			if (sublen % bus->sgentry_align) {
1555  				brcmf_err("sublen %d not multiple of %d\n",
1556  					  sublen, bus->sgentry_align);
1557  			}
1558  			totlen += sublen;
1559  
1560  			/* For last frame, adjust read len so total
1561  				 is a block multiple */
1562  			if (!dlen) {
1563  				sublen +=
1564  				    (roundup(totlen, bus->blocksize) - totlen);
1565  				totlen = roundup(totlen, bus->blocksize);
1566  			}
1567  
1568  			/* Allocate/chain packet for next subframe */
1569  			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
1570  			if (pnext == NULL) {
1571  				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1572  					  num, sublen);
1573  				break;
1574  			}
1575  			skb_queue_tail(&bus->glom, pnext);
1576  
1577  			/* Adhere to start alignment requirements */
1578  			pkt_align(pnext, sublen, bus->sgentry_align);
1579  		}
1580  
1581  		/* If all allocations succeeded, save packet chain
1582  			 in bus structure */
1583  		if (pnext) {
1584  			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1585  				  totlen, num);
1586  			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1587  			    totlen != bus->cur_read.len) {
1588  				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1589  					  bus->cur_read.len, totlen, rxseq);
1590  			}
1591  			pfirst = pnext = NULL;
1592  		} else {
1593  			brcmf_sdio_free_glom(bus);
1594  			num = 0;
1595  		}
1596  
1597  		/* Done with descriptor packet */
1598  		brcmu_pkt_buf_free_skb(bus->glomd);
1599  		bus->glomd = NULL;
1600  		bus->cur_read.len = 0;
1601  	}
1602  
1603  	/* Ok -- either we just generated a packet chain,
1604  	 * or had one from before */
1605  	if (!skb_queue_empty(&bus->glom)) {
1606  		if (BRCMF_GLOM_ON()) {
1607  			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1608  			skb_queue_walk(&bus->glom, pnext) {
1609  				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
1610  					  pnext, (u8 *) (pnext->data),
1611  					  pnext->len, pnext->len);
1612  			}
1613  		}
1614  
1615  		pfirst = skb_peek(&bus->glom);
1616  		dlen = (u16) brcmf_sdio_glom_len(bus);
1617  
1618  		/* Do an SDIO read for the superframe.  Configurable iovar to
1619  		 * read directly into the chained packet, or allocate a large
1620  		 * packet and copy into the chain.
1621  		 */
1622  		sdio_claim_host(bus->sdiodev->func1);
1623  		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
1624  						 &bus->glom, dlen);
1625  		sdio_release_host(bus->sdiodev->func1);
1626  		bus->sdcnt.f2rxdata++;
1627  
1628  		/* On failure, kill the superframe */
1629  		if (errcode < 0) {
1630  			brcmf_err("glom read of %d bytes failed: %d\n",
1631  				  dlen, errcode);
1632  
1633  			sdio_claim_host(bus->sdiodev->func1);
1634  			brcmf_sdio_rxfail(bus, true, false);
1635  			bus->sdcnt.rxglomfail++;
1636  			brcmf_sdio_free_glom(bus);
1637  			sdio_release_host(bus->sdiodev->func1);
1638  			return 0;
1639  		}
1640  
1641  		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1642  				   pfirst->data, min_t(int, pfirst->len, 48),
1643  				   "SUPERFRAME:\n");
1644  
1645  		rd_new.seq_num = rxseq;
1646  		rd_new.len = dlen;
1647  		sdio_claim_host(bus->sdiodev->func1);
1648  		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1649  					     BRCMF_SDIO_FT_SUPER);
1650  		sdio_release_host(bus->sdiodev->func1);
1651  		bus->cur_read.len = rd_new.len_nxtfrm << 4;
1652  
1653  		/* Remove superframe header, remember offset */
1654  		skb_pull(pfirst, rd_new.dat_offset);
1655  		num = 0;
1656  
1657  		/* Validate all the subframe headers */
1658  		skb_queue_walk(&bus->glom, pnext) {
1659  			/* leave when invalid subframe is found */
1660  			if (errcode)
1661  				break;
1662  
1663  			rd_new.len = pnext->len;
1664  			rd_new.seq_num = rxseq++;
1665  			sdio_claim_host(bus->sdiodev->func1);
1666  			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1667  						     BRCMF_SDIO_FT_SUB);
1668  			sdio_release_host(bus->sdiodev->func1);
1669  			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1670  					   pnext->data, 32, "subframe:\n");
1671  
1672  			num++;
1673  		}
1674  
1675  		if (errcode) {
1676  			/* Terminate frame on error */
1677  			sdio_claim_host(bus->sdiodev->func1);
1678  			brcmf_sdio_rxfail(bus, true, false);
1679  			bus->sdcnt.rxglomfail++;
1680  			brcmf_sdio_free_glom(bus);
1681  			sdio_release_host(bus->sdiodev->func1);
1682  			bus->cur_read.len = 0;
1683  			return 0;
1684  		}
1685  
1686  		/* Basic SD framing looks ok - process each packet (header) */
1687  
1688  		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1689  			dptr = (u8 *) (pfirst->data);
1690  			sublen = get_unaligned_le16(dptr);
1691  			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1692  
1693  			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1694  					   dptr, pfirst->len,
1695  					   "Rx Subframe Data:\n");
1696  
1697  			__skb_trim(pfirst, sublen);
1698  			skb_pull(pfirst, doff);
1699  
1700  			if (pfirst->len == 0) {
1701  				skb_unlink(pfirst, &bus->glom);
1702  				brcmu_pkt_buf_free_skb(pfirst);
1703  				continue;
1704  			}
1705  
1706  			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1707  					   pfirst->data,
1708  					   min_t(int, pfirst->len, 32),
1709  					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1710  					   bus->glom.qlen, pfirst, pfirst->data,
1711  					   pfirst->len, pfirst->next,
1712  					   pfirst->prev);
1713  			skb_unlink(pfirst, &bus->glom);
1714  			if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN]))
1715  				brcmf_rx_event(bus->sdiodev->dev, pfirst);
1716  			else
1717  				brcmf_rx_frame(bus->sdiodev->dev, pfirst,
1718  					       false, false);
1719  			bus->sdcnt.rxglompkts++;
1720  		}
1721  
1722  		bus->sdcnt.rxglomframes++;
1723  	}
1724  	return num;
1725  }
1726  
1727  static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1728  				     bool *pending)
1729  {
1730  	DECLARE_WAITQUEUE(wait, current);
1731  	int timeout = DCMD_RESP_TIMEOUT;
1732  
1733  	/* Wait until control frame is available */
1734  	add_wait_queue(&bus->dcmd_resp_wait, &wait);
1735  	set_current_state(TASK_INTERRUPTIBLE);
1736  
1737  	while (!(*condition) && (!signal_pending(current) && timeout))
1738  		timeout = schedule_timeout(timeout);
1739  
1740  	if (signal_pending(current))
1741  		*pending = true;
1742  
1743  	set_current_state(TASK_RUNNING);
1744  	remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1745  
1746  	return timeout;
1747  }
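
/* Illustrative note, not part of the driver: the open-coded wait above is
 * roughly equivalent to
 *
 *	timeout = wait_event_interruptible_timeout(bus->dcmd_resp_wait,
 *						   *condition,
 *						   DCMD_RESP_TIMEOUT);
 *
 * except that the hand-rolled loop also reports a pending signal back to
 * the caller through *pending instead of returning -ERESTARTSYS.
 */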
1748  
1749  static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
1750  {
1751  	wake_up_interruptible(&bus->dcmd_resp_wait);
1752  
1753  	return 0;
1754  }
1755  static void
1756  brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1757  {
1758  	uint rdlen, pad;
1759  	u8 *buf = NULL, *rbuf;
1760  	int sdret;
1761  
1762  	brcmf_dbg(SDIO, "Enter\n");
1763  	if (bus->rxblen)
1764  		buf = vzalloc(bus->rxblen);
1765  	if (!buf)
1766  		goto done;
1767  
1768  	rbuf = bus->rxbuf;
1769  	pad = ((unsigned long)rbuf % bus->head_align);
1770  	if (pad)
1771  		rbuf += (bus->head_align - pad);
1772  
1773  	/* Copy the already-read portion over */
1774  	memcpy(buf, hdr, BRCMF_FIRSTREAD);
1775  	if (len <= BRCMF_FIRSTREAD)
1776  		goto gotpkt;
1777  
1778  	/* Raise rdlen to next SDIO block to avoid tail command */
1779  	rdlen = len - BRCMF_FIRSTREAD;
1780  	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1781  		pad = bus->blocksize - (rdlen % bus->blocksize);
1782  		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1783  		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
1784  			rdlen += pad;
1785  	} else if (rdlen % bus->head_align) {
1786  		rdlen += bus->head_align - (rdlen % bus->head_align);
1787  	}
1788  
1789  	/* Drop if the read is too big or it exceeds our maximum */
1790  	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1791  		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1792  			  rdlen, bus->sdiodev->bus_if->maxctl);
1793  		brcmf_sdio_rxfail(bus, false, false);
1794  		goto done;
1795  	}
1796  
1797  	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1798  		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1799  			  len, len - doff, bus->sdiodev->bus_if->maxctl);
1800  		bus->sdcnt.rx_toolong++;
1801  		brcmf_sdio_rxfail(bus, false, false);
1802  		goto done;
1803  	}
1804  
1805  	/* Read the remainder of the frame body */
1806  	sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
1807  	bus->sdcnt.f2rxdata++;
1808  
1809  	/* Control frame failures need retransmission */
1810  	if (sdret < 0) {
1811  		brcmf_err("read %d control bytes failed: %d\n",
1812  			  rdlen, sdret);
1813  		bus->sdcnt.rxc_errors++;
1814  		brcmf_sdio_rxfail(bus, true, true);
1815  		goto done;
1816  	} else
1817  		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1818  
1819  gotpkt:
1820  
1821  	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1822  			   buf, len, "RxCtrl:\n");
1823  
1824  	/* Point to valid data and indicate its length */
1825  	spin_lock_bh(&bus->rxctl_lock);
1826  	if (bus->rxctl) {
1827  		brcmf_err("last control frame is being processed.\n");
1828  		spin_unlock_bh(&bus->rxctl_lock);
1829  		vfree(buf);
1830  		goto done;
1831  	}
1832  	bus->rxctl = buf + doff;
1833  	bus->rxctl_orig = buf;
1834  	bus->rxlen = len - doff;
1835  	spin_unlock_bh(&bus->rxctl_lock);
1836  
1837  done:
1838  	/* Awake any waiters */
1839  	brcmf_sdio_dcmd_resp_wake(bus);
1840  }
1841  
1842  /* Pad read to blocksize for efficiency */
1843  static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1844  {
1845  	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1846  		*pad = bus->blocksize - (*rdlen % bus->blocksize);
1847  		if (*pad <= bus->roundup && *pad < bus->blocksize &&
1848  		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1849  			*rdlen += *pad;
1850  	} else if (*rdlen % bus->head_align) {
1851  		*rdlen += bus->head_align - (*rdlen % bus->head_align);
1852  	}
1853  }
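
/* Worked example for the padding above (illustrative numbers, assuming
 * blocksize = 512, roundup = 512 and head_align = 4): a remaining read
 * length of 700 bytes gets *pad = 512 - (700 % 512) = 324, so *rdlen grows
 * to 1024 and the transfer ends on a block boundary (provided the result
 * stays below MAX_RX_DATASZ); a 61-byte read, being below the block size,
 * is only rounded up to 64, the next multiple of head_align.
 */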
1854  
1855  static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1856  {
1857  	struct sk_buff *pkt;		/* Packet for event or data frames */
1858  	u16 pad;		/* Number of pad bytes to read */
1859  	uint rxleft = 0;	/* Remaining number of frames allowed */
1860  	int ret;		/* Return code from calls */
1861  	uint rxcount = 0;	/* Total frames read */
1862  	struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1863  	u8 head_read = 0;
1864  
1865  	brcmf_dbg(SDIO, "Enter\n");
1866  
1867  	/* Not finished unless we encounter a no-more-frames indication */
1868  	bus->rxpending = true;
1869  
1870  	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1871  	     !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
1872  	     rd->seq_num++, rxleft--) {
1873  
1874  		/* Handle glomming separately */
1875  		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1876  			u8 cnt;
1877  			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1878  				  bus->glomd, skb_peek(&bus->glom));
1879  			cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
1880  			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1881  			rd->seq_num += cnt - 1;
1882  			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1883  			continue;
1884  		}
1885  
1886  		rd->len_left = rd->len;
1887  		/* read header first for unknown frame length */
1888  		sdio_claim_host(bus->sdiodev->func1);
1889  		if (!rd->len) {
1890  			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
1891  						   bus->rxhdr, BRCMF_FIRSTREAD);
1892  			bus->sdcnt.f2rxhdrs++;
1893  			if (ret < 0) {
1894  				brcmf_err("RXHEADER FAILED: %d\n",
1895  					  ret);
1896  				bus->sdcnt.rx_hdrfail++;
1897  				brcmf_sdio_rxfail(bus, true, true);
1898  				sdio_release_host(bus->sdiodev->func1);
1899  				continue;
1900  			}
1901  
1902  			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1903  					   bus->rxhdr, SDPCM_HDRLEN,
1904  					   "RxHdr:\n");
1905  
1906  			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1907  					       BRCMF_SDIO_FT_NORMAL)) {
1908  				sdio_release_host(bus->sdiodev->func1);
1909  				if (!bus->rxpending)
1910  					break;
1911  				else
1912  					continue;
1913  			}
1914  
1915  			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1916  				brcmf_sdio_read_control(bus, bus->rxhdr,
1917  							rd->len,
1918  							rd->dat_offset);
1919  				/* prepare the descriptor for the next read */
1920  				rd->len = rd->len_nxtfrm << 4;
1921  				rd->len_nxtfrm = 0;
1922  				/* treat all packets as events if we don't know */
1923  				rd->channel = SDPCM_EVENT_CHANNEL;
1924  				sdio_release_host(bus->sdiodev->func1);
1925  				continue;
1926  			}
1927  			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1928  				       rd->len - BRCMF_FIRSTREAD : 0;
1929  			head_read = BRCMF_FIRSTREAD;
1930  		}
1931  
1932  		brcmf_sdio_pad(bus, &pad, &rd->len_left);
1933  
1934  		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1935  					    bus->head_align);
1936  		if (!pkt) {
1937  			/* Give up on data, request rtx of events */
1938  			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1939  			brcmf_sdio_rxfail(bus, false,
1940  					    RETRYCHAN(rd->channel));
1941  			sdio_release_host(bus->sdiodev->func1);
1942  			continue;
1943  		}
1944  		skb_pull(pkt, head_read);
1945  		pkt_align(pkt, rd->len_left, bus->head_align);
1946  
1947  		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
1948  		bus->sdcnt.f2rxdata++;
1949  		sdio_release_host(bus->sdiodev->func1);
1950  
1951  		if (ret < 0) {
1952  			brcmf_err("read %d bytes from channel %d failed: %d\n",
1953  				  rd->len, rd->channel, ret);
1954  			brcmu_pkt_buf_free_skb(pkt);
1955  			sdio_claim_host(bus->sdiodev->func1);
1956  			brcmf_sdio_rxfail(bus, true,
1957  					    RETRYCHAN(rd->channel));
1958  			sdio_release_host(bus->sdiodev->func1);
1959  			continue;
1960  		}
1961  
1962  		if (head_read) {
1963  			skb_push(pkt, head_read);
1964  			memcpy(pkt->data, bus->rxhdr, head_read);
1965  			head_read = 0;
1966  		} else {
1967  			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1968  			rd_new.seq_num = rd->seq_num;
1969  			sdio_claim_host(bus->sdiodev->func1);
1970  			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
1971  					       BRCMF_SDIO_FT_NORMAL)) {
1972  				rd->len = 0;
1973  				brcmf_sdio_rxfail(bus, true, true);
1974  				sdio_release_host(bus->sdiodev->func1);
1975  				brcmu_pkt_buf_free_skb(pkt);
1976  				continue;
1977  			}
1978  			bus->sdcnt.rx_readahead_cnt++;
1979  			if (rd->len != roundup(rd_new.len, 16)) {
1980  				brcmf_err("frame length mismatch: read %d, should be %d\n",
1981  					  rd->len,
1982  					  roundup(rd_new.len, 16) >> 4);
1983  				rd->len = 0;
1984  				brcmf_sdio_rxfail(bus, true, true);
1985  				sdio_release_host(bus->sdiodev->func1);
1986  				brcmu_pkt_buf_free_skb(pkt);
1987  				continue;
1988  			}
1989  			sdio_release_host(bus->sdiodev->func1);
1990  			rd->len_nxtfrm = rd_new.len_nxtfrm;
1991  			rd->channel = rd_new.channel;
1992  			rd->dat_offset = rd_new.dat_offset;
1993  
1994  			brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1995  					     BRCMF_DATA_ON()) &&
1996  					   BRCMF_HDRS_ON(),
1997  					   bus->rxhdr, SDPCM_HDRLEN,
1998  					   "RxHdr:\n");
1999  
2000  			if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
2001  				brcmf_err("readahead on control packet %d?\n",
2002  					  rd_new.seq_num);
2003  				/* Force retry w/normal header read */
2004  				rd->len = 0;
2005  				sdio_claim_host(bus->sdiodev->func1);
2006  				brcmf_sdio_rxfail(bus, false, true);
2007  				sdio_release_host(bus->sdiodev->func1);
2008  				brcmu_pkt_buf_free_skb(pkt);
2009  				continue;
2010  			}
2011  		}
2012  
2013  		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
2014  				   pkt->data, rd->len, "Rx Data:\n");
2015  
2016  		/* Save superframe descriptor and allocate packet frame */
2017  		if (rd->channel == SDPCM_GLOM_CHANNEL) {
2018  			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
2019  				brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
2020  					  rd->len);
2021  				brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
2022  						   pkt->data, rd->len,
2023  						   "Glom Data:\n");
2024  				__skb_trim(pkt, rd->len);
2025  				skb_pull(pkt, SDPCM_HDRLEN);
2026  				bus->glomd = pkt;
2027  			} else {
2028  				brcmf_err("%s: glom superframe w/o "
2029  					  "descriptor!\n", __func__);
2030  				sdio_claim_host(bus->sdiodev->func1);
2031  				brcmf_sdio_rxfail(bus, false, false);
2032  				sdio_release_host(bus->sdiodev->func1);
2033  			}
2034  			/* prepare the descriptor for the next read */
2035  			rd->len = rd->len_nxtfrm << 4;
2036  			rd->len_nxtfrm = 0;
2037  			/* treat all packets as events if we don't know */
2038  			rd->channel = SDPCM_EVENT_CHANNEL;
2039  			continue;
2040  		}
2041  
2042  		/* Fill in packet len and prio, deliver upward */
2043  		__skb_trim(pkt, rd->len);
2044  		skb_pull(pkt, rd->dat_offset);
2045  
2046  		if (pkt->len == 0)
2047  			brcmu_pkt_buf_free_skb(pkt);
2048  		else if (rd->channel == SDPCM_EVENT_CHANNEL)
2049  			brcmf_rx_event(bus->sdiodev->dev, pkt);
2050  		else
2051  			brcmf_rx_frame(bus->sdiodev->dev, pkt,
2052  				       false, false);
2053  
2054  		/* prepare the descriptor for the next read */
2055  		rd->len = rd->len_nxtfrm << 4;
2056  		rd->len_nxtfrm = 0;
2057  		/* treat all packets as events if we don't know */
2058  		rd->channel = SDPCM_EVENT_CHANNEL;
2059  	}
2060  
2061  	rxcount = maxframes - rxleft;
2062  	/* Message if we hit the limit */
2063  	if (!rxleft)
2064  		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
2065  	else
2066  		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
2067  	/* Back off rxseq if awaiting rtx, update rx_seq */
2068  	if (bus->rxskip)
2069  		rd->seq_num--;
2070  	bus->rx_seq = rd->seq_num;
2071  
2072  	return rxcount;
2073  }
2074  
2075  static void
2076  brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
2077  {
2078  	wake_up_interruptible(&bus->ctrl_wait);
2079  	return;
2080  }
2081  
2082  static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2083  {
2084  	struct brcmf_bus_stats *stats;
2085  	u16 head_pad;
2086  	u8 *dat_buf;
2087  
2088  	dat_buf = (u8 *)(pkt->data);
2089  
2090  	/* Check head padding */
2091  	head_pad = ((unsigned long)dat_buf % bus->head_align);
2092  	if (head_pad) {
2093  		if (skb_headroom(pkt) < head_pad) {
2094  			stats = &bus->sdiodev->bus_if->stats;
2095  			atomic_inc(&stats->pktcowed);
2096  			if (skb_cow_head(pkt, head_pad)) {
2097  				atomic_inc(&stats->pktcow_failed);
2098  				return -ENOMEM;
2099  			}
2100  			head_pad = 0;
2101  		}
2102  		skb_push(pkt, head_pad);
2103  		dat_buf = (u8 *)(pkt->data);
2104  	}
2105  	memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
2106  	return head_pad;
2107  }
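
/* Worked example for the alignment above (illustrative, assuming
 * head_align = 4): if pkt->data sits at an address ending in ...2,
 * head_pad = 2, so the data pointer is pushed back by two bytes
 * (skb_cow_head() is used first if there is not enough headroom) and the
 * pad plus SDPCM header area is zeroed before the header is filled in.
 */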
2108  
2109  /*
2110   * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2111   * bus layer usage.
2112   */
2113  /* flag marking a dummy skb added for DMA alignment requirement */
2114  #define ALIGN_SKB_FLAG		0x8000
2115  /* bit mask of data length chopped from the previous packet */
2116  #define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
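
/* Illustrative sketch, not part of the driver: how a dummy alignment skb
 * could be tagged and later decoded under the flag/mask scheme above.
 * The helper names are hypothetical.
 */
#if 0
static void example_tag_align_skb(struct sk_buff *skb, u16 tail_chop)
{
	/* mark the skb as a dummy and record how much data was chopped
	 * from the previous packet
	 */
	*(u16 *)(skb->cb) = ALIGN_SKB_FLAG | (tail_chop & ALIGN_SKB_CHOP_LEN_MASK);
}

static u16 example_align_skb_chop_len(struct sk_buff *skb)
{
	u16 flags = *(u16 *)(skb->cb);

	/* only dummy skbs carry a chop length */
	return (flags & ALIGN_SKB_FLAG) ? (flags & ALIGN_SKB_CHOP_LEN_MASK) : 0;
}
#endif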
2117  
2118  static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
2119  				    struct sk_buff_head *pktq,
2120  				    struct sk_buff *pkt, u16 total_len)
2121  {
2122  	struct brcmf_sdio_dev *sdiodev;
2123  	struct sk_buff *pkt_pad;
2124  	u16 tail_pad, tail_chop, chain_pad;
2125  	unsigned int blksize;
2126  	bool lastfrm;
2127  	int ntail, ret;
2128  
2129  	sdiodev = bus->sdiodev;
2130  	blksize = sdiodev->func2->cur_blksize;
2131  	/* sg entry alignment should be a divisor of block size */
2132  	WARN_ON(blksize % bus->sgentry_align);
2133  
2134  	/* Check tail padding */
2135  	lastfrm = skb_queue_is_last(pktq, pkt);
2136  	tail_pad = 0;
2137  	tail_chop = pkt->len % bus->sgentry_align;
2138  	if (tail_chop)
2139  		tail_pad = bus->sgentry_align - tail_chop;
2140  	chain_pad = (total_len + tail_pad) % blksize;
2141  	if (lastfrm && chain_pad)
2142  		tail_pad += blksize - chain_pad;
2143  	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
2144  		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
2145  						bus->head_align);
2146  		if (pkt_pad == NULL)
2147  			return -ENOMEM;
2148  		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
2149  		if (unlikely(ret < 0)) {
2150  			kfree_skb(pkt_pad);
2151  			return ret;
2152  		}
2153  		memcpy(pkt_pad->data,
2154  		       pkt->data + pkt->len - tail_chop,
2155  		       tail_chop);
2156  		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
2157  		skb_trim(pkt, pkt->len - tail_chop);
2158  		skb_trim(pkt_pad, tail_pad + tail_chop);
2159  		__skb_queue_after(pktq, pkt, pkt_pad);
2160  	} else {
2161  		ntail = pkt->data_len + tail_pad -
2162  			(pkt->end - pkt->tail);
2163  		if (skb_cloned(pkt) || ntail > 0)
2164  			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
2165  				return -ENOMEM;
2166  		if (skb_linearize(pkt))
2167  			return -ENOMEM;
2168  		__skb_put(pkt, tail_pad);
2169  	}
2170  
2171  	return tail_pad;
2172  }
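
/* Worked example for the tail padding above (illustrative numbers,
 * assuming sgentry_align = 4 and a 512-byte block size): a last frame of
 * 998 bytes in a chain totalling 1000 bytes gives tail_chop = 2 and
 * tail_pad = 2; chain_pad = (1000 + 2) % 512 = 490, so another
 * 512 - 490 = 22 bytes are added, giving tail_pad = 24 and a 1024-byte
 * chain that ends on a block boundary.
 */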
2173  
2174  /**
2175   * brcmf_sdio_txpkt_prep - packet preparation for transmit
2176   * @bus: brcmf_sdio structure pointer
2177   * @pktq: packet list pointer
2178   * @chan: virtual channel to transmit the packet
2179   *
2180   * Processes to be applied to the packet
2181   *	- Align data buffer pointer
2182   *	- Align data buffer length
2183   *	- Prepare header
2184   * Return: negative value on error, 0 otherwise
2185   */
2186  static int
2187  brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2188  		      uint chan)
2189  {
2190  	u16 head_pad, total_len;
2191  	struct sk_buff *pkt_next;
2192  	u8 txseq;
2193  	int ret;
2194  	struct brcmf_sdio_hdrinfo hd_info = {0};
2195  
2196  	txseq = bus->tx_seq;
2197  	total_len = 0;
2198  	skb_queue_walk(pktq, pkt_next) {
2199  		/* alignment packet inserted in previous
2200  		 * loop cycle can be skipped as it is
2201  		 * already properly aligned and does not
2202  		 * need an sdpcm header.
2203  		 */
2204  		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
2205  			continue;
2206  
2207  		/* align packet data pointer */
2208  		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
2209  		if (ret < 0)
2210  			return ret;
2211  		head_pad = (u16)ret;
2212  		if (head_pad)
2213  			memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
2214  
2215  		total_len += pkt_next->len;
2216  
2217  		hd_info.len = pkt_next->len;
2218  		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
2219  		if (bus->txglom && pktq->qlen > 1) {
2220  			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
2221  						       pkt_next, total_len);
2222  			if (ret < 0)
2223  				return ret;
2224  			hd_info.tail_pad = (u16)ret;
2225  			total_len += (u16)ret;
2226  		}
2227  
2228  		hd_info.channel = chan;
2229  		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
2230  		hd_info.seq_num = txseq++;
2231  
2232  		/* Now fill the header */
2233  		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
2234  
2235  		if (BRCMF_BYTES_ON() &&
2236  		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
2237  		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
2238  			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
2239  					   "Tx Frame:\n");
2240  		else if (BRCMF_HDRS_ON())
2241  			brcmf_dbg_hex_dump(true, pkt_next->data,
2242  					   head_pad + bus->tx_hdrlen,
2243  					   "Tx Header:\n");
2244  	}
2245  	/* Hardware length tag of the first packet should be total
2246  	 * length of the chain (including padding)
2247  	 */
2248  	if (bus->txglom)
2249  		brcmf_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len);
2250  	return 0;
2251  }
2252  
2253  /**
2254   * brcmf_sdio_txpkt_postp - packet post processing for transmit
2255   * @bus: brcmf_sdio structure pointer
2256   * @pktq: packet list pointer
2257   *
2258   * Processes to be applied to the packet
2259   *	- Remove head padding
2260   *	- Remove tail padding
2261   */
2262  static void
2263  brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2264  {
2265  	u8 *hdr;
2266  	u32 dat_offset;
2267  	u16 tail_pad;
2268  	u16 dummy_flags, chop_len;
2269  	struct sk_buff *pkt_next, *tmp, *pkt_prev;
2270  
2271  	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2272  		dummy_flags = *(u16 *)(pkt_next->cb);
2273  		if (dummy_flags & ALIGN_SKB_FLAG) {
2274  			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2275  			if (chop_len) {
2276  				pkt_prev = pkt_next->prev;
2277  				skb_put(pkt_prev, chop_len);
2278  			}
2279  			__skb_unlink(pkt_next, pktq);
2280  			brcmu_pkt_buf_free_skb(pkt_next);
2281  		} else {
2282  			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
2283  			dat_offset = le32_to_cpu(*(__le32 *)hdr);
2284  			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
2285  				     SDPCM_DOFFSET_SHIFT;
2286  			skb_pull(pkt_next, dat_offset);
2287  			if (bus->txglom) {
2288  				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
2289  				skb_trim(pkt_next, pkt_next->len - tail_pad);
2290  			}
2291  		}
2292  	}
2293  }
2294  
2295  /* Writes a HW/SW header into the packet and sends it. */
2296  /* Assumes: (a) header space already there, (b) caller holds lock */
2297  static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2298  			    uint chan)
2299  {
2300  	int ret;
2301  	struct sk_buff *pkt_next, *tmp;
2302  
2303  	brcmf_dbg(TRACE, "Enter\n");
2304  
2305  	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
2306  	if (ret)
2307  		goto done;
2308  
2309  	sdio_claim_host(bus->sdiodev->func1);
2310  	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
2311  	bus->sdcnt.f2txdata++;
2312  
2313  	if (ret < 0)
2314  		brcmf_sdio_txfail(bus);
2315  
2316  	sdio_release_host(bus->sdiodev->func1);
2317  
2318  done:
2319  	brcmf_sdio_txpkt_postp(bus, pktq);
2320  	if (ret == 0)
2321  		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
2322  	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2323  		__skb_unlink(pkt_next, pktq);
2324  		brcmf_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next,
2325  					    ret == 0);
2326  	}
2327  	return ret;
2328  }
2329  
2330  static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2331  {
2332  	struct sk_buff *pkt;
2333  	struct sk_buff_head pktq;
2334  	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
2335  	u32 intstatus = 0;
2336  	int ret = 0, prec_out, i;
2337  	uint cnt = 0;
2338  	u8 tx_prec_map, pkt_num;
2339  
2340  	brcmf_dbg(TRACE, "Enter\n");
2341  
2342  	tx_prec_map = ~bus->flowcontrol;
2343  
2344  	/* Send frames until the limit or some other event */
2345  	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2346  		pkt_num = 1;
2347  		if (bus->txglom)
2348  			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2349  					bus->sdiodev->txglomsz);
2350  		pkt_num = min_t(u32, pkt_num,
2351  				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
2352  		__skb_queue_head_init(&pktq);
2353  		spin_lock_bh(&bus->txq_lock);
2354  		for (i = 0; i < pkt_num; i++) {
2355  			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
2356  					      &prec_out);
2357  			if (pkt == NULL)
2358  				break;
2359  			__skb_queue_tail(&pktq, pkt);
2360  		}
2361  		spin_unlock_bh(&bus->txq_lock);
2362  		if (i == 0)
2363  			break;
2364  
2365  		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2366  
2367  		cnt += i;
2368  
2369  		/* In poll mode, need to check for other events */
2370  		if (!bus->intr) {
2371  			/* Check device status, signal pending interrupt */
2372  			sdio_claim_host(bus->sdiodev->func1);
2373  			intstatus = brcmf_sdiod_readl(bus->sdiodev,
2374  						      intstat_addr, &ret);
2375  			sdio_release_host(bus->sdiodev->func1);
2376  
2377  			bus->sdcnt.f2txdata++;
2378  			if (ret != 0)
2379  				break;
2380  			if (intstatus & bus->hostintmask)
2381  				atomic_set(&bus->ipend, 1);
2382  		}
2383  	}
2384  
2385  	/* Deflow-control stack if needed */
2386  	if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
2387  	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2388  		bus->txoff = false;
2389  		brcmf_proto_bcdc_txflowblock(bus->sdiodev->dev, false);
2390  	}
2391  
2392  	return cnt;
2393  }
2394  
2395  static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
2396  {
2397  	u8 doff;
2398  	u16 pad;
2399  	uint retries = 0;
2400  	struct brcmf_sdio_hdrinfo hd_info = {0};
2401  	int ret;
2402  
2403  	brcmf_dbg(SDIO, "Enter\n");
2404  
2405  	/* Back the pointer to make room for bus header */
2406  	frame -= bus->tx_hdrlen;
2407  	len += bus->tx_hdrlen;
2408  
2409  	/* Add alignment padding (optional for ctl frames) */
2410  	doff = ((unsigned long)frame % bus->head_align);
2411  	if (doff) {
2412  		frame -= doff;
2413  		len += doff;
2414  		memset(frame + bus->tx_hdrlen, 0, doff);
2415  	}
2416  
2417  	/* Round send length to next SDIO block */
2418  	pad = 0;
2419  	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2420  		pad = bus->blocksize - (len % bus->blocksize);
2421  		if ((pad > bus->roundup) || (pad >= bus->blocksize))
2422  			pad = 0;
2423  	} else if (len % bus->head_align) {
2424  		pad = bus->head_align - (len % bus->head_align);
2425  	}
2426  	len += pad;
2427  
2428  	hd_info.len = len - pad;
2429  	hd_info.channel = SDPCM_CONTROL_CHANNEL;
2430  	hd_info.dat_offset = doff + bus->tx_hdrlen;
2431  	hd_info.seq_num = bus->tx_seq;
2432  	hd_info.lastfrm = true;
2433  	hd_info.tail_pad = pad;
2434  	brcmf_sdio_hdpack(bus, frame, &hd_info);
2435  
2436  	if (bus->txglom)
2437  		brcmf_sdio_update_hwhdr(frame, len);
2438  
2439  	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2440  			   frame, len, "Tx Frame:\n");
2441  	brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2442  			   BRCMF_HDRS_ON(),
2443  			   frame, min_t(u16, len, 16), "TxHdr:\n");
2444  
2445  	do {
2446  		ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
2447  
2448  		if (ret < 0)
2449  			brcmf_sdio_txfail(bus);
2450  		else
2451  			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2452  	} while (ret < 0 && retries++ < TXRETRIES);
2453  
2454  	return ret;
2455  }
2456  
2457  static bool brcmf_chip_is_ulp(struct brcmf_chip *ci)
2458  {
2459  	if (ci->chip == CY_CC_43012_CHIP_ID)
2460  		return true;
2461  	else
2462  		return false;
2463  }
2464  
2465  static void brcmf_sdio_bus_stop(struct device *dev)
2466  {
2467  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2468  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2469  	struct brcmf_sdio *bus = sdiodev->bus;
2470  	struct brcmf_core *core = bus->sdio_core;
2471  	u32 local_hostintmask;
2472  	u8 saveclk, bpreq;
2473  	int err;
2474  
2475  	brcmf_dbg(TRACE, "Enter\n");
2476  
2477  	if (bus->watchdog_tsk) {
2478  		send_sig(SIGTERM, bus->watchdog_tsk, 1);
2479  		kthread_stop(bus->watchdog_tsk);
2480  		bus->watchdog_tsk = NULL;
2481  	}
2482  
2483  	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
2484  		sdio_claim_host(sdiodev->func1);
2485  
2486  		/* Enable clock for device interrupts */
2487  		brcmf_sdio_bus_sleep(bus, false, false);
2488  
2489  		/* Disable and clear interrupts at the chip level also */
2490  		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
2491  				   0, NULL);
2492  
2493  		local_hostintmask = bus->hostintmask;
2494  		bus->hostintmask = 0;
2495  
2496  		/* Force backplane clocks to assure F2 interrupt propagates */
2497  		saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2498  					    &err);
2499  		if (!err) {
2500  			bpreq = saveclk;
2501  			bpreq |= brcmf_chip_is_ulp(bus->ci) ?
2502  				SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT;
2503  			brcmf_sdiod_writeb(sdiodev,
2504  					   SBSDIO_FUNC1_CHIPCLKCSR,
2505  					   bpreq, &err);
2506  		}
2507  		if (err)
2508  			brcmf_err("Failed to force clock for F2: err %d\n",
2509  				  err);
2510  
2511  		/* Turn off the bus (F2), free any pending packets */
2512  		brcmf_dbg(INTR, "disable SDIO interrupts\n");
2513  		sdio_disable_func(sdiodev->func2);
2514  
2515  		/* Clear any pending interrupts now that F2 is disabled */
2516  		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
2517  				   local_hostintmask, NULL);
2518  
2519  		sdio_release_host(sdiodev->func1);
2520  	}
2521  	/* Clear the data packet queues */
2522  	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2523  
2524  	/* Clear any held glomming stuff */
2525  	brcmu_pkt_buf_free_skb(bus->glomd);
2526  	brcmf_sdio_free_glom(bus);
2527  
2528  	/* Clear rx control and wake any waiters */
2529  	spin_lock_bh(&bus->rxctl_lock);
2530  	bus->rxlen = 0;
2531  	spin_unlock_bh(&bus->rxctl_lock);
2532  	brcmf_sdio_dcmd_resp_wake(bus);
2533  
2534  	/* Reset some F2 state stuff */
2535  	bus->rxskip = false;
2536  	bus->tx_seq = bus->rx_seq = 0;
2537  }
2538  
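/* Re-arm the out-of-band interrupt line once no device interrupt is
 * pending (the OOB handler is assumed to have disabled it to prevent
 * re-entry while the DPC runs).
 */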
2539  static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
2540  {
2541  	struct brcmf_sdio_dev *sdiodev;
2542  	unsigned long flags;
2543  
2544  	sdiodev = bus->sdiodev;
2545  	if (sdiodev->oob_irq_requested) {
2546  		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
2547  		if (!sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2548  			enable_irq(sdiodev->settings->bus.sdio.oob_irq_nr);
2549  			sdiodev->irq_en = true;
2550  		}
2551  		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
2552  	}
2553  }
2554  
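/* Read the chip interrupt status, write back only the observed bits to
 * acknowledge them (so an interrupt raised after the read is not cleared
 * by mistake), and fold them into bus->intstatus for the DPC.
 */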
2555  static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2556  {
2557  	struct brcmf_core *core = bus->sdio_core;
2558  	u32 addr;
2559  	unsigned long val;
2560  	int ret;
2561  
2562  	addr = core->base + SD_REG(intstatus);
2563  
2564  	val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
2565  	bus->sdcnt.f1regdata++;
2566  	if (ret != 0)
2567  		return ret;
2568  
2569  	val &= bus->hostintmask;
2570  	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2571  
2572  	/* Clear interrupts */
2573  	if (val) {
2574  		brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
2575  		bus->sdcnt.f1regdata++;
2576  		atomic_or(val, &bus->intstatus);
2577  	}
2578  
2579  	return ret;
2580  }
2581  
2582  static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2583  {
2584  	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
2585  	u32 newstatus = 0;
2586  	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
2587  	unsigned long intstatus;
2588  	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
2589  	uint framecnt;			/* Temporary counter of tx/rx frames */
2590  	int err = 0;
2591  
2592  	brcmf_dbg(SDIO, "Enter\n");
2593  
2594  	sdio_claim_host(bus->sdiodev->func1);
2595  
2596  	/* If waiting for HTAVAIL, check status */
2597  	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2598  		u8 clkctl, devctl = 0;
2599  
2600  #ifdef DEBUG
2601  		/* Check for inconsistent device control */
2602  		devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2603  					   &err);
2604  #endif				/* DEBUG */
2605  
2606  		/* Read CSR, if clock on switch to AVAIL, else ignore */
2607  		clkctl = brcmf_sdiod_readb(bus->sdiodev,
2608  					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
2609  
2610  		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2611  			  devctl, clkctl);
2612  
2613  		if (SBSDIO_HTAV(clkctl)) {
2614  			devctl = brcmf_sdiod_readb(bus->sdiodev,
2615  						   SBSDIO_DEVICE_CTL, &err);
2616  			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2617  			brcmf_sdiod_writeb(bus->sdiodev,
2618  					   SBSDIO_DEVICE_CTL, devctl, &err);
2619  			bus->clkstate = CLK_AVAIL;
2620  		}
2621  	}
2622  
2623  	/* Make sure backplane clock is on */
2624  	brcmf_sdio_bus_sleep(bus, false, true);
2625  
2626  	/* Pending interrupt indicates new device status */
2627  	if (atomic_read(&bus->ipend) > 0) {
2628  		atomic_set(&bus->ipend, 0);
2629  		err = brcmf_sdio_intr_rstatus(bus);
2630  	}
2631  
2632  	/* Start with leftover status bits */
2633  	intstatus = atomic_xchg(&bus->intstatus, 0);
2634  
2635  	/* Handle flow-control change: read new state in case our ack
2636  	 * crossed another change interrupt.  If change still set, assume
2637  	 * FC ON for safety, let next loop through do the debounce.
2638  	 */
2639  	if (intstatus & I_HMB_FC_CHANGE) {
2640  		intstatus &= ~I_HMB_FC_CHANGE;
2641  		brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);
2642  
2643  		newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);
2644  
2645  		bus->sdcnt.f1regdata += 2;
2646  		atomic_set(&bus->fcstate,
2647  			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2648  		intstatus |= (newstatus & bus->hostintmask);
2649  	}
2650  
2651  	/* Handle host mailbox indication */
2652  	if (intstatus & I_HMB_HOST_INT) {
2653  		intstatus &= ~I_HMB_HOST_INT;
2654  		intstatus |= brcmf_sdio_hostmail(bus);
2655  	}
2656  
2657  	sdio_release_host(bus->sdiodev->func1);
2658  
2659  	/* Generally don't ask for these, can get CRC errors... */
2660  	if (intstatus & I_WR_OOSYNC) {
2661  		brcmf_err("Dongle reports WR_OOSYNC\n");
2662  		intstatus &= ~I_WR_OOSYNC;
2663  	}
2664  
2665  	if (intstatus & I_RD_OOSYNC) {
2666  		brcmf_err("Dongle reports RD_OOSYNC\n");
2667  		intstatus &= ~I_RD_OOSYNC;
2668  	}
2669  
2670  	if (intstatus & I_SBINT) {
2671  		brcmf_err("Dongle reports SBINT\n");
2672  		intstatus &= ~I_SBINT;
2673  	}
2674  
2675  	/* Would be active due to wake-wlan in gSPI */
2676  	if (intstatus & I_CHIPACTIVE) {
2677  		brcmf_dbg(SDIO, "Dongle reports CHIPACTIVE\n");
2678  		intstatus &= ~I_CHIPACTIVE;
2679  	}
2680  
2681  	/* Ignore frame indications if rxskip is set */
2682  	if (bus->rxskip)
2683  		intstatus &= ~I_HMB_FRAME_IND;
2684  
2685  	/* On frame indication, read available frames */
2686  	if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
2687  		brcmf_sdio_readframes(bus, bus->rxbound);
2688  		if (!bus->rxpending)
2689  			intstatus &= ~I_HMB_FRAME_IND;
2690  	}
2691  
2692  	/* Keep still-pending events for next scheduling */
2693  	if (intstatus)
2694  		atomic_or(intstatus, &bus->intstatus);
2695  
2696  	brcmf_sdio_clrintr(bus);
2697  
2698  	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
2699  	    txctl_ok(bus)) {
2700  		sdio_claim_host(bus->sdiodev->func1);
2701  		if (bus->ctrl_frame_stat) {
2702  			err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
2703  						      bus->ctrl_frame_len);
2704  			bus->ctrl_frame_err = err;
2705  			wmb();
2706  			bus->ctrl_frame_stat = false;
2707  			if (err)
2708  				brcmf_err("sdio ctrlframe tx failed err=%d\n",
2709  					  err);
2710  		}
2711  		sdio_release_host(bus->sdiodev->func1);
2712  		brcmf_sdio_wait_event_wakeup(bus);
2713  	}
2714  	/* Send queued frames (limit 1 if rx may still be pending) */
2715  	if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2716  	    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
2717  	    data_ok(bus)) {
2718  		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2719  					    txlimit;
2720  		brcmf_sdio_sendfromq(bus, framecnt);
2721  	}
2722  
2723  	if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
2724  		brcmf_err("failed backplane access over SDIO, halting operation\n");
2725  		atomic_set(&bus->intstatus, 0);
2726  		if (bus->ctrl_frame_stat) {
2727  			sdio_claim_host(bus->sdiodev->func1);
2728  			if (bus->ctrl_frame_stat) {
2729  				bus->ctrl_frame_err = -ENODEV;
2730  				wmb();
2731  				bus->ctrl_frame_stat = false;
2732  				brcmf_sdio_wait_event_wakeup(bus);
2733  			}
2734  			sdio_release_host(bus->sdiodev->func1);
2735  		}
2736  	} else if (atomic_read(&bus->intstatus) ||
2737  		   atomic_read(&bus->ipend) > 0 ||
2738  		   (!atomic_read(&bus->fcstate) &&
2739  		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2740  		    data_ok(bus))) {
2741  		bus->dpc_triggered = true;
2742  	}
2743  }
2744  
2745  static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
2746  {
2747  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2748  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2749  	struct brcmf_sdio *bus = sdiodev->bus;
2750  
2751  	return &bus->txq;
2752  }
2753  
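/* Enqueue @pkt at precedence @prec, evicting a lower-precedence tail
 * packet when the queue is full.  For example (illustrative): with the
 * queue full and its tail at precedence 1, an incoming precedence-3
 * packet drops that tail and is queued, while an incoming precedence-1
 * packet is refused.
 */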
2754  static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
2755  {
2756  	struct sk_buff *p;
2757  	int eprec = -1;		/* precedence to evict from */
2758  
2759  	/* Fast case, precedence queue is not full and we are also not
2760  	 * exceeding total queue length
2761  	 */
2762  	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
2763  		brcmu_pktq_penq(q, prec, pkt);
2764  		return true;
2765  	}
2766  
2767  	/* Determine precedence from which to evict packet, if any */
2768  	if (pktq_pfull(q, prec)) {
2769  		eprec = prec;
2770  	} else if (pktq_full(q)) {
2771  		p = brcmu_pktq_peek_tail(q, &eprec);
2772  		if (eprec > prec)
2773  			return false;
2774  	}
2775  
2776  	/* Evict if needed */
2777  	if (eprec >= 0) {
2778  		/* Detect queueing to unconfigured precedence */
2779  		if (eprec == prec)
2780  			return false;	/* refuse newer (incoming) packet */
2781  		/* Evict packet according to discard policy */
2782  		p = brcmu_pktq_pdeq_tail(q, eprec);
2783  		if (p == NULL)
2784  			brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
2785  		brcmu_pkt_buf_free_skb(p);
2786  	}
2787  
2788  	/* Enqueue */
2789  	p = brcmu_pktq_penq(q, prec, pkt);
2790  	if (p == NULL)
2791  		brcmf_err("brcmu_pktq_penq() failed\n");
2792  
2793  	return p != NULL;
2794  }
2795  
2796  static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2797  {
2798  	int ret = -EBADE;
2799  	uint prec;
2800  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2801  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2802  	struct brcmf_sdio *bus = sdiodev->bus;
2803  
2804  	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
2805  	if (sdiodev->state != BRCMF_SDIOD_DATA)
2806  		return -EIO;
2807  
2808  	/* Add space for the header */
2809  	skb_push(pkt, bus->tx_hdrlen);
2810  	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2811  
2812  	/* In WLAN, priority is always set by the AP using WMM parameters
2813  	 * and this need not always follow the standard 802.1d priority.
2814  	 * Based on AP WMM config, map from 802.1d priority to corresponding
2815  	 * precedence level.
2816  	 */
2817  	prec = brcmf_map_prio_to_prec(bus_if->drvr->config,
2818  				      (pkt->priority & PRIOMASK));
2819  
2820  	/* Check for existing queue, current flow-control,
2821  	 * pending event, or pending clock */
2822  	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2823  	bus->sdcnt.fcqueued++;
2824  
2825  	/* Priority based enq */
2826  	spin_lock_bh(&bus->txq_lock);
2827  	/* reset bus_flags in packet cb */
2828  	*(u16 *)(pkt->cb) = 0;
2829  	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
2830  		skb_pull(pkt, bus->tx_hdrlen);
2831  		brcmf_err("out of bus->txq !!!\n");
2832  		ret = -ENOSR;
2833  	} else {
2834  		ret = 0;
2835  	}
2836  
2837  	if (pktq_len(&bus->txq) >= TXHI) {
2838  		bus->txoff = true;
2839  		brcmf_proto_bcdc_txflowblock(dev, true);
2840  	}
2841  	spin_unlock_bh(&bus->txq_lock);
2842  
2843  #ifdef DEBUG
2844  	if (pktq_plen(&bus->txq, prec) > qcount[prec])
2845  		qcount[prec] = pktq_plen(&bus->txq, prec);
2846  #endif
2847  
2848  	brcmf_sdio_trigger_dpc(bus);
2849  	return ret;
2850  }
2851  
2852  #ifdef DEBUG
2853  #define CONSOLE_LINE_MAX	192
2854  
2855  static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
2856  {
2857  	struct brcmf_console *c = &bus->console;
2858  	u8 line[CONSOLE_LINE_MAX], ch;
2859  	u32 n, idx, addr;
2860  	int rv;
2861  
2862  	/* Don't do anything until FWREADY updates console address */
2863  	if (bus->console_addr == 0)
2864  		return 0;
2865  
2866  	/* Read console log struct */
2867  	addr = bus->console_addr + offsetof(struct rte_console, log_le);
2868  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2869  			       sizeof(c->log_le));
2870  	if (rv < 0)
2871  		return rv;
2872  
2873  	/* Allocate console buffer (one time only) */
2874  	if (c->buf == NULL) {
2875  		c->bufsize = le32_to_cpu(c->log_le.buf_size);
2876  		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2877  		if (c->buf == NULL)
2878  			return -ENOMEM;
2879  	}
2880  
2881  	idx = le32_to_cpu(c->log_le.idx);
2882  
2883  	/* Protect against corrupt value */
2884  	if (idx > c->bufsize)
2885  		return -EBADE;
2886  
2887  	/* Skip reading the console buffer if the index pointer
2888  	 * has not moved */
2889  	if (idx == c->last)
2890  		return 0;
2891  
2892  	/* Read the console buffer */
2893  	addr = le32_to_cpu(c->log_le.buf);
2894  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2895  	if (rv < 0)
2896  		return rv;
2897  
2898  	while (c->last != idx) {
2899  		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2900  			if (c->last == idx) {
2901  				/* This would output a partial line.
2902  				 * Instead, back up
2903  				 * the buffer pointer and output this
2904  				 * line next time around.
2905  				 */
2906  				if (c->last >= n)
2907  					c->last -= n;
2908  				else
2909  					c->last = c->bufsize - n;
2910  				goto break2;
2911  			}
2912  			ch = c->buf[c->last];
2913  			c->last = (c->last + 1) % c->bufsize;
2914  			if (ch == '\n')
2915  				break;
2916  			line[n] = ch;
2917  		}
2918  
2919  		if (n > 0) {
2920  			if (line[n - 1] == '\r')
2921  				n--;
2922  			line[n] = 0;
2923  			pr_debug("CONSOLE: %s\n", line);
2924  		}
2925  	}
2926  break2:
2927  
2928  	return 0;
2929  }
2930  #endif				/* DEBUG */
2931  
2932  static int
2933  brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2934  {
2935  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2936  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2937  	struct brcmf_sdio *bus = sdiodev->bus;
2938  	int ret;
2939  
2940  	brcmf_dbg(TRACE, "Enter\n");
2941  	if (sdiodev->state != BRCMF_SDIOD_DATA)
2942  		return -EIO;
2943  
2944  	/* Send from dpc */
2945  	bus->ctrl_frame_buf = msg;
2946  	bus->ctrl_frame_len = msglen;
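	/* Make the control buffer and length visible before ctrl_frame_stat
	 * is set, so the DPC never sees the flag raised while the frame
	 * pointer or length is still stale.
	 */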
2947  	wmb();
2948  	bus->ctrl_frame_stat = true;
2949  
2950  	brcmf_sdio_trigger_dpc(bus);
2951  	wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
2952  					 CTL_DONE_TIMEOUT);
2953  	ret = 0;
2954  	if (bus->ctrl_frame_stat) {
2955  		sdio_claim_host(bus->sdiodev->func1);
2956  		if (bus->ctrl_frame_stat) {
2957  			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
2958  			bus->ctrl_frame_stat = false;
2959  			ret = -ETIMEDOUT;
2960  		}
2961  		sdio_release_host(bus->sdiodev->func1);
2962  	}
2963  	if (!ret) {
2964  		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
2965  			  bus->ctrl_frame_err);
2966  		rmb();
2967  		ret = bus->ctrl_frame_err;
2968  	}
2969  
2970  	if (ret)
2971  		bus->sdcnt.tx_ctlerrs++;
2972  	else
2973  		bus->sdcnt.tx_ctlpkts++;
2974  
2975  	return ret;
2976  }
2977  
2978  #ifdef DEBUG
2979  static int brcmf_sdio_dump_console(struct seq_file *seq, struct brcmf_sdio *bus,
2980  				   struct sdpcm_shared *sh)
2981  {
2982  	u32 addr, console_ptr, console_size, console_index;
2983  	char *conbuf = NULL;
2984  	__le32 sh_val;
2985  	int rv;
2986  
2987  	/* obtain console information from device memory */
2988  	addr = sh->console_addr + offsetof(struct rte_console, log_le);
2989  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2990  			       (u8 *)&sh_val, sizeof(u32));
2991  	if (rv < 0)
2992  		return rv;
2993  	console_ptr = le32_to_cpu(sh_val);
2994  
2995  	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2996  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2997  			       (u8 *)&sh_val, sizeof(u32));
2998  	if (rv < 0)
2999  		return rv;
3000  	console_size = le32_to_cpu(sh_val);
3001  
3002  	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
3003  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
3004  			       (u8 *)&sh_val, sizeof(u32));
3005  	if (rv < 0)
3006  		return rv;
3007  	console_index = le32_to_cpu(sh_val);
3008  
3009  	/* allocate buffer for console data */
3010  	if (console_size <= CONSOLE_BUFFER_MAX)
3011  		conbuf = vzalloc(console_size+1);
3012  
3013  	if (!conbuf)
3014  		return -ENOMEM;
3015  
3016  	/* obtain the console data from device */
3017  	conbuf[console_size] = '\0';
3018  	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
3019  			       console_size);
3020  	if (rv < 0)
3021  		goto done;
3022  
3023  	rv = seq_write(seq, conbuf + console_index,
3024  		       console_size - console_index);
3025  	if (rv < 0)
3026  		goto done;
3027  
3028  	if (console_index > 0)
3029  		rv = seq_write(seq, conbuf, console_index - 1);
3030  
3031  done:
3032  	vfree(conbuf);
3033  	return rv;
3034  }
3035  
3036  static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
3037  				struct sdpcm_shared *sh)
3038  {
3039  	int error;
3040  	struct brcmf_trap_info tr;
3041  
3042  	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
3043  		brcmf_dbg(INFO, "no trap in firmware\n");
3044  		return 0;
3045  	}
3046  
3047  	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
3048  				  sizeof(struct brcmf_trap_info));
3049  	if (error < 0)
3050  		return error;
3051  
3052  	if (seq)
3053  		seq_printf(seq,
3054  			   "dongle trap info: type 0x%x @ epc 0x%08x\n"
3055  			   "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3056  			   "  lr   0x%08x pc   0x%08x offset 0x%x\n"
3057  			   "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
3058  			   "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
3059  			   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3060  			   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3061  			   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3062  			   le32_to_cpu(tr.pc), sh->trap_addr,
3063  			   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3064  			   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3065  			   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3066  			   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3067  	else
3068  		pr_debug("dongle trap info: type 0x%x @ epc 0x%08x\n"
3069  			 "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3070  			 "  lr   0x%08x pc   0x%08x offset 0x%x\n"
3071  			 "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
3072  			 "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
3073  			 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3074  			 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3075  			 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3076  			 le32_to_cpu(tr.pc), sh->trap_addr,
3077  			 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3078  			 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3079  			 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3080  			 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3081  	return 0;
3082  }
3083  
3084  static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
3085  				  struct sdpcm_shared *sh)
3086  {
3087  	int error = 0;
3088  	char file[80] = "?";
3089  	char expr[80] = "<???>";
3090  
3091  	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3092  		brcmf_dbg(INFO, "firmware not built with -assert\n");
3093  		return 0;
3094  	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3095  		brcmf_dbg(INFO, "no assert in dongle\n");
3096  		return 0;
3097  	}
3098  
3099  	sdio_claim_host(bus->sdiodev->func1);
3100  	if (sh->assert_file_addr != 0) {
3101  		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3102  					  sh->assert_file_addr, (u8 *)file, 80);
3103  		if (error < 0)
3104  			return error;
3105  	}
3106  	if (sh->assert_exp_addr != 0) {
3107  		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3108  					  sh->assert_exp_addr, (u8 *)expr, 80);
3109  		if (error < 0)
3110  			return error;
3111  	}
3112  	sdio_release_host(bus->sdiodev->func1);
3113  
3114  	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
3115  		   file, sh->assert_line, expr);
3116  	return 0;
3117  }
3118  
3119  static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3120  {
3121  	int error;
3122  	struct sdpcm_shared sh;
3123  
3124  	error = brcmf_sdio_readshared(bus, &sh);
3125  
3126  	if (error < 0)
3127  		return error;
3128  
3129  	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3130  		brcmf_dbg(INFO, "firmware not built with -assert\n");
3131  	else if (sh.flags & SDPCM_SHARED_ASSERT)
3132  		brcmf_err("assertion in dongle\n");
3133  
3134  	if (sh.flags & SDPCM_SHARED_TRAP) {
3135  		brcmf_err("firmware trap in dongle\n");
3136  		brcmf_sdio_trap_info(NULL, bus, &sh);
3137  	}
3138  
3139  	return 0;
3140  }
3141  
3142  static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus)
3143  {
3144  	int error = 0;
3145  	struct sdpcm_shared sh;
3146  
3147  	error = brcmf_sdio_readshared(bus, &sh);
3148  	if (error < 0)
3149  		goto done;
3150  
3151  	error = brcmf_sdio_assert_info(seq, bus, &sh);
3152  	if (error < 0)
3153  		goto done;
3154  
3155  	error = brcmf_sdio_trap_info(seq, bus, &sh);
3156  	if (error < 0)
3157  		goto done;
3158  
3159  	error = brcmf_sdio_dump_console(seq, bus, &sh);
3160  
3161  done:
3162  	return error;
3163  }
3164  
3165  static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data)
3166  {
3167  	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
3168  	struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus;
3169  
3170  	return brcmf_sdio_died_dump(seq, bus);
3171  }
3172  
3173  static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
3174  {
3175  	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
3176  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3177  	struct brcmf_sdio_count *sdcnt = &sdiodev->bus->sdcnt;
3178  
3179  	seq_printf(seq,
3180  		   "intrcount:    %u\nlastintrs:    %u\n"
3181  		   "pollcnt:      %u\nregfails:     %u\n"
3182  		   "tx_sderrs:    %u\nfcqueued:     %u\n"
3183  		   "rxrtx:        %u\nrx_toolong:   %u\n"
3184  		   "rxc_errors:   %u\nrx_hdrfail:   %u\n"
3185  		   "rx_badhdr:    %u\nrx_badseq:    %u\n"
3186  		   "fc_rcvd:      %u\nfc_xoff:      %u\n"
3187  		   "fc_xon:       %u\nrxglomfail:   %u\n"
3188  		   "rxglomframes: %u\nrxglompkts:   %u\n"
3189  		   "f2rxhdrs:     %u\nf2rxdata:     %u\n"
3190  		   "f2txdata:     %u\nf1regdata:    %u\n"
3191  		   "tickcnt:      %u\ntx_ctlerrs:   %lu\n"
3192  		   "tx_ctlpkts:   %lu\nrx_ctlerrs:   %lu\n"
3193  		   "rx_ctlpkts:   %lu\nrx_readahead: %lu\n",
3194  		   sdcnt->intrcount, sdcnt->lastintrs,
3195  		   sdcnt->pollcnt, sdcnt->regfails,
3196  		   sdcnt->tx_sderrs, sdcnt->fcqueued,
3197  		   sdcnt->rxrtx, sdcnt->rx_toolong,
3198  		   sdcnt->rxc_errors, sdcnt->rx_hdrfail,
3199  		   sdcnt->rx_badhdr, sdcnt->rx_badseq,
3200  		   sdcnt->fc_rcvd, sdcnt->fc_xoff,
3201  		   sdcnt->fc_xon, sdcnt->rxglomfail,
3202  		   sdcnt->rxglomframes, sdcnt->rxglompkts,
3203  		   sdcnt->f2rxhdrs, sdcnt->f2rxdata,
3204  		   sdcnt->f2txdata, sdcnt->f1regdata,
3205  		   sdcnt->tickcnt, sdcnt->tx_ctlerrs,
3206  		   sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
3207  		   sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
3208  
3209  	return 0;
3210  }
3211  
3212  static void brcmf_sdio_debugfs_create(struct device *dev)
3213  {
3214  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3215  	struct brcmf_pub *drvr = bus_if->drvr;
3216  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3217  	struct brcmf_sdio *bus = sdiodev->bus;
3218  	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3219  
3220  	if (IS_ERR_OR_NULL(dentry))
3221  		return;
3222  
3223  	bus->console_interval = BRCMF_CONSOLE;
3224  
3225  	brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
3226  	brcmf_debugfs_add_entry(drvr, "counters",
3227  				brcmf_debugfs_sdio_count_read);
3228  	debugfs_create_u32("console_interval", 0644, dentry,
3229  			   &bus->console_interval);
3230  }
3231  #else
3232  static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3233  {
3234  	return 0;
3235  }
3236  
3237  static void brcmf_sdio_debugfs_create(struct device *dev)
3238  {
3239  }
3240  #endif /* DEBUG */
3241  
3242  static int
3243  brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3244  {
3245  	int timeleft;
3246  	uint rxlen = 0;
3247  	bool pending;
3248  	u8 *buf;
3249  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3250  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3251  	struct brcmf_sdio *bus = sdiodev->bus;
3252  
3253  	brcmf_dbg(TRACE, "Enter\n");
3254  	if (sdiodev->state != BRCMF_SDIOD_DATA)
3255  		return -EIO;
3256  
3257  	/* Wait until control frame is available */
3258  	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3259  
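	/* Consume the control frame under rxctl_lock: copy at most msglen
	 * bytes to the caller, detach the buffers from the bus state and
	 * free the original allocation outside the lock.
	 */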
3260  	spin_lock_bh(&bus->rxctl_lock);
3261  	rxlen = bus->rxlen;
3262  	memcpy(msg, bus->rxctl, min(msglen, rxlen));
3263  	bus->rxctl = NULL;
3264  	buf = bus->rxctl_orig;
3265  	bus->rxctl_orig = NULL;
3266  	bus->rxlen = 0;
3267  	spin_unlock_bh(&bus->rxctl_lock);
3268  	vfree(buf);
3269  
3270  	if (rxlen) {
3271  		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3272  			  rxlen, msglen);
3273  	} else if (timeleft == 0) {
3274  		brcmf_err("resumed on timeout\n");
3275  		brcmf_sdio_checkdied(bus);
3276  	} else if (pending) {
3277  		brcmf_dbg(CTL, "cancelled\n");
3278  		return -ERESTARTSYS;
3279  	} else {
3280  		brcmf_dbg(CTL, "resumed for unknown reason?\n");
3281  		brcmf_sdio_checkdied(bus);
3282  	}
3283  
3284  	if (rxlen)
3285  		bus->sdcnt.rx_ctlpkts++;
3286  	else
3287  		bus->sdcnt.rx_ctlerrs++;
3288  
3289  	return rxlen ? (int)rxlen : -ETIMEDOUT;
3290  }
3291  
3292  #ifdef DEBUG
3293  static bool
3294  brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3295  			u8 *ram_data, uint ram_sz)
3296  {
3297  	char *ram_cmp;
3298  	int err;
3299  	bool ret = true;
3300  	int address;
3301  	int offset;
3302  	int len;
3303  
3304  	/* read back and verify */
3305  	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
3306  		  ram_sz);
3307  	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
3308  	/* without a compare buffer, skip verification and report success */
3309  	if (!ram_cmp)
3310  		return true;
3311  
3312  	address = ram_addr;
3313  	offset = 0;
3314  	while (offset < ram_sz) {
3315  		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
3316  		      ram_sz - offset;
3317  		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
3318  		if (err) {
3319  			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3320  				  err, len, address);
3321  			ret = false;
3322  			break;
3323  		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
3324  			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
3325  				  offset, len);
3326  			ret = false;
3327  			break;
3328  		}
3329  		offset += len;
3330  		address += len;
3331  	}
3332  
3333  	kfree(ram_cmp);
3334  
3335  	return ret;
3336  }
3337  #else	/* DEBUG */
3338  static bool
3339  brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3340  			u8 *ram_data, uint ram_sz)
3341  {
3342  	return true;
3343  }
3344  #endif	/* DEBUG */
3345  
3346  static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
3347  					 const struct firmware *fw)
3348  {
3349  	int err;
3350  
3351  	brcmf_dbg(TRACE, "Enter\n");
3352  
3353  	err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
3354  				(u8 *)fw->data, fw->size);
3355  	if (err)
3356  		brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3357  			  err, (int)fw->size, bus->ci->rambase);
3358  	else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
3359  					  (u8 *)fw->data, fw->size))
3360  		err = -EIO;
3361  
3362  	return err;
3363  }
3364  
3365  static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
3366  				     void *vars, u32 varsz)
3367  {
3368  	int address;
3369  	int err;
3370  
3371  	brcmf_dbg(TRACE, "Enter\n");
3372  
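	/* NVRAM occupies the top of device RAM, ending at rambase + ramsize */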
3373  	address = bus->ci->ramsize - varsz + bus->ci->rambase;
3374  	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
3375  	if (err)
3376  		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
3377  			  err, varsz, address);
3378  	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
3379  		err = -EIO;
3380  
3381  	return err;
3382  }
3383  
3384  static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3385  					const struct firmware *fw,
3386  					void *nvram, u32 nvlen)
3387  {
3388  	int bcmerror;
3389  	u32 rstvec;
3390  
3391  	sdio_claim_host(bus->sdiodev->func1);
3392  	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3393  
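	/* The first 32-bit word of the firmware image holds the reset vector
	 * used later to take the ARM core out of reset.
	 */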
3394  	rstvec = get_unaligned_le32(fw->data);
3395  	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
3396  
3397  	bcmerror = brcmf_sdio_download_code_file(bus, fw);
3398  	release_firmware(fw);
3399  	if (bcmerror) {
3400  		brcmf_err("dongle image file download failed\n");
3401  		brcmf_fw_nvram_free(nvram);
3402  		goto err;
3403  	}
3404  
3405  	bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
3406  	brcmf_fw_nvram_free(nvram);
3407  	if (bcmerror) {
3408  		brcmf_err("dongle nvram file download failed\n");
3409  		goto err;
3410  	}
3411  
3412  	/* Take the ARM core out of reset */
3413  	if (!brcmf_chip_set_active(bus->ci, rstvec)) {
3414  		brcmf_err("error getting out of ARM core reset\n");
3415  		bcmerror = -EIO;
3416  		goto err;
3417  	}
3418  
3419  err:
3420  	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
3421  	sdio_release_host(bus->sdiodev->func1);
3422  	return bcmerror;
3423  }
3424  
3425  static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
3426  {
3427  	if (bus->ci->chip == CY_CC_43012_CHIP_ID ||
3428  	    bus->ci->chip == CY_CC_43752_CHIP_ID)
3429  		return true;
3430  	else
3431  		return false;
3432  }
3433  
3434  static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
3435  {
3436  	int err = 0;
3437  	u8 val;
3438  	u8 wakeupctrl;
3439  	u8 cardcap;
3440  	u8 chipclkcsr;
3441  
3442  	brcmf_dbg(TRACE, "Enter\n");
3443  
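	/* ULP-capable chips wait for the ALP clock on wakeup and only request
	 * HT availability; other chips wait for HT and force it on directly.
	 */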
3444  	if (brcmf_chip_is_ulp(bus->ci)) {
3445  		wakeupctrl = SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
3446  		chipclkcsr = SBSDIO_HT_AVAIL_REQ;
3447  	} else {
3448  		wakeupctrl = SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3449  		chipclkcsr = SBSDIO_FORCE_HT;
3450  	}
3451  
3452  	if (brcmf_sdio_aos_no_decode(bus)) {
3453  		cardcap = SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC;
3454  	} else {
3455  		cardcap = (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3456  			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT);
3457  	}
3458  
3459  	val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
3460  	if (err) {
3461  		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3462  		return;
3463  	}
3464  	val |= 1 << wakeupctrl;
3465  	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
3466  	if (err) {
3467  		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3468  		return;
3469  	}
3470  
3471  	/* Add CMD14 Support */
3472  	brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3473  			     cardcap,
3474  			     &err);
3475  	if (err) {
3476  		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3477  		return;
3478  	}
3479  
3480  	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3481  			   chipclkcsr, &err);
3482  	if (err) {
3483  		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3484  		return;
3485  	}
3486  
3487  	/* set flag */
3488  	bus->sr_enabled = true;
3489  	brcmf_dbg(INFO, "SR enabled\n");
3490  }
3491  
3492  /* enable KSO bit */
3493  static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
3494  {
3495  	struct brcmf_core *core = bus->sdio_core;
3496  	u8 val;
3497  	int err = 0;
3498  
3499  	brcmf_dbg(TRACE, "Enter\n");
3500  
3501  	/* KSO bit added in SDIO core rev 12 */
3502  	if (core->rev < 12)
3503  		return 0;
3504  
3505  	val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
3506  	if (err) {
3507  		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3508  		return err;
3509  	}
3510  
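	/* Set the KSO (keep SDIO on) enable bit if it is not already set */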
3511  	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3512  		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3513  			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3514  		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3515  				   val, &err);
3516  		if (err) {
3517  			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3518  			return err;
3519  		}
3520  	}
3521  
3522  	return 0;
3523  }
3524  
3525  
3526  static int brcmf_sdio_bus_preinit(struct device *dev)
3527  {
3528  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3529  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3530  	struct brcmf_sdio *bus = sdiodev->bus;
3531  	struct brcmf_core *core = bus->sdio_core;
3532  	u32 value;
3533  	__le32 iovar;
3534  	int err;
3535  
3536  	/* maxctl provided by common layer */
3537  	if (WARN_ON(!bus_if->maxctl))
3538  		return -EINVAL;
3539  
3540  	/* Allocate control receive buffer */
3541  	bus_if->maxctl += bus->roundup;
3542  	value = roundup((bus_if->maxctl + SDPCM_HDRLEN), ALIGNMENT);
3543  	value += bus->head_align;
3544  	bus->rxbuf = kmalloc(value, GFP_ATOMIC);
3545  	if (bus->rxbuf)
3546  		bus->rxblen = value;
3547  
3548  	/* the commands below use the terms tx and rx from
3549  	 * a device perspective, i.e. bus:txglom affects the
3550  	 * bus transfers from device to host.
3551  	 */
3552  	if (core->rev < 12) {
3553  		/* for sdio core rev < 12, disable txgloming */
3554  		iovar = 0;
3555  		err = brcmf_iovar_data_set(dev, "bus:txglom", &iovar,
3556  					   sizeof(iovar));
3557  	} else {
3558  		/* otherwise, set txglomalign */
3559  		value = sdiodev->settings->bus.sdio.sd_sgentry_align;
3560  		/* SDIO ADMA requires at least 32 bit alignment */
3561  		iovar = cpu_to_le32(max_t(u32, value, ALIGNMENT));
3562  		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &iovar,
3563  					   sizeof(iovar));
3564  	}
3565  
3566  	if (err < 0)
3567  		goto done;
3568  
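	/* With scatter-gather support, try to enable glomming of device-to-host
	 * frames (bus:rxglom). If the firmware accepts it, transmitted frames
	 * use the extended hardware header, so tx_hdrlen grows accordingly.
	 */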
3569  	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
3570  	if (sdiodev->sg_support) {
3571  		bus->txglom = false;
3572  		iovar = cpu_to_le32(1);
3573  		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
3574  					   &iovar, sizeof(iovar));
3575  		if (err < 0) {
3576  			/* bus:rxglom is allowed to fail */
3577  			err = 0;
3578  		} else {
3579  			bus->txglom = true;
3580  			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
3581  		}
3582  	}
3583  	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
3584  
3585  done:
3586  	return err;
3587  }
3588  
3589  static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
3590  {
3591  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3592  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3593  	struct brcmf_sdio *bus = sdiodev->bus;
3594  
3595  	return bus->ci->ramsize - bus->ci->srsize;
3596  }
3597  
3598  static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
3599  				      size_t mem_size)
3600  {
3601  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3602  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3603  	struct brcmf_sdio *bus = sdiodev->bus;
3604  	int err;
3605  	int address;
3606  	int offset;
3607  	int len;
3608  
3609  	brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
3610  		  mem_size);
3611  
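	/* Copy device RAM into the caller's buffer in MEMBLOCK-sized chunks,
	 * starting at the RAM base.
	 */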
3612  	address = bus->ci->rambase;
3613  	offset = err = 0;
3614  	sdio_claim_host(sdiodev->func1);
3615  	while (offset < mem_size) {
3616  		len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
3617  		      mem_size - offset;
3618  		err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
3619  		if (err) {
3620  			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3621  				  err, len, address);
3622  			goto done;
3623  		}
3624  		data += len;
3625  		offset += len;
3626  		address += len;
3627  	}
3628  
3629  done:
3630  	sdio_release_host(sdiodev->func1);
3631  	return err;
3632  }
3633  
3634  void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
3635  {
3636  	if (!bus->dpc_triggered) {
3637  		bus->dpc_triggered = true;
3638  		queue_work(bus->brcmf_wq, &bus->datawork);
3639  	}
3640  }
3641  
3642  void brcmf_sdio_isr(struct brcmf_sdio *bus, bool in_isr)
3643  {
3644  	brcmf_dbg(TRACE, "Enter\n");
3645  
3646  	if (!bus) {
3647  		brcmf_err("bus is null pointer, exiting\n");
3648  		return;
3649  	}
3650  
3651  	/* Count the interrupt call */
3652  	bus->sdcnt.intrcount++;
3653  	if (in_isr)
3654  		atomic_set(&bus->ipend, 1);
3655  	else
3656  		if (brcmf_sdio_intr_rstatus(bus)) {
3657  			brcmf_err("failed backplane access\n");
3658  		}
3659  
3660  	/* Disable additional interrupts (is this needed now?) */
3661  	if (!bus->intr)
3662  		brcmf_err("isr w/o interrupt configured!\n");
3663  
3664  	bus->dpc_triggered = true;
3665  	queue_work(bus->brcmf_wq, &bus->datawork);
3666  }
3667  
3668  static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3669  {
3670  	brcmf_dbg(TIMER, "Enter\n");
3671  
3672  	/* Poll period: check device if appropriate. */
3673  	if (!bus->sr_enabled &&
3674  	    bus->poll && (++bus->polltick >= bus->pollrate)) {
3675  		u32 intstatus = 0;
3676  
3677  		/* Reset poll tick */
3678  		bus->polltick = 0;
3679  
3680  		/* Check device if no interrupts */
3681  		if (!bus->intr ||
3682  		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3683  
3684  			if (!bus->dpc_triggered) {
3685  				u8 devpend;
3686  
3687  				sdio_claim_host(bus->sdiodev->func1);
3688  				devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
3689  						  SDIO_CCCR_INTx, NULL);
3690  				sdio_release_host(bus->sdiodev->func1);
3691  				intstatus = devpend & (INTR_STATUS_FUNC1 |
3692  						       INTR_STATUS_FUNC2);
3693  			}
3694  
3695  			/* If there is something, make like the ISR and
3696  			 * schedule the DPC */
3697  			if (intstatus) {
3698  				bus->sdcnt.pollcnt++;
3699  				atomic_set(&bus->ipend, 1);
3700  
3701  				bus->dpc_triggered = true;
3702  				queue_work(bus->brcmf_wq, &bus->datawork);
3703  			}
3704  		}
3705  
3706  		/* Update interrupt tracking */
3707  		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3708  	}
3709  #ifdef DEBUG
3710  	/* Poll for console output periodically */
3711  	if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() &&
3712  	    bus->console_interval != 0) {
3713  		bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL);
3714  		if (bus->console.count >= bus->console_interval) {
3715  			bus->console.count -= bus->console_interval;
3716  			sdio_claim_host(bus->sdiodev->func1);
3717  			/* Make sure backplane clock is on */
3718  			brcmf_sdio_bus_sleep(bus, false, false);
3719  			if (brcmf_sdio_readconsole(bus) < 0)
3720  				/* stop on error */
3721  				bus->console_interval = 0;
3722  			sdio_release_host(bus->sdiodev->func1);
3723  		}
3724  	}
3725  #endif				/* DEBUG */
3726  
3727  	/* On idle timeout clear activity flag and/or turn off clock */
3728  	if (!bus->dpc_triggered) {
3729  		rmb();
3730  		if ((!bus->dpc_running) && (bus->idletime > 0) &&
3731  		    (bus->clkstate == CLK_AVAIL)) {
3732  			bus->idlecount++;
3733  			if (bus->idlecount > bus->idletime) {
3734  				brcmf_dbg(SDIO, "idle\n");
3735  				sdio_claim_host(bus->sdiodev->func1);
3736  #ifdef DEBUG
3737  				if (!BRCMF_FWCON_ON() ||
3738  				    bus->console_interval == 0)
3739  #endif
3740  					brcmf_sdio_wd_timer(bus, false);
3741  				bus->idlecount = 0;
3742  				brcmf_sdio_bus_sleep(bus, true, false);
3743  				sdio_release_host(bus->sdiodev->func1);
3744  			}
3745  		} else {
3746  			bus->idlecount = 0;
3747  		}
3748  	} else {
3749  		bus->idlecount = 0;
3750  	}
3751  }
3752  
3753  static void brcmf_sdio_dataworker(struct work_struct *work)
3754  {
3755  	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3756  					      datawork);
3757  
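	/* The wmb() pairs with the rmb() in brcmf_sdio_bus_watchdog() so the
	 * watchdog sees dpc_running set before it evaluates the idle path.
	 */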
3758  	bus->dpc_running = true;
3759  	wmb();
3760  	while (READ_ONCE(bus->dpc_triggered)) {
3761  		bus->dpc_triggered = false;
3762  		brcmf_sdio_dpc(bus);
3763  		bus->idlecount = 0;
3764  	}
3765  	bus->dpc_running = false;
3766  	if (brcmf_sdiod_freezing(bus->sdiodev)) {
3767  		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
3768  		brcmf_sdiod_try_freeze(bus->sdiodev);
3769  		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
3770  	}
3771  }
3772  
3773  static void
3774  brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
3775  			     struct brcmf_chip *ci, u32 drivestrength)
3776  {
3777  	const struct sdiod_drive_str *str_tab = NULL;
3778  	u32 str_mask;
3779  	u32 str_shift;
3780  	u32 i;
3781  	u32 drivestrength_sel = 0;
3782  	u32 cc_data_temp;
3783  	u32 addr;
3784  
3785  	if (!(ci->cc_caps & CC_CAP_PMU))
3786  		return;
3787  
3788  	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
3789  	case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID, 12):
3790  		str_tab = sdiod_drvstr_tab1_1v8;
3791  		str_mask = 0x00003800;
3792  		str_shift = 11;
3793  		break;
3794  	case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID, 17):
3795  		str_tab = sdiod_drvstr_tab6_1v8;
3796  		str_mask = 0x00001800;
3797  		str_shift = 11;
3798  		break;
3799  	case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID, 17):
3800  		/* note: 43143 does not support tristate */
3801  		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
3802  		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
3803  			str_tab = sdiod_drvstr_tab2_3v3;
3804  			str_mask = 0x00000007;
3805  			str_shift = 0;
3806  		} else
3807  			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
3808  				  ci->name, drivestrength);
3809  		break;
3810  	case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID, 13):
3811  		str_tab = sdiod_drive_strength_tab5_1v8;
3812  		str_mask = 0x00003800;
3813  		str_shift = 11;
3814  		break;
3815  	default:
3816  		brcmf_dbg(INFO, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n",
3817  			  ci->name, ci->chiprev, ci->pmurev);
3818  		break;
3819  	}
3820  
3821  	if (str_tab != NULL) {
3822  		struct brcmf_core *pmu = brcmf_chip_get_pmu(ci);
3823  
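		/* Select the first table entry whose strength does not exceed
		 * the requested drive strength.
		 */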
3824  		for (i = 0; str_tab[i].strength != 0; i++) {
3825  			if (drivestrength >= str_tab[i].strength) {
3826  				drivestrength_sel = str_tab[i].sel;
3827  				break;
3828  			}
3829  		}
3830  		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
3831  		brcmf_sdiod_writel(sdiodev, addr, 1, NULL);
3832  		cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL);
3833  		cc_data_temp &= ~str_mask;
3834  		drivestrength_sel <<= str_shift;
3835  		cc_data_temp |= drivestrength_sel;
3836  		brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL);
3837  
3838  		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
3839  			  str_tab[i].strength, drivestrength, cc_data_temp);
3840  	}
3841  }
3842  
3843  static int brcmf_sdio_buscoreprep(void *ctx)
3844  {
3845  	struct brcmf_sdio_dev *sdiodev = ctx;
3846  	int err = 0;
3847  	u8 clkval, clkset;
3848  
3849  	/* Try forcing SDIO core to do ALPAvail request only */
3850  	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
3851  	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3852  	if (err) {
3853  		brcmf_err("error writing for HT off\n");
3854  		return err;
3855  	}
3856  
3857  	/* If register supported, wait for ALPAvail and then force ALP */
3858  	/* This may take up to 15 milliseconds */
3859  	clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
3860  
3861  	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
3862  		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
3863  			  clkset, clkval);
3864  		return -EACCES;
3865  	}
3866  
3867  	SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3868  					      NULL)),
3869  		 !SBSDIO_ALPAV(clkval)),
3870  		 PMU_MAX_TRANSITION_DLY);
3871  
3872  	if (!SBSDIO_ALPAV(clkval)) {
3873  		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
3874  			  clkval);
3875  		return -EBUSY;
3876  	}
3877  
3878  	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
3879  	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3880  	udelay(65);
3881  
3882  	/* Also, disable the extra SDIO pull-ups */
3883  	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
3884  
3885  	return 0;
3886  }
3887  
3888  static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
3889  					u32 rstvec)
3890  {
3891  	struct brcmf_sdio_dev *sdiodev = ctx;
3892  	struct brcmf_core *core = sdiodev->bus->sdio_core;
3893  	u32 reg_addr;
3894  
3895  	/* clear all interrupts */
3896  	reg_addr = core->base + SD_REG(intstatus);
3897  	brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
3898  
3899  	if (rstvec)
3900  		/* Write reset vector to address 0 */
3901  		brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
3902  				  sizeof(rstvec));
3903  }
3904  
3905  static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
3906  {
3907  	struct brcmf_sdio_dev *sdiodev = ctx;
3908  	u32 val, rev;
3909  
3910  	val = brcmf_sdiod_readl(sdiodev, addr, NULL);
3911  
3912  	/*
3913  	 * Special handling when reading the chipcommon chipid register:
3914  	 * the 4339 is a next-generation version of the 4335. It uses the same
3915  	 * SDIO device id as 4335 and the chipid register returns 4335 as well.
3916  	 * It can be identified as 4339 by looking at the chip revision. It
3917  	 * is corrected here so the chip.c module has the right info.
3918  	 */
3919  	if (addr == CORE_CC_REG(SI_ENUM_BASE_DEFAULT, chipid) &&
3920  	    (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
3921  	     sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
3922  		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
3923  		if (rev >= 2) {
3924  			val &= ~CID_ID_MASK;
3925  			val |= BRCM_CC_4339_CHIP_ID;
3926  		}
3927  	}
3928  
3929  	return val;
3930  }
3931  
3932  static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
3933  {
3934  	struct brcmf_sdio_dev *sdiodev = ctx;
3935  
3936  	brcmf_sdiod_writel(sdiodev, addr, val, NULL);
3937  }
3938  
3939  static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
3940  	.prepare = brcmf_sdio_buscoreprep,
3941  	.activate = brcmf_sdio_buscore_activate,
3942  	.read32 = brcmf_sdio_buscore_read32,
3943  	.write32 = brcmf_sdio_buscore_write32,
3944  };
3945  
3946  static bool
3947  brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3948  {
3949  	struct brcmf_sdio_dev *sdiodev;
3950  	u8 clkctl = 0;
3951  	int err = 0;
3952  	int reg_addr;
3953  	u32 reg_val;
3954  	u32 drivestrength;
3955  	u32 enum_base;
3956  
3957  	sdiodev = bus->sdiodev;
3958  	sdio_claim_host(sdiodev->func1);
3959  
3960  	enum_base = brcmf_chip_enum_base(sdiodev->func1->device);
3961  
3962  	pr_debug("F1 signature read @0x%08x=0x%4x\n", enum_base,
3963  		 brcmf_sdiod_readl(sdiodev, enum_base, NULL));
3964  
3965  	/*
3966  	 * Force PLL off until brcmf_chip_attach()
3967  	 * programs PLL control regs
3968  	 */
3969  
3970  	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1,
3971  			   &err);
3972  	if (!err)
3973  		clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3974  					   &err);
3975  
3976  	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3977  		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3978  			  err, BRCMF_INIT_CLKCTL1, clkctl);
3979  		goto fail;
3980  	}
3981  
3982  	bus->ci = brcmf_chip_attach(sdiodev, sdiodev->func1->device,
3983  				    &brcmf_sdio_buscore_ops);
3984  	if (IS_ERR(bus->ci)) {
3985  		brcmf_err("brcmf_chip_attach failed!\n");
3986  		bus->ci = NULL;
3987  		goto fail;
3988  	}
3989  
3990  	/* Pick up the SDIO core info struct from chip.c */
3991  	bus->sdio_core   = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
3992  	if (!bus->sdio_core)
3993  		goto fail;
3994  
3995  	/* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
3996  	sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON);
3997  	if (!sdiodev->cc_core)
3998  		goto fail;
3999  
4000  	sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
4001  						   BRCMF_BUSTYPE_SDIO,
4002  						   bus->ci->chip,
4003  						   bus->ci->chiprev);
4004  	if (!sdiodev->settings) {
4005  		brcmf_err("Failed to get device parameters\n");
4006  		goto fail;
4007  	}
4008  	/* platform specific configuration:
4009  	 *   alignments must be at least 4 bytes for ADMA
4010  	 */
4011  	bus->head_align = ALIGNMENT;
4012  	bus->sgentry_align = ALIGNMENT;
4013  	if (sdiodev->settings->bus.sdio.sd_head_align > ALIGNMENT)
4014  		bus->head_align = sdiodev->settings->bus.sdio.sd_head_align;
4015  	if (sdiodev->settings->bus.sdio.sd_sgentry_align > ALIGNMENT)
4016  		bus->sgentry_align =
4017  				sdiodev->settings->bus.sdio.sd_sgentry_align;
4018  
4019  	/* allocate scatter-gather table. sg support
4020  	 * will be disabled upon allocation failure.
4021  	 */
4022  	brcmf_sdiod_sgtable_alloc(sdiodev);
4023  
4024  	/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
4025  	 * is true or when platform data OOB irq is true).
4026  	 */
4027  	if (IS_ENABLED(CONFIG_PM_SLEEP) &&
4028  	    (sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
4029  	    ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
4030  	     (sdiodev->settings->bus.sdio.oob_irq_supported)))
4031  		sdiodev->bus_if->wowl_supported = true;
4032  
4033  	if (brcmf_sdio_kso_init(bus)) {
4034  		brcmf_err("error enabling KSO\n");
4035  		goto fail;
4036  	}
4037  
4038  	if (sdiodev->settings->bus.sdio.drive_strength)
4039  		drivestrength = sdiodev->settings->bus.sdio.drive_strength;
4040  	else
4041  		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
4042  	brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
4043  
4044  	/* Set card control so an SDIO card reset does a WLAN backplane reset */
4045  	reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
4046  	if (err)
4047  		goto fail;
4048  
4049  	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
4050  
4051  	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
4052  	if (err)
4053  		goto fail;
4054  
4055  	/* set PMUControl so a backplane reset does PMU state reload */
4056  	reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
4057  	reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err);
4058  	if (err)
4059  		goto fail;
4060  
4061  	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
4062  
4063  	brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err);
4064  	if (err)
4065  		goto fail;
4066  
4067  	sdio_release_host(sdiodev->func1);
4068  
4069  	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
4070  
4071  	/* allocate header buffer */
4072  	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
4073  	if (!bus->hdrbuf)
4074  		return false;
4075  	/* Locate an appropriately-aligned portion of hdrbuf */
4076  	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
4077  				    bus->head_align);
4078  
4079  	/* Set the poll and/or interrupt flags */
4080  	bus->intr = true;
4081  	bus->poll = false;
4082  	if (bus->poll)
4083  		bus->pollrate = 1;
4084  
4085  	return true;
4086  
4087  fail:
4088  	sdio_release_host(sdiodev->func1);
4089  	return false;
4090  }
4091  
4092  static int
4093  brcmf_sdio_watchdog_thread(void *data)
4094  {
4095  	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
4096  	int wait;
4097  
4098  	allow_signal(SIGTERM);
4099  	/* Run until signal received */
4100  	brcmf_sdiod_freezer_count(bus->sdiodev);
4101  	while (1) {
4102  		if (kthread_should_stop())
4103  			break;
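		/* Release the freezer count while sleeping so bus freezing for
		 * suspend is not blocked by this thread; re-acquire it on wake.
		 */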
4104  		brcmf_sdiod_freezer_uncount(bus->sdiodev);
4105  		wait = wait_for_completion_interruptible(&bus->watchdog_wait);
4106  		brcmf_sdiod_freezer_count(bus->sdiodev);
4107  		brcmf_sdiod_try_freeze(bus->sdiodev);
4108  		if (!wait) {
4109  			brcmf_sdio_bus_watchdog(bus);
4110  			/* Count the tick for reference */
4111  			bus->sdcnt.tickcnt++;
4112  			reinit_completion(&bus->watchdog_wait);
4113  		} else
4114  			break;
4115  	}
4116  	return 0;
4117  }
4118  
4119  static void
4120  brcmf_sdio_watchdog(struct timer_list *t)
4121  {
4122  	struct brcmf_sdio *bus = from_timer(bus, t, timer);
4123  
4124  	if (bus->watchdog_tsk) {
4125  		complete(&bus->watchdog_wait);
4126  		/* Reschedule the watchdog */
4127  		if (bus->wd_active)
4128  			mod_timer(&bus->timer,
4129  				  jiffies + BRCMF_WD_POLL);
4130  	}
4131  }
4132  
4133  static int brcmf_sdio_get_blob(struct device *dev, const struct firmware **fw,
4134  			       enum brcmf_blob_type type)
4135  {
4136  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
4137  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
4138  
4139  	switch (type) {
4140  	case BRCMF_BLOB_CLM:
4141  		*fw = sdiodev->clm_fw;
4142  		sdiodev->clm_fw = NULL;
4143  		break;
4144  	default:
4145  		return -ENOENT;
4146  	}
4147  
4148  	if (!*fw)
4149  		return -ENOENT;
4150  
4151  	return 0;
4152  }
4153  
4154  static int brcmf_sdio_bus_reset(struct device *dev)
4155  {
4156  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
4157  	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
4158  
4159  	brcmf_dbg(SDIO, "Enter\n");
4160  
4161  	/* start by unregistering irqs */
4162  	brcmf_sdiod_intr_unregister(sdiodev);
4163  
4164  	brcmf_sdiod_remove(sdiodev);
4165  
4166  	/* reset the adapter */
4167  	sdio_claim_host(sdiodev->func1);
4168  	mmc_hw_reset(sdiodev->func1->card);
4169  	sdio_release_host(sdiodev->func1);
4170  
4171  	brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
4172  	return 0;
4173  }
4174  
4175  static void brcmf_sdio_bus_remove(struct device *dev)
4176  {
4177  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
4178  	struct brcmf_sdio_dev *sdiod = bus_if->bus_priv.sdio;
4179  
4180  	device_release_driver(&sdiod->func2->dev);
4181  	device_release_driver(&sdiod->func1->dev);
4182  }
4183  
4184  static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
4185  	.stop = brcmf_sdio_bus_stop,
4186  	.preinit = brcmf_sdio_bus_preinit,
4187  	.txdata = brcmf_sdio_bus_txdata,
4188  	.txctl = brcmf_sdio_bus_txctl,
4189  	.rxctl = brcmf_sdio_bus_rxctl,
4190  	.gettxq = brcmf_sdio_bus_gettxq,
4191  	.wowl_config = brcmf_sdio_wowl_config,
4192  	.get_ramsize = brcmf_sdio_bus_get_ramsize,
4193  	.get_memdump = brcmf_sdio_bus_get_memdump,
4194  	.get_blob = brcmf_sdio_get_blob,
4195  	.debugfs_create = brcmf_sdio_debugfs_create,
4196  	.reset = brcmf_sdio_bus_reset,
4197  	.remove = brcmf_sdio_bus_remove,
4198  };
4199  
4200  #define BRCMF_SDIO_FW_CODE	0
4201  #define BRCMF_SDIO_FW_NVRAM	1
4202  #define BRCMF_SDIO_FW_CLM	2
4203  
4204  static void brcmf_sdio_firmware_callback(struct device *dev, int err,
4205  					 struct brcmf_fw_request *fwreq)
4206  {
4207  	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
4208  	struct brcmf_sdio_dev *sdiod = bus_if->bus_priv.sdio;
4209  	struct brcmf_sdio *bus = sdiod->bus;
4210  	struct brcmf_core *core = bus->sdio_core;
4211  	const struct firmware *code;
4212  	void *nvram;
4213  	u32 nvram_len;
4214  	u8 saveclk, bpreq;
4215  	u8 devctl;
4216  
4217  	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
4218  
4219  	if (err)
4220  		goto fail;
4221  
4222  	code = fwreq->items[BRCMF_SDIO_FW_CODE].binary;
4223  	nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data;
4224  	nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len;
4225  	sdiod->clm_fw = fwreq->items[BRCMF_SDIO_FW_CLM].binary;
4226  	kfree(fwreq);
4227  
4228  	/* try to download image and nvram to the dongle */
4229  	bus->alp_only = true;
4230  	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
4231  	if (err)
4232  		goto fail;
4233  	bus->alp_only = false;
4234  
4235  	/* Start the watchdog timer */
4236  	bus->sdcnt.tickcnt = 0;
4237  	brcmf_sdio_wd_timer(bus, true);
4238  
4239  	sdio_claim_host(sdiod->func1);
4240  
4241  	/* Make sure backplane clock is on, needed to generate F2 interrupt */
4242  	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4243  	if (bus->clkstate != CLK_AVAIL)
4244  		goto release;
4245  
4246  	/* Force clocks on backplane to be sure F2 interrupt propagates */
4247  	saveclk = brcmf_sdiod_readb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, &err);
4248  	if (!err) {
4249  		bpreq = saveclk;
4250  		bpreq |= brcmf_chip_is_ulp(bus->ci) ?
4251  			SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT;
4252  		brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR,
4253  				   bpreq, &err);
4254  	}
4255  	if (err) {
4256  		brcmf_err("Failed to force clock for F2: err %d\n", err);
4257  		goto release;
4258  	}
4259  
4260  	/* Enable function 2 (frame transfers) */
4261  	brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
4262  			   SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);
4263  
4264  	err = sdio_enable_func(sdiod->func2);
4265  
4266  	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
4267  
4268  	/* If F2 successfully enabled, set core and enable interrupts */
4269  	if (!err) {
4270  		/* Set up the interrupt mask and enable interrupts */
4271  		bus->hostintmask = HOSTINTMASK;
4272  		brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
4273  				   bus->hostintmask, NULL);
4274  
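		/* Per-chip tuning of the F2 watermark and MES busy control;
		 * watermark values are expressed in 4-byte words.
		 */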
4275  		switch (sdiod->func1->device) {
4276  		case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
4277  		case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752:
4278  			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
4279  				  CY_4373_F2_WATERMARK);
4280  			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
4281  					   CY_4373_F2_WATERMARK, &err);
4282  			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
4283  						   &err);
4284  			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
4285  			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
4286  					   &err);
4287  			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
4288  					   CY_4373_F1_MESBUSYCTRL, &err);
4289  			break;
4290  		case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012:
4291  			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
4292  				  CY_43012_F2_WATERMARK);
4293  			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
4294  					   CY_43012_F2_WATERMARK, &err);
4295  			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
4296  						   &err);
4297  			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
4298  			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
4299  					   &err);
4300  			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
4301  					   CY_43012_MESBUSYCTRL, &err);
4302  			break;
4303  		case SDIO_DEVICE_ID_BROADCOM_4329:
4304  		case SDIO_DEVICE_ID_BROADCOM_4339:
4305  			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
4306  				  CY_4339_F2_WATERMARK);
4307  			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
4308  					   CY_4339_F2_WATERMARK, &err);
4309  			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
4310  						   &err);
4311  			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
4312  			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
4313  					   &err);
4314  			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
4315  					   CY_4339_MESBUSYCTRL, &err);
4316  			break;
4317  		case SDIO_DEVICE_ID_BROADCOM_43455:
4318  			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
4319  				  CY_43455_F2_WATERMARK);
4320  			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
4321  					   CY_43455_F2_WATERMARK, &err);
4322  			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
4323  						   &err);
4324  			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
4325  			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
4326  					   &err);
4327  			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
4328  					   CY_43455_MESBUSYCTRL, &err);
4329  			break;
4330  		case SDIO_DEVICE_ID_BROADCOM_4359:
4331  		case SDIO_DEVICE_ID_BROADCOM_4354:
4332  		case SDIO_DEVICE_ID_BROADCOM_4356:
4333  			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
4334  				  CY_435X_F2_WATERMARK);
4335  			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
4336  					   CY_435X_F2_WATERMARK, &err);
4337  			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
4338  						   &err);
4339  			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
4340  			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
4341  					   &err);
4342  			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
4343  					   CY_435X_F1_MESBUSYCTRL, &err);
4344  			break;
4345  		default:
4346  			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
4347  					   DEFAULT_F2_WATERMARK, &err);
4348  			break;
4349  		}
4350  	} else {
4351  		/* Disable F2 again */
4352  		sdio_disable_func(sdiod->func2);
4353  		goto checkdied;
4354  	}
4355  
4356  	if (brcmf_chip_sr_capable(bus->ci)) {
4357  		brcmf_sdio_sr_init(bus);
4358  	} else {
4359  		/* Restore previous clock setting */
4360  		brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR,
4361  				   saveclk, &err);
4362  	}
4363  
4364  	if (err == 0) {
4365  		/* Assign bus interface call back */
4366  		sdiod->bus_if->dev = sdiod->dev;
4367  		sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
4368  		sdiod->bus_if->chip = bus->ci->chip;
4369  		sdiod->bus_if->chiprev = bus->ci->chiprev;
4370  
4371  		/* Allow full data communication using DPC from now on. */
4372  		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
4373  
4374  		err = brcmf_sdiod_intr_register(sdiod);
4375  		if (err != 0)
4376  			brcmf_err("intr register failed:%d\n", err);
4377  	}
4378  
4379  	/* If we didn't come up, turn off backplane clock */
4380  	if (err != 0) {
4381  		brcmf_sdio_clkctl(bus, CLK_NONE, false);
4382  		goto checkdied;
4383  	}
4384  
4385  	sdio_release_host(sdiod->func1);
4386  
4387  	err = brcmf_alloc(sdiod->dev, sdiod->settings);
4388  	if (err) {
4389  		brcmf_err("brcmf_alloc failed\n");
4390  		goto claim;
4391  	}
4392  
4393  	/* Attach to the common layer, reserve hdr space */
4394  	err = brcmf_attach(sdiod->dev);
4395  	if (err != 0) {
4396  		brcmf_err("brcmf_attach failed\n");
4397  		goto free;
4398  	}
4399  
4400  	/* ready */
4401  	return;
4402  
4403  free:
4404  	brcmf_free(sdiod->dev);
4405  claim:
4406  	sdio_claim_host(sdiod->func1);
4407  checkdied:
4408  	brcmf_sdio_checkdied(bus);
4409  release:
4410  	sdio_release_host(sdiod->func1);
4411  fail:
4412  	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
4413  	device_release_driver(&sdiod->func2->dev);
4414  	device_release_driver(dev);
4415  }
4416  
4417  static struct brcmf_fw_request *
4418  brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
4419  {
4420  	struct brcmf_fw_request *fwreq;
4421  	struct brcmf_fw_name fwnames[] = {
4422  		{ ".bin", bus->sdiodev->fw_name },
4423  		{ ".txt", bus->sdiodev->nvram_name },
4424  		{ ".clm_blob", bus->sdiodev->clm_name },
4425  	};
4426  
4427  	fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev,
4428  				       brcmf_sdio_fwnames,
4429  				       ARRAY_SIZE(brcmf_sdio_fwnames),
4430  				       fwnames, ARRAY_SIZE(fwnames));
4431  	if (!fwreq)
4432  		return NULL;
4433  
4434  	fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
4435  	fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
4436  	fwreq->items[BRCMF_SDIO_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
4437  	fwreq->items[BRCMF_SDIO_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
4438  	fwreq->board_types[0] = bus->sdiodev->settings->board_type;
4439  
4440  	return fwreq;
4441  }
4442  
4443  struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4444  {
4445  	int ret;
4446  	struct brcmf_sdio *bus;
4447  	struct workqueue_struct *wq;
4448  	struct brcmf_fw_request *fwreq;
4449  
4450  	brcmf_dbg(TRACE, "Enter\n");
4451  
4452  	/* Allocate private bus interface state */
4453  	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
4454  	if (!bus)
4455  		goto fail;
4456  
4457  	bus->sdiodev = sdiodev;
4458  	sdiodev->bus = bus;
4459  	skb_queue_head_init(&bus->glom);
4460  	bus->txbound = BRCMF_TXBOUND;
4461  	bus->rxbound = BRCMF_RXBOUND;
4462  	bus->txminmax = BRCMF_TXMINMAX;
4463  	bus->tx_seq = SDPCM_SEQ_WRAP - 1;
4464  
4465  	/* single-threaded workqueue */
4466  	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM | WQ_HIGHPRI,
4467  				     dev_name(&sdiodev->func1->dev));
4468  	if (!wq) {
4469  		brcmf_err("insufficient memory to create txworkqueue\n");
4470  		goto fail;
4471  	}
4472  	brcmf_sdiod_freezer_count(sdiodev);
4473  	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
4474  	bus->brcmf_wq = wq;
4475  
4476  	/* attempt to attach to the dongle */
4477  	if (!(brcmf_sdio_probe_attach(bus))) {
4478  		brcmf_err("brcmf_sdio_probe_attach failed\n");
4479  		goto fail;
4480  	}
4481  
4482  	spin_lock_init(&bus->rxctl_lock);
4483  	spin_lock_init(&bus->txq_lock);
4484  	init_waitqueue_head(&bus->ctrl_wait);
4485  	init_waitqueue_head(&bus->dcmd_resp_wait);
4486  
4487  	/* Set up the watchdog timer */
4488  	timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
4489  	/* Initialize watchdog thread */
4490  	init_completion(&bus->watchdog_wait);
4491  	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
4492  					bus, "brcmf_wdog/%s",
4493  					dev_name(&sdiodev->func1->dev));
4494  	if (IS_ERR(bus->watchdog_tsk)) {
4495  		pr_warn("brcmf_watchdog thread failed to start\n");
4496  		bus->watchdog_tsk = NULL;
4497  	}
4498  	/* Initialize DPC thread */
4499  	bus->dpc_triggered = false;
4500  	bus->dpc_running = false;
4501  
4502  	/* default sdio bus header length for tx packet */
4503  	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
4504  
4505  	/* Query the F2 block size, set roundup accordingly */
4506  	bus->blocksize = bus->sdiodev->func2->cur_blksize;
4507  	bus->roundup = min(max_roundup, bus->blocksize);
4508  
4509  	sdio_claim_host(bus->sdiodev->func1);
4510  
4511  	/* Disable F2 to clear any intermediate frame state on the dongle */
4512  	sdio_disable_func(bus->sdiodev->func2);
4513  
4514  	bus->rxflow = false;
4515  
4516  	/* Done with backplane-dependent accesses, can drop clock... */
4517  	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
4518  
4519  	sdio_release_host(bus->sdiodev->func1);
4520  
4521  	/* ...and initialize clock/power states */
4522  	bus->clkstate = CLK_SDONLY;
4523  	bus->idletime = BRCMF_IDLE_INTERVAL;
4524  	bus->idleclock = BRCMF_IDLE_ACTIVE;
4525  
4526  	/* SR state */
4527  	bus->sr_enabled = false;
4528  
4529  	brcmf_dbg(INFO, "completed!!\n");
4530  
4531  	fwreq = brcmf_sdio_prepare_fw_request(bus);
4532  	if (!fwreq) {
4533  		ret = -ENOMEM;
4534  		goto fail;
4535  	}
4536  
4537  	ret = brcmf_fw_get_firmwares(sdiodev->dev, fwreq,
4538  				     brcmf_sdio_firmware_callback);
4539  	if (ret != 0) {
4540  		brcmf_err("async firmware request failed: %d\n", ret);
4541  		kfree(fwreq);
4542  		goto fail;
4543  	}
4544  
4545  	return bus;
4546  
4547  fail:
4548  	brcmf_sdio_remove(bus);
4549  	return NULL;
4550  }
4551  
4552  /* Detach and free everything */
4553  void brcmf_sdio_remove(struct brcmf_sdio *bus)
4554  {
4555  	brcmf_dbg(TRACE, "Enter\n");
4556  
4557  	if (bus) {
4558  		/* Stop watchdog task */
4559  		if (bus->watchdog_tsk) {
4560  			send_sig(SIGTERM, bus->watchdog_tsk, 1);
4561  			kthread_stop(bus->watchdog_tsk);
4562  			bus->watchdog_tsk = NULL;
4563  		}
4564  
4565  		/* De-register interrupt handler */
4566  		brcmf_sdiod_intr_unregister(bus->sdiodev);
4567  
4568  		brcmf_detach(bus->sdiodev->dev);
4569  		brcmf_free(bus->sdiodev->dev);
4570  
4571  		cancel_work_sync(&bus->datawork);
4572  		if (bus->brcmf_wq)
4573  			destroy_workqueue(bus->brcmf_wq);
4574  
4575  		if (bus->ci) {
4576  			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
4577  				sdio_claim_host(bus->sdiodev->func1);
4578  				brcmf_sdio_wd_timer(bus, false);
4579  				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4580  				/* Leave the device in state where it is
4581  				 * 'passive'. This is done by resetting all
4582  				 * necessary cores.
4583  				 */
4584  				msleep(20);
4585  				brcmf_chip_set_passive(bus->ci);
4586  				brcmf_sdio_clkctl(bus, CLK_NONE, false);
4587  				sdio_release_host(bus->sdiodev->func1);
4588  			}
4589  			brcmf_chip_detach(bus->ci);
4590  		}
4591  		if (bus->sdiodev->settings)
4592  			brcmf_release_module_param(bus->sdiodev->settings);
4593  
4594  		release_firmware(bus->sdiodev->clm_fw);
4595  		bus->sdiodev->clm_fw = NULL;
4596  		kfree(bus->rxbuf);
4597  		kfree(bus->hdrbuf);
4598  		kfree(bus);
4599  	}
4600  
4601  	brcmf_dbg(TRACE, "Disconnected\n");
4602  }
4603  
4604  void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active)
4605  {
4606  	/* Totally stop the timer */
4607  	if (!active && bus->wd_active) {
4608  		del_timer_sync(&bus->timer);
4609  		bus->wd_active = false;
4610  		return;
4611  	}
4612  
4613  	/* don't start the wd until fw is loaded */
4614  	if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
4615  		return;
4616  
4617  	if (active) {
4618  		if (!bus->wd_active) {
4619  			/* Arm the timer on first use, or re-create it when the
4620  			 * watchdog period has been changed dynamically
4621  			 */
4622  			bus->timer.expires = jiffies + BRCMF_WD_POLL;
4623  			add_timer(&bus->timer);
4624  			bus->wd_active = true;
4625  		} else {
4626  			/* Re-arm the timer at the last watchdog period */
4627  			mod_timer(&bus->timer, jiffies + BRCMF_WD_POLL);
4628  		}
4629  	}
4630  }
4631  
4632  int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
4633  {
4634  	int ret;
4635  
4636  	sdio_claim_host(bus->sdiodev->func1);
4637  	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
4638  	sdio_release_host(bus->sdiodev->func1);
4639  
4640  	return ret;
4641  }
4642