xref: /openbmc/linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c (revision 4f727ecefefbd180de10e25b3e74c03dce3f1e75)
1 /* Copyright (c) 2014 Broadcom Corporation
2  *
3  * Permission to use, copy, modify, and/or distribute this software for any
4  * purpose with or without fee is hereby granted, provided that the above
5  * copyright notice and this permission notice appear in all copies.
6  *
7  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/firmware.h>
19 #include <linux/pci.h>
20 #include <linux/vmalloc.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/bcma/bcma.h>
24 #include <linux/sched.h>
25 #include <asm/unaligned.h>
26 
27 #include <soc.h>
28 #include <chipcommon.h>
29 #include <brcmu_utils.h>
30 #include <brcmu_wifi.h>
31 #include <brcm_hw_ids.h>
32 
33 /* Custom brcmf_err() that takes bus arg and passes it further */
34 #define brcmf_err(bus, fmt, ...)					\
35 	do {								\
36 		if (IS_ENABLED(CONFIG_BRCMDBG) ||			\
37 		    IS_ENABLED(CONFIG_BRCM_TRACING) ||			\
38 		    net_ratelimit())					\
39 			__brcmf_err(bus, __func__, fmt, ##__VA_ARGS__);	\
40 	} while (0)
41 
42 #include "debug.h"
43 #include "bus.h"
44 #include "commonring.h"
45 #include "msgbuf.h"
46 #include "pcie.h"
47 #include "firmware.h"
48 #include "chip.h"
49 #include "core.h"
50 #include "common.h"
51 
52 
53 enum brcmf_pcie_state {
54 	BRCMFMAC_PCIE_STATE_DOWN,
55 	BRCMFMAC_PCIE_STATE_UP
56 };
57 
58 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
59 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
60 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
61 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
62 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
63 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
64 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
65 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
66 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
67 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
68 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
69 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
70 
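/* Map a chip ID plus a chip revision mask (one bit per revision) to one of
 * the firmware names defined above, e.g. BRCM_CC_4350_CHIP_ID revisions 0-7
 * use the 4350C image and revisions 8 and up use the 4350 image.
 */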
71 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
72 	BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
73 	BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
74 	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
75 	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
76 	BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
77 	BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
78 	BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
79 	BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
80 	BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
81 	BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
82 	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
83 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
84 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
85 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
86 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
87 	BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
88 	BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
89 };
90 
91 #define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */
92 
93 #define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)
94 
95 /* backplane address space accessed by BAR0 */
96 #define	BRCMF_PCIE_BAR0_WINDOW			0x80
97 #define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
98 #define	BRCMF_PCIE_BAR0_WRAPPERBASE		0x70
99 
100 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
101 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000
102 
103 #define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
104 #define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C
105 
106 #define BRCMF_PCIE_REG_INTSTATUS		0x90
107 #define BRCMF_PCIE_REG_INTMASK			0x94
108 #define BRCMF_PCIE_REG_SBMBX			0x98
109 
110 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC
111 
112 #define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
113 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
114 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
115 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
116 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
117 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0	0x140
118 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1	0x144
119 
120 #define BRCMF_PCIE2_INTA			0x01
121 #define BRCMF_PCIE2_INTB			0x02
122 
123 #define BRCMF_PCIE_INT_0			0x01
124 #define BRCMF_PCIE_INT_1			0x02
125 #define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
126 						 BRCMF_PCIE_INT_1)
127 
128 #define BRCMF_PCIE_MB_INT_FN0_0			0x0100
129 #define BRCMF_PCIE_MB_INT_FN0_1			0x0200
130 #define	BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
131 #define	BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
132 #define	BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
133 #define	BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
134 #define	BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
135 #define	BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
136 #define	BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
137 #define	BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000
138 
139 #define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
140 						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
141 						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
142 						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
143 						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
144 						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
145 						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
146 						 BRCMF_PCIE_MB_INT_D2H3_DB1)
147 
148 #define BRCMF_PCIE_SHARED_VERSION_7		7
149 #define BRCMF_PCIE_MIN_SHARED_VERSION		5
150 #define BRCMF_PCIE_MAX_SHARED_VERSION		BRCMF_PCIE_SHARED_VERSION_7
151 #define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
152 #define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
153 #define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
154 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1		0x10000000
155 
156 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
157 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000
158 
159 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
160 #define BRCMF_SHARED_RING_BASE_OFFSET		52
161 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
162 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
163 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
164 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
165 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
166 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
167 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
168 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
169 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68
170 
171 #define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
172 #define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
173 #define BRCMF_RING_H2D_RING_MEM_OFFSET		4
174 #define BRCMF_RING_H2D_RING_STATE_OFFSET	8
175 
176 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
177 #define BRCMF_RING_MAX_ITEM_OFFSET		4
178 #define BRCMF_RING_LEN_ITEMS_OFFSET		6
179 #define BRCMF_RING_MEM_SZ			16
180 #define BRCMF_RING_STATE_SZ			8
181 
182 #define BRCMF_DEF_MAX_RXBUFPOST			255
183 
184 #define BRCMF_CONSOLE_BUFADDR_OFFSET		8
185 #define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
186 #define BRCMF_CONSOLE_WRITEIDX_OFFSET		16
187 
188 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
189 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024
190 
191 #define BRCMF_D2H_DEV_D3_ACK			0x00000001
192 #define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
193 #define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004
194 #define BRCMF_D2H_DEV_FWHALT			0x10000000
195 
196 #define BRCMF_H2D_HOST_D3_INFORM		0x00000001
197 #define BRCMF_H2D_HOST_DS_ACK			0x00000002
198 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
199 #define BRCMF_H2D_HOST_D0_INFORM		0x00000010
200 
201 #define BRCMF_PCIE_MBDATA_TIMEOUT		msecs_to_jiffies(2000)
202 
203 #define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
204 #define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
205 #define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
206 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
207 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
208 #define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
209 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
210 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
211 #define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
212 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
213 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
214 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
215 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3
216 
217 /* Magic number at a magic location to find RAM size */
218 #define BRCMF_RAMSIZE_MAGIC			0x534d4152	/* SMAR */
219 #define BRCMF_RAMSIZE_OFFSET			0x6c
220 
221 
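/* State of the firmware console located in device memory (TCM); characters
 * are accumulated in log_str until a full line can be printed.
 */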
222 struct brcmf_pcie_console {
223 	u32 base_addr;
224 	u32 buf_addr;
225 	u32 bufsize;
226 	u32 read_idx;
227 	u8 log_str[256];
228 	u8 log_idx;
229 };
230 
231 struct brcmf_pcie_shared_info {
232 	u32 tcm_base_address;
233 	u32 flags;
234 	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
235 	struct brcmf_pcie_ringbuf *flowrings;
236 	u16 max_rxbufpost;
237 	u16 max_flowrings;
238 	u16 max_submissionrings;
239 	u16 max_completionrings;
240 	u32 rx_dataoffset;
241 	u32 htod_mb_data_addr;
242 	u32 dtoh_mb_data_addr;
243 	u32 ring_info_addr;
244 	struct brcmf_pcie_console console;
245 	void *scratch;
246 	dma_addr_t scratch_dmahandle;
247 	void *ringupd;
248 	dma_addr_t ringupd_dmahandle;
249 	u8 version;
250 };
251 
252 struct brcmf_pcie_core_info {
253 	u32 base;
254 	u32 wrapbase;
255 };
256 
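/* Per-device state for the PCIe bus layer: mapped BAR regions, chip and
 * shared-RAM information, interrupt bookkeeping and the ring index
 * read/write accessors selected at init time.
 */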
257 struct brcmf_pciedev_info {
258 	enum brcmf_pcie_state state;
259 	bool in_irq;
260 	struct pci_dev *pdev;
261 	char fw_name[BRCMF_FW_NAME_LEN];
262 	char nvram_name[BRCMF_FW_NAME_LEN];
263 	void __iomem *regs;
264 	void __iomem *tcm;
265 	u32 ram_base;
266 	u32 ram_size;
267 	struct brcmf_chip *ci;
268 	u32 coreid;
269 	struct brcmf_pcie_shared_info shared;
270 	wait_queue_head_t mbdata_resp_wait;
271 	bool mbdata_completed;
272 	bool irq_allocated;
273 	bool wowl_enabled;
274 	u8 dma_idx_sz;
275 	void *idxbuf;
276 	u32 idxbuf_sz;
277 	dma_addr_t idxbuf_dmahandle;
278 	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
279 	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
280 			  u16 value);
281 	struct brcmf_mp_device *settings;
282 };
283 
284 struct brcmf_pcie_ringbuf {
285 	struct brcmf_commonring commonring;
286 	dma_addr_t dma_handle;
287 	u32 w_idx_addr;
288 	u32 r_idx_addr;
289 	struct brcmf_pciedev_info *devinfo;
290 	u8 id;
291 };
292 
293 /**
294  * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
295  *
296  * @ringmem: dongle memory pointer to ring memory location
297  * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
298  * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
299  * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
300  * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
301  * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
302  * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
303  * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
304  * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
305  * @max_flowrings: maximum number of tx flow rings supported.
306  * @max_submissionrings: maximum number of submission rings (h2d) supported.
307  * @max_completionrings: maximum number of completion rings (d2h) supported.
308  */
309 struct brcmf_pcie_dhi_ringinfo {
310 	__le32			ringmem;
311 	__le32			h2d_w_idx_ptr;
312 	__le32			h2d_r_idx_ptr;
313 	__le32			d2h_w_idx_ptr;
314 	__le32			d2h_r_idx_ptr;
315 	struct msgbuf_buf_addr	h2d_w_idx_hostaddr;
316 	struct msgbuf_buf_addr	h2d_r_idx_hostaddr;
317 	struct msgbuf_buf_addr	d2h_w_idx_hostaddr;
318 	struct msgbuf_buf_addr	d2h_r_idx_hostaddr;
319 	__le16			max_flowrings;
320 	__le16			max_submissionrings;
321 	__le16			max_completionrings;
322 };
323 
324 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
325 	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
326 	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
327 	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
328 	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
329 	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
330 };
331 
332 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
333 	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
334 	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
335 	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
336 	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
337 	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
338 };
339 
340 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
341 	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
342 	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
343 	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
344 	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
345 	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
346 };
347 
348 static void brcmf_pcie_setup(struct device *dev, int ret,
349 			     struct brcmf_fw_request *fwreq);
350 static struct brcmf_fw_request *
351 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
352 
353 static u32
354 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
355 {
356 	void __iomem *address = devinfo->regs + reg_offset;
357 
358 	return (ioread32(address));
359 }
360 
361 
362 static void
363 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
364 		       u32 value)
365 {
366 	void __iomem *address = devinfo->regs + reg_offset;
367 
368 	iowrite32(value, address);
369 }
370 
371 
372 static u8
373 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
374 {
375 	void __iomem *address = devinfo->tcm + mem_offset;
376 
377 	return (ioread8(address));
378 }
379 
380 
381 static u16
382 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
383 {
384 	void __iomem *address = devinfo->tcm + mem_offset;
385 
386 	return (ioread16(address));
387 }
388 
389 
390 static void
391 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
392 		       u16 value)
393 {
394 	void __iomem *address = devinfo->tcm + mem_offset;
395 
396 	iowrite16(value, address);
397 }
398 
399 
400 static u16
401 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
402 {
403 	u16 *address = devinfo->idxbuf + mem_offset;
404 
405 	return (*(address));
406 }
407 
408 
409 static void
410 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
411 		     u16 value)
412 {
413 	u16 *address = devinfo->idxbuf + mem_offset;
414 
415 	*(address) = value;
416 }
417 
418 
419 static u32
420 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
421 {
422 	void __iomem *address = devinfo->tcm + mem_offset;
423 
424 	return (ioread32(address));
425 }
426 
427 
428 static void
429 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
430 		       u32 value)
431 {
432 	void __iomem *address = devinfo->tcm + mem_offset;
433 
434 	iowrite32(value, address);
435 }
436 
437 
438 static u32
439 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
440 {
441 	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
442 
443 	return (ioread32(addr));
444 }
445 
446 
447 static void
448 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
449 		       u32 value)
450 {
451 	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
452 
453 	iowrite32(value, addr);
454 }
455 
456 
457 static void
458 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
459 			  void *srcaddr, u32 len)
460 {
461 	void __iomem *address = devinfo->tcm + mem_offset;
462 	__le32 *src32;
463 	__le16 *src16;
464 	u8 *src8;
465 
466 	if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
467 		if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
468 			src8 = (u8 *)srcaddr;
469 			while (len) {
470 				iowrite8(*src8, address);
471 				address++;
472 				src8++;
473 				len--;
474 			}
475 		} else {
476 			len = len / 2;
477 			src16 = (__le16 *)srcaddr;
478 			while (len) {
479 				iowrite16(le16_to_cpu(*src16), address);
480 				address += 2;
481 				src16++;
482 				len--;
483 			}
484 		}
485 	} else {
486 		len = len / 4;
487 		src32 = (__le32 *)srcaddr;
488 		while (len) {
489 			iowrite32(le32_to_cpu(*src32), address);
490 			address += 4;
491 			src32++;
492 			len--;
493 		}
494 	}
495 }
496 
497 
498 static void
499 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
500 			  void *dstaddr, u32 len)
501 {
502 	void __iomem *address = devinfo->tcm + mem_offset;
503 	__le32 *dst32;
504 	__le16 *dst16;
505 	u8 *dst8;
506 
507 	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
508 		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
509 			dst8 = (u8 *)dstaddr;
510 			while (len) {
511 				*dst8 = ioread8(address);
512 				address++;
513 				dst8++;
514 				len--;
515 			}
516 		} else {
517 			len = len / 2;
518 			dst16 = (__le16 *)dstaddr;
519 			while (len) {
520 				*dst16 = cpu_to_le16(ioread16(address));
521 				address += 2;
522 				dst16++;
523 				len--;
524 			}
525 		}
526 	} else {
527 		len = len / 4;
528 		dst32 = (__le32 *)dstaddr;
529 		while (len) {
530 			*dst32 = cpu_to_le32(ioread32(address));
531 			address += 4;
532 			dst32++;
533 			len--;
534 		}
535 	}
536 }
537 
538 
539 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
540 		CHIPCREGOFFS(reg), value)
541 
542 
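/* Point the BAR0 backplane window at the selected core and read it back,
 * repeating the write once if it did not take effect.
 */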
543 static void
544 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
545 {
546 	const struct pci_dev *pdev = devinfo->pdev;
547 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
548 	struct brcmf_core *core;
549 	u32 bar0_win;
550 
551 	core = brcmf_chip_get_core(devinfo->ci, coreid);
552 	if (core) {
553 		bar0_win = core->base;
554 		pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
555 		if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
556 					  &bar0_win) == 0) {
557 			if (bar0_win != core->base) {
558 				bar0_win = core->base;
559 				pci_write_config_dword(pdev,
560 						       BRCMF_PCIE_BAR0_WINDOW,
561 						       bar0_win);
562 			}
563 		}
564 	} else {
565 		brcmf_err(bus, "Unsupported core selected %x\n", coreid);
566 	}
567 }
568 
569 
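/* Reset the chip through the chipcommon watchdog with ASPM disabled around
 * the reset; on PCIe core revisions <= 13 the config registers listed below
 * are then read and written back via the indirect CONFIGADDR/CONFIGDATA pair.
 */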
570 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
571 {
572 	struct brcmf_core *core;
573 	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
574 			     BRCMF_PCIE_CFGREG_PM_CSR,
575 			     BRCMF_PCIE_CFGREG_MSI_CAP,
576 			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
577 			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
578 			     BRCMF_PCIE_CFGREG_MSI_DATA,
579 			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
580 			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
581 			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
582 			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
583 			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
584 	u32 i;
585 	u32 val;
586 	u32 lsc;
587 
588 	if (!devinfo->ci)
589 		return;
590 
591 	/* Disable ASPM */
592 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
593 	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
594 			      &lsc);
595 	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
596 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
597 			       val);
598 
599 	/* Watchdog reset */
600 	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
601 	WRITECC32(devinfo, watchdog, 4);
602 	msleep(100);
603 
604 	/* Restore ASPM */
605 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
606 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
607 			       lsc);
608 
609 	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
610 	if (core->rev <= 13) {
611 		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
612 			brcmf_pcie_write_reg32(devinfo,
613 					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
614 					       cfg_offset[i]);
615 			val = brcmf_pcie_read_reg32(devinfo,
616 				BRCMF_PCIE_PCIE2REG_CONFIGDATA);
617 			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
618 				  cfg_offset[i], val);
619 			brcmf_pcie_write_reg32(devinfo,
620 					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
621 					       val);
622 		}
623 	}
624 }
625 
626 
627 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
628 {
629 	u32 config;
630 
631 	/* BAR1 window may not be sized properly */
632 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
633 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
634 	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
635 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
636 
637 	device_wakeup_enable(&devinfo->pdev->dev);
638 }
639 
640 
641 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
642 {
643 	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
644 		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
645 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
646 				       5);
647 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
648 				       0);
649 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
650 				       7);
651 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
652 				       0);
653 	}
654 	return 0;
655 }
656 
657 
658 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
659 					  u32 resetintr)
660 {
661 	struct brcmf_core *core;
662 
663 	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
664 		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
665 		brcmf_chip_resetcore(core, 0, 0, 0);
666 	}
667 
668 	if (!brcmf_chip_set_active(devinfo->ci, resetintr))
669 		return -EINVAL;
670 	return 0;
671 }
672 
673 
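/* Write host-to-dongle mailbox data into shared TCM and ring the SB mailbox,
 * waiting up to roughly a second for any pending transaction to drain first.
 */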
674 static int
675 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
676 {
677 	struct brcmf_pcie_shared_info *shared;
678 	struct brcmf_core *core;
679 	u32 addr;
680 	u32 cur_htod_mb_data;
681 	u32 i;
682 
683 	shared = &devinfo->shared;
684 	addr = shared->htod_mb_data_addr;
685 	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
686 
687 	if (cur_htod_mb_data != 0)
688 		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
689 			  cur_htod_mb_data);
690 
691 	i = 0;
692 	while (cur_htod_mb_data != 0) {
693 		msleep(10);
694 		i++;
695 		if (i > 100)
696 			return -EIO;
697 		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
698 	}
699 
700 	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
701 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
702 
703 	/* Send mailbox interrupt twice as a hardware workaround */
704 	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
705 	if (core->rev <= 13)
706 		pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
707 
708 	return 0;
709 }
710 
711 
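/* Read and clear dongle-to-host mailbox data: ack deep sleep entry requests,
 * complete the D3 handshake and report a halted firmware.
 */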
712 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
713 {
714 	struct brcmf_pcie_shared_info *shared;
715 	u32 addr;
716 	u32 dtoh_mb_data;
717 
718 	shared = &devinfo->shared;
719 	addr = shared->dtoh_mb_data_addr;
720 	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
721 
722 	if (!dtoh_mb_data)
723 		return;
724 
725 	brcmf_pcie_write_tcm32(devinfo, addr, 0);
726 
727 	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
728 	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ)  {
729 		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
730 		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
731 		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
732 	}
733 	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
734 		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
735 	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
736 		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
737 		devinfo->mbdata_completed = true;
738 		wake_up(&devinfo->mbdata_resp_wait);
739 	}
740 	if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
741 		brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
742 		brcmf_fw_crashed(&devinfo->pdev->dev);
743 	}
744 }
745 
746 
747 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
748 {
749 	struct brcmf_pcie_shared_info *shared;
750 	struct brcmf_pcie_console *console;
751 	u32 addr;
752 
753 	shared = &devinfo->shared;
754 	console = &shared->console;
755 	addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
756 	console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
757 
758 	addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
759 	console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
760 	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
761 	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
762 
763 	brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
764 		  console->base_addr, console->buf_addr, console->bufsize);
765 }
766 
767 /**
768  * brcmf_pcie_bus_console_read - reads firmware messages
769  *
 * @devinfo: pointer to the device specific data/state
770  * @error: specifies if error has occurred (prints messages unconditionally)
771  */
772 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
773 					bool error)
774 {
775 	struct pci_dev *pdev = devinfo->pdev;
776 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
777 	struct brcmf_pcie_console *console;
778 	u32 addr;
779 	u8 ch;
780 	u32 newidx;
781 
782 	if (!error && !BRCMF_FWCON_ON())
783 		return;
784 
785 	console = &devinfo->shared.console;
786 	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
787 	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
788 	while (newidx != console->read_idx) {
789 		addr = console->buf_addr + console->read_idx;
790 		ch = brcmf_pcie_read_tcm8(devinfo, addr);
791 		console->read_idx++;
792 		if (console->read_idx == console->bufsize)
793 			console->read_idx = 0;
794 		if (ch == '\r')
795 			continue;
796 		console->log_str[console->log_idx] = ch;
797 		console->log_idx++;
798 		if ((ch != '\n') &&
799 		    (console->log_idx == (sizeof(console->log_str) - 2))) {
800 			ch = '\n';
801 			console->log_str[console->log_idx] = ch;
802 			console->log_idx++;
803 		}
804 		if (ch == '\n') {
805 			console->log_str[console->log_idx] = 0;
806 			if (error)
807 				brcmf_err(bus, "CONSOLE: %s", console->log_str);
808 			else
809 				pr_debug("CONSOLE: %s", console->log_str);
810 			console->log_idx = 0;
811 		}
812 	}
813 }
814 
815 
816 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
817 {
818 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
819 }
820 
821 
822 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
823 {
824 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
825 			       BRCMF_PCIE_MB_INT_D2H_DB |
826 			       BRCMF_PCIE_MB_INT_FN0_0 |
827 			       BRCMF_PCIE_MB_INT_FN0_1);
828 }
829 
830 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
831 {
832 	if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
833 		brcmf_pcie_write_reg32(devinfo,
834 				       BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
835 }
836 
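/* Hard IRQ handler: if the mailbox interrupt register is non-zero, mask
 * further interrupts and defer the real work to the threaded handler below.
 */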
837 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
838 {
839 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
840 
841 	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
842 		brcmf_pcie_intr_disable(devinfo);
843 		brcmf_dbg(PCIE, "Enter\n");
844 		return IRQ_WAKE_THREAD;
845 	}
846 	return IRQ_NONE;
847 }
848 
849 
850 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
851 {
852 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
853 	u32 status;
854 
855 	devinfo->in_irq = true;
856 	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
857 	brcmf_dbg(PCIE, "Enter %x\n", status);
858 	if (status) {
859 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
860 				       status);
861 		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
862 			      BRCMF_PCIE_MB_INT_FN0_1))
863 			brcmf_pcie_handle_mb_data(devinfo);
864 		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
865 			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
866 				brcmf_proto_msgbuf_rx_trigger(
867 							&devinfo->pdev->dev);
868 		}
869 	}
870 	brcmf_pcie_bus_console_read(devinfo, false);
871 	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
872 		brcmf_pcie_intr_enable(devinfo);
873 	devinfo->in_irq = false;
874 	return IRQ_HANDLED;
875 }
876 
877 
878 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
879 {
880 	struct pci_dev *pdev = devinfo->pdev;
881 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
882 
883 	brcmf_pcie_intr_disable(devinfo);
884 
885 	brcmf_dbg(PCIE, "Enter\n");
886 
887 	pci_enable_msi(pdev);
888 	if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
889 				 brcmf_pcie_isr_thread, IRQF_SHARED,
890 				 "brcmf_pcie_intr", devinfo)) {
891 		pci_disable_msi(pdev);
892 		brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
893 		return -EIO;
894 	}
895 	devinfo->irq_allocated = true;
896 	return 0;
897 }
898 
899 
900 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
901 {
902 	struct pci_dev *pdev = devinfo->pdev;
903 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
904 	u32 status;
905 	u32 count;
906 
907 	if (!devinfo->irq_allocated)
908 		return;
909 
910 	brcmf_pcie_intr_disable(devinfo);
911 	free_irq(pdev->irq, devinfo);
912 	pci_disable_msi(pdev);
913 
914 	msleep(50);
915 	count = 0;
916 	while ((devinfo->in_irq) && (count < 20)) {
917 		msleep(50);
918 		count++;
919 	}
920 	if (devinfo->in_irq)
921 		brcmf_err(bus, "Still in IRQ (processing) !!!\n");
922 
923 	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
924 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
925 
926 	devinfo->irq_allocated = false;
927 }
928 
929 
930 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
931 {
932 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
933 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
934 	struct brcmf_commonring *commonring = &ring->commonring;
935 
936 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
937 		return -EIO;
938 
939 	brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
940 		  commonring->w_ptr, ring->id);
941 
942 	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
943 
944 	return 0;
945 }
946 
947 
948 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
949 {
950 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
951 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
952 	struct brcmf_commonring *commonring = &ring->commonring;
953 
954 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
955 		return -EIO;
956 
957 	brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
958 		  commonring->r_ptr, ring->id);
959 
960 	devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
961 
962 	return 0;
963 }
964 
965 
966 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
967 {
968 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
969 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
970 
971 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
972 		return -EIO;
973 
974 	brcmf_dbg(PCIE, "RING !\n");
975 	/* Any arbitrary value will do, let's use 1 */
976 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
977 
978 	return 0;
979 }
980 
981 
982 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
983 {
984 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
985 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
986 	struct brcmf_commonring *commonring = &ring->commonring;
987 
988 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
989 		return -EIO;
990 
991 	commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
992 
993 	brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
994 		  commonring->w_ptr, ring->id);
995 
996 	return 0;
997 }
998 
999 
1000 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
1001 {
1002 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
1003 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
1004 	struct brcmf_commonring *commonring = &ring->commonring;
1005 
1006 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
1007 		return -EIO;
1008 
1009 	commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
1010 
1011 	brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
1012 		  commonring->r_ptr, ring->id);
1013 
1014 	return 0;
1015 }
1016 
1017 
1018 static void *
1019 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
1020 				     u32 size, u32 tcm_dma_phys_addr,
1021 				     dma_addr_t *dma_handle)
1022 {
1023 	void *ring;
1024 	u64 address;
1025 
1026 	ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
1027 				  GFP_KERNEL);
1028 	if (!ring)
1029 		return NULL;
1030 
1031 	address = (u64)*dma_handle;
1032 	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
1033 			       address & 0xffffffff);
1034 	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
1035 
1036 	memset(ring, 0, size);
1037 
1038 	return (ring);
1039 }
1040 
1041 
1042 static struct brcmf_pcie_ringbuf *
1043 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1044 			      u32 tcm_ring_phys_addr)
1045 {
1046 	void *dma_buf;
1047 	dma_addr_t dma_handle;
1048 	struct brcmf_pcie_ringbuf *ring;
1049 	u32 size;
1050 	u32 addr;
1051 	const u32 *ring_itemsize_array;
1052 
1053 	if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1054 		ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1055 	else
1056 		ring_itemsize_array = brcmf_ring_itemsize;
1057 
1058 	size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1059 	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1060 			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1061 			&dma_handle);
1062 	if (!dma_buf)
1063 		return NULL;
1064 
1065 	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1066 	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1067 	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1068 	brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1069 
1070 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1071 	if (!ring) {
1072 		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1073 				  dma_handle);
1074 		return NULL;
1075 	}
1076 	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1077 				ring_itemsize_array[ring_id], dma_buf);
1078 	ring->dma_handle = dma_handle;
1079 	ring->devinfo = devinfo;
1080 	brcmf_commonring_register_cb(&ring->commonring,
1081 				     brcmf_pcie_ring_mb_ring_bell,
1082 				     brcmf_pcie_ring_mb_update_rptr,
1083 				     brcmf_pcie_ring_mb_update_wptr,
1084 				     brcmf_pcie_ring_mb_write_rptr,
1085 				     brcmf_pcie_ring_mb_write_wptr, ring);
1086 
1087 	return (ring);
1088 }
1089 
1090 
1091 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1092 					  struct brcmf_pcie_ringbuf *ring)
1093 {
1094 	void *dma_buf;
1095 	u32 size;
1096 
1097 	if (!ring)
1098 		return;
1099 
1100 	dma_buf = ring->commonring.buf_addr;
1101 	if (dma_buf) {
1102 		size = ring->commonring.depth * ring->commonring.item_len;
1103 		dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1104 	}
1105 	kfree(ring);
1106 }
1107 
1108 
1109 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1110 {
1111 	u32 i;
1112 
1113 	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1114 		brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1115 					      devinfo->shared.commonrings[i]);
1116 		devinfo->shared.commonrings[i] = NULL;
1117 	}
1118 	kfree(devinfo->shared.flowrings);
1119 	devinfo->shared.flowrings = NULL;
1120 	if (devinfo->idxbuf) {
1121 		dma_free_coherent(&devinfo->pdev->dev,
1122 				  devinfo->idxbuf_sz,
1123 				  devinfo->idxbuf,
1124 				  devinfo->idxbuf_dmahandle);
1125 		devinfo->idxbuf = NULL;
1126 	}
1127 }
1128 
1129 
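/* Read the ring info structure from TCM, select TCM-based or host-memory
 * (DMA) ring indices, and allocate the common message rings plus the flow
 * ring bookkeeping.
 */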
1130 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1131 {
1132 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1133 	struct brcmf_pcie_ringbuf *ring;
1134 	struct brcmf_pcie_ringbuf *rings;
1135 	u32 d2h_w_idx_ptr;
1136 	u32 d2h_r_idx_ptr;
1137 	u32 h2d_w_idx_ptr;
1138 	u32 h2d_r_idx_ptr;
1139 	u32 ring_mem_ptr;
1140 	u32 i;
1141 	u64 address;
1142 	u32 bufsz;
1143 	u8 idx_offset;
1144 	struct brcmf_pcie_dhi_ringinfo ringinfo;
1145 	u16 max_flowrings;
1146 	u16 max_submissionrings;
1147 	u16 max_completionrings;
1148 
1149 	memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1150 		      sizeof(ringinfo));
1151 	if (devinfo->shared.version >= 6) {
1152 		max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1153 		max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1154 		max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1155 	} else {
1156 		max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1157 		max_flowrings = max_submissionrings -
1158 				BRCMF_NROF_H2D_COMMON_MSGRINGS;
1159 		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1160 	}
1161 
1162 	if (devinfo->dma_idx_sz != 0) {
1163 		bufsz = (max_submissionrings + max_completionrings) *
1164 			devinfo->dma_idx_sz * 2;
1165 		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1166 						     &devinfo->idxbuf_dmahandle,
1167 						     GFP_KERNEL);
1168 		if (!devinfo->idxbuf)
1169 			devinfo->dma_idx_sz = 0;
1170 	}
1171 
1172 	if (devinfo->dma_idx_sz == 0) {
1173 		d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1174 		d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1175 		h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1176 		h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1177 		idx_offset = sizeof(u32);
1178 		devinfo->write_ptr = brcmf_pcie_write_tcm16;
1179 		devinfo->read_ptr = brcmf_pcie_read_tcm16;
1180 		brcmf_dbg(PCIE, "Using TCM indices\n");
1181 	} else {
1182 		memset(devinfo->idxbuf, 0, bufsz);
1183 		devinfo->idxbuf_sz = bufsz;
1184 		idx_offset = devinfo->dma_idx_sz;
1185 		devinfo->write_ptr = brcmf_pcie_write_idx;
1186 		devinfo->read_ptr = brcmf_pcie_read_idx;
1187 
1188 		h2d_w_idx_ptr = 0;
1189 		address = (u64)devinfo->idxbuf_dmahandle;
1190 		ringinfo.h2d_w_idx_hostaddr.low_addr =
1191 			cpu_to_le32(address & 0xffffffff);
1192 		ringinfo.h2d_w_idx_hostaddr.high_addr =
1193 			cpu_to_le32(address >> 32);
1194 
1195 		h2d_r_idx_ptr = h2d_w_idx_ptr +
1196 				max_submissionrings * idx_offset;
1197 		address += max_submissionrings * idx_offset;
1198 		ringinfo.h2d_r_idx_hostaddr.low_addr =
1199 			cpu_to_le32(address & 0xffffffff);
1200 		ringinfo.h2d_r_idx_hostaddr.high_addr =
1201 			cpu_to_le32(address >> 32);
1202 
1203 		d2h_w_idx_ptr = h2d_r_idx_ptr +
1204 				max_submissionrings * idx_offset;
1205 		address += max_submissionrings * idx_offset;
1206 		ringinfo.d2h_w_idx_hostaddr.low_addr =
1207 			cpu_to_le32(address & 0xffffffff);
1208 		ringinfo.d2h_w_idx_hostaddr.high_addr =
1209 			cpu_to_le32(address >> 32);
1210 
1211 		d2h_r_idx_ptr = d2h_w_idx_ptr +
1212 				max_completionrings * idx_offset;
1213 		address += max_completionrings * idx_offset;
1214 		ringinfo.d2h_r_idx_hostaddr.low_addr =
1215 			cpu_to_le32(address & 0xffffffff);
1216 		ringinfo.d2h_r_idx_hostaddr.high_addr =
1217 			cpu_to_le32(address >> 32);
1218 
1219 		memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1220 			    &ringinfo, sizeof(ringinfo));
1221 		brcmf_dbg(PCIE, "Using host memory indices\n");
1222 	}
1223 
1224 	ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1225 
1226 	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1227 		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1228 		if (!ring)
1229 			goto fail;
1230 		ring->w_idx_addr = h2d_w_idx_ptr;
1231 		ring->r_idx_addr = h2d_r_idx_ptr;
1232 		ring->id = i;
1233 		devinfo->shared.commonrings[i] = ring;
1234 
1235 		h2d_w_idx_ptr += idx_offset;
1236 		h2d_r_idx_ptr += idx_offset;
1237 		ring_mem_ptr += BRCMF_RING_MEM_SZ;
1238 	}
1239 
1240 	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1241 	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1242 		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1243 		if (!ring)
1244 			goto fail;
1245 		ring->w_idx_addr = d2h_w_idx_ptr;
1246 		ring->r_idx_addr = d2h_r_idx_ptr;
1247 		ring->id = i;
1248 		devinfo->shared.commonrings[i] = ring;
1249 
1250 		d2h_w_idx_ptr += idx_offset;
1251 		d2h_r_idx_ptr += idx_offset;
1252 		ring_mem_ptr += BRCMF_RING_MEM_SZ;
1253 	}
1254 
1255 	devinfo->shared.max_flowrings = max_flowrings;
1256 	devinfo->shared.max_submissionrings = max_submissionrings;
1257 	devinfo->shared.max_completionrings = max_completionrings;
1258 	rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1259 	if (!rings)
1260 		goto fail;
1261 
1262 	brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1263 
1264 	for (i = 0; i < max_flowrings; i++) {
1265 		ring = &rings[i];
1266 		ring->devinfo = devinfo;
1267 		ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1268 		brcmf_commonring_register_cb(&ring->commonring,
1269 					     brcmf_pcie_ring_mb_ring_bell,
1270 					     brcmf_pcie_ring_mb_update_rptr,
1271 					     brcmf_pcie_ring_mb_update_wptr,
1272 					     brcmf_pcie_ring_mb_write_rptr,
1273 					     brcmf_pcie_ring_mb_write_wptr,
1274 					     ring);
1275 		ring->w_idx_addr = h2d_w_idx_ptr;
1276 		ring->r_idx_addr = h2d_r_idx_ptr;
1277 		h2d_w_idx_ptr += idx_offset;
1278 		h2d_r_idx_ptr += idx_offset;
1279 	}
1280 	devinfo->shared.flowrings = rings;
1281 
1282 	return 0;
1283 
1284 fail:
1285 	brcmf_err(bus, "Allocating ring buffers failed\n");
1286 	brcmf_pcie_release_ringbuffers(devinfo);
1287 	return -ENOMEM;
1288 }
1289 
1290 
1291 static void
1292 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1293 {
1294 	if (devinfo->shared.scratch)
1295 		dma_free_coherent(&devinfo->pdev->dev,
1296 				  BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1297 				  devinfo->shared.scratch,
1298 				  devinfo->shared.scratch_dmahandle);
1299 	if (devinfo->shared.ringupd)
1300 		dma_free_coherent(&devinfo->pdev->dev,
1301 				  BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1302 				  devinfo->shared.ringupd,
1303 				  devinfo->shared.ringupd_dmahandle);
1304 }
1305 
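/* Allocate the D2H scratch and ring-update DMA buffers and publish their
 * addresses and lengths in the shared TCM area.
 */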
1306 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1307 {
1308 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1309 	u64 address;
1310 	u32 addr;
1311 
1312 	devinfo->shared.scratch =
1313 		dma_alloc_coherent(&devinfo->pdev->dev,
1314 				   BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1315 				   &devinfo->shared.scratch_dmahandle,
1316 				   GFP_KERNEL);
1317 	if (!devinfo->shared.scratch)
1318 		goto fail;
1319 
1320 	addr = devinfo->shared.tcm_base_address +
1321 	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1322 	address = (u64)devinfo->shared.scratch_dmahandle;
1323 	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1324 	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1325 	addr = devinfo->shared.tcm_base_address +
1326 	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1327 	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1328 
1329 	devinfo->shared.ringupd =
1330 		dma_alloc_coherent(&devinfo->pdev->dev,
1331 				   BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1332 				   &devinfo->shared.ringupd_dmahandle,
1333 				   GFP_KERNEL);
1334 	if (!devinfo->shared.ringupd)
1335 		goto fail;
1336 
1337 	addr = devinfo->shared.tcm_base_address +
1338 	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1339 	address = (u64)devinfo->shared.ringupd_dmahandle;
1340 	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1341 	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1342 	addr = devinfo->shared.tcm_base_address +
1343 	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1344 	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1345 	return 0;
1346 
1347 fail:
1348 	brcmf_err(bus, "Allocating scratch buffers failed\n");
1349 	brcmf_pcie_release_scratchbuffers(devinfo);
1350 	return -ENOMEM;
1351 }
1352 
1353 
1354 static void brcmf_pcie_down(struct device *dev)
1355 {
1356 }
1357 
1358 
1359 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1360 {
1361 	return 0;
1362 }
1363 
1364 
1365 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1366 				uint len)
1367 {
1368 	return 0;
1369 }
1370 
1371 
1372 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1373 				uint len)
1374 {
1375 	return 0;
1376 }
1377 
1378 
1379 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1380 {
1381 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1382 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1383 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1384 
1385 	brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1386 	devinfo->wowl_enabled = enabled;
1387 }
1388 
1389 
1390 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1391 {
1392 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1393 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1394 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1395 
1396 	return devinfo->ci->ramsize - devinfo->ci->srsize;
1397 }
1398 
1399 
1400 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1401 {
1402 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1403 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1404 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1405 
1406 	brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1407 	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1408 	return 0;
1409 }
1410 
1411 static
1412 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1413 {
1414 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1415 	struct brcmf_fw_request *fwreq;
1416 	struct brcmf_fw_name fwnames[] = {
1417 		{ ext, fw_name },
1418 	};
1419 
1420 	fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1421 				       brcmf_pcie_fwnames,
1422 				       ARRAY_SIZE(brcmf_pcie_fwnames),
1423 				       fwnames, ARRAY_SIZE(fwnames));
1424 	if (!fwreq)
1425 		return -ENOMEM;
1426 
1427 	kfree(fwreq);
1428 	return 0;
1429 }
1430 
1431 static int brcmf_pcie_reset(struct device *dev)
1432 {
1433 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1434 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1435 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1436 	struct brcmf_fw_request *fwreq;
1437 	int err;
1438 
1439 	brcmf_pcie_bus_console_read(devinfo, true);
1440 
1441 	brcmf_detach(dev);
1442 
1443 	brcmf_pcie_release_irq(devinfo);
1444 	brcmf_pcie_release_scratchbuffers(devinfo);
1445 	brcmf_pcie_release_ringbuffers(devinfo);
1446 	brcmf_pcie_reset_device(devinfo);
1447 
1448 	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1449 	if (!fwreq) {
1450 		dev_err(dev, "Failed to prepare FW request\n");
1451 		return -ENOMEM;
1452 	}
1453 
1454 	err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
1455 	if (err) {
1456 		dev_err(dev, "Failed to get firmware\n");
1457 		kfree(fwreq);
1458 	}
1459 
1460 	return err;
1461 }
1462 
1463 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1464 	.txdata = brcmf_pcie_tx,
1465 	.stop = brcmf_pcie_down,
1466 	.txctl = brcmf_pcie_tx_ctlpkt,
1467 	.rxctl = brcmf_pcie_rx_ctlpkt,
1468 	.wowl_config = brcmf_pcie_wowl_config,
1469 	.get_ramsize = brcmf_pcie_get_ramsize,
1470 	.get_memdump = brcmf_pcie_get_memdump,
1471 	.get_fwname = brcmf_pcie_get_fwname,
1472 	.reset = brcmf_pcie_reset,
1473 };
1474 
1475 
1476 static void
1477 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1478 			  u32 data_len)
1479 {
1480 	__le32 *field;
1481 	u32 newsize;
1482 
1483 	if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1484 		return;
1485 
1486 	field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1487 	if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1488 		return;
1489 	field++;
1490 	newsize = le32_to_cpup(field);
1491 
1492 	brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1493 		  newsize);
1494 	devinfo->ci->ramsize = newsize;
1495 }
1496 
1497 
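/* Parse the shared structure placed in device RAM by the firmware: protocol
 * version and flags, DMA index support, rx buffer post limit, mailbox data
 * addresses and the location of the ring info structure.
 */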
1498 static int
1499 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1500 			       u32 sharedram_addr)
1501 {
1502 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1503 	struct brcmf_pcie_shared_info *shared;
1504 	u32 addr;
1505 
1506 	shared = &devinfo->shared;
1507 	shared->tcm_base_address = sharedram_addr;
1508 
1509 	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1510 	shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1511 	brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1512 	if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1513 	    (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1514 		brcmf_err(bus, "Unsupported PCIE version %d\n",
1515 			  shared->version);
1516 		return -EINVAL;
1517 	}
1518 
1519 	/* check whether firmware supports DMA indices */
1520 	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1521 		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1522 			devinfo->dma_idx_sz = sizeof(u16);
1523 		else
1524 			devinfo->dma_idx_sz = sizeof(u32);
1525 	}
1526 
1527 	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1528 	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1529 	if (shared->max_rxbufpost == 0)
1530 		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1531 
1532 	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1533 	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1534 
1535 	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1536 	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1537 
1538 	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1539 	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1540 
1541 	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1542 	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1543 
1544 	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1545 		  shared->max_rxbufpost, shared->rx_dataoffset);
1546 
1547 	brcmf_pcie_bus_console_init(devinfo);
1548 
1549 	return 0;
1550 }
1551 
1552 
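/* Halt the ARM, copy the firmware (and NVRAM, if any) into device RAM,
 * restart the ARM and poll the last word of RAM until the firmware writes
 * the shared area address there, or the 2 second timeout expires.
 */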
1553 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1554 					const struct firmware *fw, void *nvram,
1555 					u32 nvram_len)
1556 {
1557 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1558 	u32 sharedram_addr;
1559 	u32 sharedram_addr_written;
1560 	u32 loop_counter;
1561 	int err;
1562 	u32 address;
1563 	u32 resetintr;
1564 
1565 	brcmf_dbg(PCIE, "Halt ARM.\n");
1566 	err = brcmf_pcie_enter_download_state(devinfo);
1567 	if (err)
1568 		return err;
1569 
1570 	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1571 	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1572 				  (void *)fw->data, fw->size);
1573 
1574 	resetintr = get_unaligned_le32(fw->data);
1575 	release_firmware(fw);
1576 
1577 	/* Clear the last 4 bytes of RAM; the firmware writes the shared
1578 	 * area address there once it is up and running.
1579 	 */
1580 	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1581 
1582 	if (nvram) {
1583 		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1584 		address = devinfo->ci->rambase + devinfo->ci->ramsize -
1585 			  nvram_len;
1586 		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1587 		brcmf_fw_nvram_free(nvram);
1588 	} else {
1589 		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1590 			  devinfo->nvram_name);
1591 	}
1592 
1593 	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1594 						       devinfo->ci->ramsize -
1595 						       4);
1596 	brcmf_dbg(PCIE, "Bring ARM in running state\n");
1597 	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1598 	if (err)
1599 		return err;
1600 
1601 	brcmf_dbg(PCIE, "Wait for FW init\n");
1602 	sharedram_addr = sharedram_addr_written;
1603 	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1604 	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1605 		msleep(50);
1606 		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1607 						       devinfo->ci->ramsize -
1608 						       4);
1609 		loop_counter--;
1610 	}
1611 	if (sharedram_addr == sharedram_addr_written) {
1612 		brcmf_err(bus, "FW failed to initialize\n");
1613 		return -ENODEV;
1614 	}
1615 	if (sharedram_addr < devinfo->ci->rambase ||
1616 	    sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1617 		brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1618 			  sharedram_addr);
1619 		return -ENODEV;
1620 	}
1621 	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1622 
1623 	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1624 }
1625 
1626 
1627 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1628 {
1629 	struct pci_dev *pdev = devinfo->pdev;
1630 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1631 	int err;
1632 	phys_addr_t  bar0_addr, bar1_addr;
1633 	ulong bar1_size;
1634 
1635 	err = pci_enable_device(pdev);
1636 	if (err) {
1637 		brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1638 		return err;
1639 	}
1640 
1641 	pci_set_master(pdev);
1642 
1643 	/* Bar-0 mapped address */
1644 	bar0_addr = pci_resource_start(pdev, 0);
1645 	/* Bar-1 mapped address */
1646 	bar1_addr = pci_resource_start(pdev, 2);
1647 	/* read Bar-1 mapped memory range */
1648 	bar1_size = pci_resource_len(pdev, 2);
1649 	if ((bar1_size == 0) || (bar1_addr == 0)) {
1650 		brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1651 			  bar1_size, (unsigned long long)bar1_addr);
1652 		return -EINVAL;
1653 	}
1654 
1655 	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1656 	devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
1657 
1658 	if (!devinfo->regs || !devinfo->tcm) {
1659 		brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1660 			  devinfo->tcm);
1661 		return -EINVAL;
1662 	}
1663 	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1664 		  devinfo->regs, (unsigned long long)bar0_addr);
1665 	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1666 		  devinfo->tcm, (unsigned long long)bar1_addr,
1667 		  (unsigned int)bar1_size);
1668 
1669 	return 0;
1670 }
1671 
1672 
1673 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1674 {
1675 	if (devinfo->tcm)
1676 		iounmap(devinfo->tcm);
1677 	if (devinfo->regs)
1678 		iounmap(devinfo->regs);
1679 
1680 	pci_disable_device(devinfo->pdev);
1681 }
1682 
1683 
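/* Backplane register access goes through the 4 KiB BAR0 window: program the
 * window register with the aligned backplane address and return the offset
 * within the window, e.g. 0x18103120 becomes window base 0x18103000 with
 * BAR0 offset 0x120.
 */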
1684 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1685 {
1686 	u32 ret_addr;
1687 
1688 	ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1689 	addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1690 	pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1691 
1692 	return ret_addr;
1693 }
1694 
1695 
1696 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1697 {
1698 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1699 
1700 	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1701 	return brcmf_pcie_read_reg32(devinfo, addr);
1702 }
1703 
1704 
1705 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1706 {
1707 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1708 
1709 	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1710 	brcmf_pcie_write_reg32(devinfo, addr, value);
1711 }
1712 
1713 
1714 static int brcmf_pcie_buscoreprep(void *ctx)
1715 {
1716 	return brcmf_pcie_get_resource(ctx);
1717 }
1718 
1719 
1720 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1721 {
1722 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1723 	u32 val;
1724 
1725 	devinfo->ci = chip;
1726 	brcmf_pcie_reset_device(devinfo);
1727 
1728 	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1729 	if (val != 0xffffffff)
1730 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1731 				       val);
1732 
1733 	return 0;
1734 }
1735 
1736 
1737 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1738 					u32 rstvec)
1739 {
1740 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1741 
1742 	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1743 }
1744 
1745 
1746 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1747 	.prepare = brcmf_pcie_buscoreprep,
1748 	.reset = brcmf_pcie_buscore_reset,
1749 	.activate = brcmf_pcie_buscore_activate,
1750 	.read32 = brcmf_pcie_buscore_read32,
1751 	.write32 = brcmf_pcie_buscore_write32,
1752 };
1753 
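/* Indices of the firmware binary and NVRAM items in the firmware request. */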
1754 #define BRCMF_PCIE_FW_CODE	0
1755 #define BRCMF_PCIE_FW_NVRAM	1
1756 
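/* Completion callback for the firmware request issued from the probe path:
 * download firmware and NVRAM to the device, set up the rings and the
 * interrupt, hook the commonrings and flowrings into the msgbuf layer and
 * attach to the common driver core. On any failure the driver is released
 * from the device.
 */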
1757 static void brcmf_pcie_setup(struct device *dev, int ret,
1758 			     struct brcmf_fw_request *fwreq)
1759 {
1760 	const struct firmware *fw;
1761 	void *nvram;
1762 	struct brcmf_bus *bus;
1763 	struct brcmf_pciedev *pcie_bus_dev;
1764 	struct brcmf_pciedev_info *devinfo;
1765 	struct brcmf_commonring **flowrings;
1766 	u32 i, nvram_len;
1767 
1768 	/* check firmware loading result */
1769 	if (ret)
1770 		goto fail;
1771 
1772 	bus = dev_get_drvdata(dev);
1773 	pcie_bus_dev = bus->bus_priv.pcie;
1774 	devinfo = pcie_bus_dev->devinfo;
1775 	brcmf_pcie_attach(devinfo);
1776 
1777 	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
1778 	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
1779 	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
1780 	kfree(fwreq);
1781 
1782 	/* Some firmware images define the size of the device memory inside
1783 	 * the firmware itself. This is because part of the device memory is
1784 	 * shared and the division is determined by the firmware. Parse the
1785 	 * firmware and adjust the chip memory size now.
1786 	 */
1787 	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
1788 
1789 	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1790 	if (ret)
1791 		goto fail;
1792 
1793 	devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1794 
1795 	ret = brcmf_pcie_init_ringbuffers(devinfo);
1796 	if (ret)
1797 		goto fail;
1798 
1799 	ret = brcmf_pcie_init_scratchbuffers(devinfo);
1800 	if (ret)
1801 		goto fail;
1802 
1803 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1804 	ret = brcmf_pcie_request_irq(devinfo);
1805 	if (ret)
1806 		goto fail;
1807 
1808 	/* hook the commonrings into the bus structure. */
1809 	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1810 		bus->msgbuf->commonrings[i] =
1811 				&devinfo->shared.commonrings[i]->commonring;
1812 
1813 	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
1814 			    GFP_KERNEL);
1815 	if (!flowrings)
1816 		goto fail;
1817 
1818 	for (i = 0; i < devinfo->shared.max_flowrings; i++)
1819 		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1820 	bus->msgbuf->flowrings = flowrings;
1821 
1822 	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1823 	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1824 	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
1825 
1826 	init_waitqueue_head(&devinfo->mbdata_resp_wait);
1827 
1828 	brcmf_pcie_intr_enable(devinfo);
1829 	brcmf_pcie_hostready(devinfo);
1830 	if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
1831 		return;
1832 
1833 	brcmf_pcie_bus_console_read(devinfo, false);
1834 
1835 fail:
1836 	device_release_driver(dev);
1837 }
1838 
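/* Build the firmware request for this chip: a firmware binary plus an
 * optional NVRAM file, tagged with board type and PCI domain/bus number so
 * that board-specific files can be selected.
 */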
1839 static struct brcmf_fw_request *
1840 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1841 {
1842 	struct brcmf_fw_request *fwreq;
1843 	struct brcmf_fw_name fwnames[] = {
1844 		{ ".bin", devinfo->fw_name },
1845 		{ ".txt", devinfo->nvram_name },
1846 	};
1847 
1848 	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
1849 				       brcmf_pcie_fwnames,
1850 				       ARRAY_SIZE(brcmf_pcie_fwnames),
1851 				       fwnames, ARRAY_SIZE(fwnames));
1852 	if (!fwreq)
1853 		return NULL;
1854 
1855 	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1856 	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1857 	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1858 	fwreq->board_type = devinfo->settings->board_type;
1859 	/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1860 	fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1861 	fwreq->bus_nr = devinfo->pdev->bus->number;
1862 
1863 	return fwreq;
1864 }
1865 
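/* PCI probe: allocate the per-device state, attach the chip through the
 * buscore callbacks (which maps the BARs), allocate the bus and msgbuf
 * structures and fire off the asynchronous firmware request. Setup continues
 * in brcmf_pcie_setup() once the firmware has been loaded.
 */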
1866 static int
1867 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1868 {
1869 	int ret;
1870 	struct brcmf_fw_request *fwreq;
1871 	struct brcmf_pciedev_info *devinfo;
1872 	struct brcmf_pciedev *pcie_bus_dev;
1873 	struct brcmf_bus *bus;
1874 
1875 	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
1876 
1877 	ret = -ENOMEM;
1878 	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1879 	if (devinfo == NULL)
1880 		return ret;
1881 
1882 	devinfo->pdev = pdev;
1883 	pcie_bus_dev = NULL;
1884 	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1885 	if (IS_ERR(devinfo->ci)) {
1886 		ret = PTR_ERR(devinfo->ci);
1887 		devinfo->ci = NULL;
1888 		goto fail;
1889 	}
1890 
1891 	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1892 	if (pcie_bus_dev == NULL) {
1893 		ret = -ENOMEM;
1894 		goto fail;
1895 	}
1896 
1897 	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
1898 						   BRCMF_BUSTYPE_PCIE,
1899 						   devinfo->ci->chip,
1900 						   devinfo->ci->chiprev);
1901 	if (!devinfo->settings) {
1902 		ret = -ENOMEM;
1903 		goto fail;
1904 	}
1905 
1906 	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1907 	if (!bus) {
1908 		ret = -ENOMEM;
1909 		goto fail;
1910 	}
1911 	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1912 	if (!bus->msgbuf) {
1913 		ret = -ENOMEM;
1914 		kfree(bus);
1915 		goto fail;
1916 	}
1917 
1918 	/* hook it all together. */
1919 	pcie_bus_dev->devinfo = devinfo;
1920 	pcie_bus_dev->bus = bus;
1921 	bus->dev = &pdev->dev;
1922 	bus->bus_priv.pcie = pcie_bus_dev;
1923 	bus->ops = &brcmf_pcie_bus_ops;
1924 	bus->proto_type = BRCMF_PROTO_MSGBUF;
1925 	bus->chip = devinfo->coreid;
1926 	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1927 	dev_set_drvdata(&pdev->dev, bus);
1928 
1929 	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1930 	if (!fwreq) {
1931 		ret = -ENOMEM;
1932 		goto fail_bus;
1933 	}
1934 
1935 	ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
1936 	if (ret < 0) {
1937 		kfree(fwreq);
1938 		goto fail_bus;
1939 	}
1940 	return 0;
1941 
1942 fail_bus:
1943 	kfree(bus->msgbuf);
1944 	kfree(bus);
1945 fail:
1946 	brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
1947 	brcmf_pcie_release_resource(devinfo);
1948 	if (devinfo->ci)
1949 		brcmf_chip_detach(devinfo->ci);
1950 	if (devinfo->settings)
1951 		brcmf_release_module_param(devinfo->settings);
1952 	kfree(pcie_bus_dev);
1953 	kfree(devinfo);
1954 	return ret;
1955 }
1956 
1957 
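/* PCI remove: bring the device down, detach from the common driver and
 * release the interrupt, buffers, chip handle and mapped resources.
 */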
1958 static void
1959 brcmf_pcie_remove(struct pci_dev *pdev)
1960 {
1961 	struct brcmf_pciedev_info *devinfo;
1962 	struct brcmf_bus *bus;
1963 
1964 	brcmf_dbg(PCIE, "Enter\n");
1965 
1966 	bus = dev_get_drvdata(&pdev->dev);
1967 	if (bus == NULL)
1968 		return;
1969 
1970 	devinfo = bus->bus_priv.pcie->devinfo;
1971 
1972 	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1973 	if (devinfo->ci)
1974 		brcmf_pcie_intr_disable(devinfo);
1975 
1976 	brcmf_detach(&pdev->dev);
1977 
1978 	kfree(bus->bus_priv.pcie);
1979 	kfree(bus->msgbuf->flowrings);
1980 	kfree(bus->msgbuf);
1981 	kfree(bus);
1982 
1983 	brcmf_pcie_release_irq(devinfo);
1984 	brcmf_pcie_release_scratchbuffers(devinfo);
1985 	brcmf_pcie_release_ringbuffers(devinfo);
1986 	brcmf_pcie_reset_device(devinfo);
1987 	brcmf_pcie_release_resource(devinfo);
1988 
1989 	if (devinfo->ci)
1990 		brcmf_chip_detach(devinfo->ci);
1991 	if (devinfo->settings)
1992 		brcmf_release_module_param(devinfo->settings);
1993 
1994 	kfree(devinfo);
1995 	dev_set_drvdata(&pdev->dev, NULL);
1996 }
1997 
1998 
1999 #ifdef CONFIG_PM
2000 
2001 
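/* Suspend: mark the bus down, ask the firmware to enter D3 via the
 * host-to-device mailbox and wait for its acknowledgement; fail with -EIO
 * when the device does not respond in time.
 */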
2002 static int brcmf_pcie_pm_enter_D3(struct device *dev)
2003 {
2004 	struct brcmf_pciedev_info *devinfo;
2005 	struct brcmf_bus *bus;
2006 
2007 	brcmf_dbg(PCIE, "Enter\n");
2008 
2009 	bus = dev_get_drvdata(dev);
2010 	devinfo = bus->bus_priv.pcie->devinfo;
2011 
2012 	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
2013 
2014 	devinfo->mbdata_completed = false;
2015 	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
2016 
2017 	wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
2018 			   BRCMF_PCIE_MBDATA_TIMEOUT);
2019 	if (!devinfo->mbdata_completed) {
2020 		brcmf_err(bus, "Timeout on response for entering D3 substate\n");
2021 		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2022 		return -EIO;
2023 	}
2024 
2025 	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
2026 
2027 	return 0;
2028 }
2029 
2030 
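/* Resume: if the device kept its state across suspend (interrupt mask is
 * still programmed), inform the firmware of D0 and bring the bus back up;
 * otherwise fall back to a full remove/probe cycle to re-download the
 * firmware.
 */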
2031 static int brcmf_pcie_pm_leave_D3(struct device *dev)
2032 {
2033 	struct brcmf_pciedev_info *devinfo;
2034 	struct brcmf_bus *bus;
2035 	struct pci_dev *pdev;
2036 	int err;
2037 
2038 	brcmf_dbg(PCIE, "Enter\n");
2039 
2040 	bus = dev_get_drvdata(dev);
2041 	devinfo = bus->bus_priv.pcie->devinfo;
2042 	brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
2043 
2044 	/* Check if device is still up and running, if so we are ready */
2045 	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
2046 		brcmf_dbg(PCIE, "Try to wake up device....\n");
2047 		if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
2048 			goto cleanup;
2049 		brcmf_dbg(PCIE, "Hot resume, continue....\n");
2050 		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
2051 		brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
2052 		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2053 		brcmf_pcie_intr_enable(devinfo);
2054 		brcmf_pcie_hostready(devinfo);
2055 		return 0;
2056 	}
2057 
2058 cleanup:
2059 	brcmf_chip_detach(devinfo->ci);
2060 	devinfo->ci = NULL;
2061 	pdev = devinfo->pdev;
2062 	brcmf_pcie_remove(pdev);
2063 
2064 	err = brcmf_pcie_probe(pdev, NULL);
2065 	if (err)
2066 		brcmf_err(bus, "probe after resume failed, err=%d\n", err);
2067 
2068 	return err;
2069 }
2070 
2071 
2072 static const struct dev_pm_ops brcmf_pciedrvr_pm = {
2073 	.suspend = brcmf_pcie_pm_enter_D3,
2074 	.resume = brcmf_pcie_pm_leave_D3,
2075 	.freeze = brcmf_pcie_pm_enter_D3,
2076 	.restore = brcmf_pcie_pm_leave_D3,
2077 };
2078 
2079 
2080 #endif /* CONFIG_PM */
2081 
2082 
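/* Match Broadcom PCIe wireless devices by device id (and optionally
 * subsystem ids), restricted to the network device class.
 */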
2083 #define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2084 	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2085 #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
2086 	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2087 	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2088 
2089 static const struct pci_device_id brcmf_pcie_devid_table[] = {
2090 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
2091 	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
2092 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
2093 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
2094 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
2095 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
2096 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
2097 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
2098 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
2099 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
2100 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
2101 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
2102 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
2103 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
2104 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
2105 	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
2106 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
2107 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
2108 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
2109 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
2110 	{ /* end: all zeroes */ }
2111 };
2112 
2113 
2114 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
2115 
2116 
2117 static struct pci_driver brcmf_pciedrvr = {
2118 	.node = {},
2119 	.name = KBUILD_MODNAME,
2120 	.id_table = brcmf_pcie_devid_table,
2121 	.probe = brcmf_pcie_probe,
2122 	.remove = brcmf_pcie_remove,
2123 #ifdef CONFIG_PM
2124 	.driver.pm = &brcmf_pciedrvr_pm,
2125 #endif
2126 	.driver.coredump = brcmf_dev_coredump,
2127 };
2128 
2129 
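/* Register the PCIe bus driver with the PCI core. */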
2130 void brcmf_pcie_register(void)
2131 {
2132 	int err;
2133 
2134 	brcmf_dbg(PCIE, "Enter\n");
2135 	err = pci_register_driver(&brcmf_pciedrvr);
2136 	if (err)
2137 		brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
2138 			  err);
2139 }
2140 
2141 
2142 void brcmf_pcie_exit(void)
2143 {
2144 	brcmf_dbg(PCIE, "Enter\n");
2145 	pci_unregister_driver(&brcmf_pciedrvr);
2146 }
2147