// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2010 Broadcom Corporation
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "core.h"
#include "common.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK	0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512
#define SDIO_4373_FUNC2_BLOCKSIZE	256
#define SDIO_435X_FUNC2_BLOCKSIZE	256
#define SDIO_4329_FUNC2_BLOCKSIZE	128
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY	3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32  /* max rx frames in glom chain */

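/* State used to park the SDIO bus threads across system suspend/resume
 * (see the CONFIG_PM_SLEEP helpers below): @freezing is set while suspend
 * is in progress, @thread_count tracks how many threads take part,
 * @frozen_count how many have parked so far, @thread_freeze is where the
 * suspend path waits for them, and @resumed releases them again on resume.
 */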
struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};

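/* out-of-band (GPIO) interrupt handler; runs in hard-irq context, masks
 * the level-triggered line and defers the real work to the SDIO bus layer
 */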
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus, true);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus, false);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}

int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		spin_lock_init(&sdiodev->irq_en_lock);
		sdiodev->irq_en = true;

		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func1->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		disable_irq_wake(pdata->oob_irq_nr);

		sdio_claim_host(sdiodev->func1);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
					   0xf, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
			SDIO_CCCR_IEN_FUNC0;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
				     data, &ret);
		sdio_release_host(sdiodev->func1);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func1);
		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}

void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
		  sdiodev->oob_irq_requested,
		  sdiodev->sd_irq_requested);

	if (sdiodev->oob_irq_requested) {
		struct brcmfmac_sdio_pd *pdata;

		pdata = &sdiodev->settings->bus.sdio;
		sdio_claim_host(sdiodev->func1);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func1);

		sdiodev->oob_irq_requested = false;
		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
		sdiodev->irq_en = false;
		sdiodev->oob_irq_requested = false;
	}

	if (sdiodev->sd_irq_requested) {
		sdio_claim_host(sdiodev->func1);
		sdio_release_irq(sdiodev->func2);
		sdio_release_irq(sdiodev->func1);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = false;
	}
}

void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}

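/* Program the SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH} registers so that @addr
 * falls within the currently mapped backplane window. The window base is
 * cached in sdiodev->sbwad to avoid rewriting it on every access.
 */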
static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
					    u32 addr)
{
	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
	int err = 0, i;

	if (bar0 == sdiodev->sbwad)
		return 0;

	v = bar0 >> 8;

	for (i = 0 ; i < 3 && !err ; i++, v >>= 8)
		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
				   v & 0xff, &err);

	if (!err)
		sdiodev->sbwad = bar0;

	return err;
}

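/* 32-bit backplane read through function 1; sets up the window first and
 * reports any error through the optional @ret pointer
 */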
u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data = 0;
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	data = sdio_readl(sdiodev->func1, addr, &retval);

out:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
			u32 data, int *ret)
{
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	sdio_writel(sdiodev->func1, data, addr, &retval);

out:
	if (ret)
		*ret = retval;
}

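/* helpers moving a single skb to/from the dongle; -ENOMEDIUM from the mmc
 * stack marks the card as gone
 */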
static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
				   struct sdio_func *func, u32 addr,
				   struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skbs use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	switch (func->num) {
	case 1:
		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
					 req_sz);
		break;
	case 2:
		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
		break;
	default:
		/* bail out as things are really fishy here */
		WARN(1, "invalid sdio function number: %d\n", func->num);
		err = -ENOMEDIUM;
	}

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
				    struct sdio_func *func, u32 addr,
				    struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skbs use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

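/* Fill in and submit one block-mode CMD53 (IO_RW_EXTENDED) request for the
 * scatterlist gathered so far: the 17-bit register address occupies bits
 * 25:9 of the command argument and the block count bits 8:0. For function 1
 * the address is advanced so the next chunk continues where this one ended.
 */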
static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
			  struct mmc_command *mc, int sg_cnt, int req_sz,
			  int func_blk_sz, u32 *addr,
			  struct brcmf_sdio_dev *sdiodev,
			  struct sdio_func *func, int write)
{
	int ret;

	md->sg_len = sg_cnt;
	md->blocks = req_sz / func_blk_sz;
	mc->arg |= (*addr & 0x1FFFF) << 9;	/* address */
	mc->arg |= md->blocks & 0x1FF;	/* block count */
	/* incrementing addr for function 1 */
	if (func->num == 1)
		*addr += req_sz;

	mmc_set_data_timeout(md, func->card);
	mmc_wait_for_req(func->card->host, mr);

	ret = mc->error ? mc->error : md->error;
	if (ret == -ENOMEDIUM) {
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	} else if (ret != 0) {
		brcmf_err("CMD53 sg block %s failed %d\n",
			  write ? "write" : "read", ret);
		ret = -EIO;
	}

	return ret;
}

/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: list of skbs to transfer
 *
 * This function serves as the interface to the MMC stack for block data
 * access. It assumes that the skbs passed down by the caller have already
 * been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, src_offset, dst_offset;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff_head local_list, *target_list;
	struct sk_buff *pkt_next = NULL, *src;
	unsigned short max_seg_cnt;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	req_sz = 0;
	sg_cnt = 0;
	sgl = sdiodev->sgtable.sgl;
	skb_queue_walk(target_list, pkt_next) {
		pkt_offset = 0;
		while (pkt_offset < pkt_next->len) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
						     sg_cnt, req_sz, func_blk_sz,
						     &addr, sdiodev, func, write);
				if (ret)
					goto exit_queue_walk;
				req_sz = 0;
				sg_cnt = 0;
				sgl = sdiodev->sgtable.sgl;
			}
		}
	}
	if (sg_cnt)
		ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
				     sg_cnt, req_sz, func_blk_sz,
				     &addr, sdiodev, func, write);
exit_queue_walk:
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		src = __skb_peek(&local_list);
		src_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;

			/* This is safe because we must have enough SKB data
			 * in the local list to cover everything in pktlist.
			 */
			while (1) {
				req_sz = pkt_next->len - dst_offset;
				if (req_sz > src->len - src_offset)
					req_sz = src->len - src_offset;

				orig_data = src->data + src_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);

				src_offset += req_sz;
				if (src_offset == src->len) {
					src_offset = 0;
					src = skb_peek_next(src, &local_list);
				}
				dst_offset += req_sz;
				if (dst_offset == pkt_next->len)
					break;
			}
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

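/* receive into a flat caller buffer by bouncing through a temporary skb */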
int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);

done:
	return err;
}

int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      __skb_peek(pktq));
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
					    addr, pktq);

done:
	brcmu_pkt_buf_free_skb(glom_skb);
	return err;
}

int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->cc_core->base;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);

	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
out:
	brcmu_pkt_buf_free_skb(mypkt);

	return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1 || !sdiodev->sg_support) {
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
						       addr, skb);
			if (err)
				break;
		}
	} else {
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
					    addr, pktq);
	}

	return err;
}

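/* Read or write a region of dongle RAM. Transfers larger than one backplane
 * window (SBSDIO_SB_OFT_ADDR_LIMIT) are split into window-sized chunks and
 * the window is moved between chunks.
 */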
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int err = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func1);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
		if (err)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);

		if (write) {
			memcpy(pkt->data, data, dsize);
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
						       sdaddr, pkt);
		} else {
			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
						      sdaddr, pkt);
		}

		if (err) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	sdio_release_host(sdiodev->func1);

	return err;
}

int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
	brcmf_dbg(SDIO, "Enter\n");

	/* Issue abort cmd52 command through F0 */
	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

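/* Derive scatter-gather limits from the MMC host capabilities and
 * pre-allocate the sg table used by brcmf_sdiod_sglist_rw(). On allocation
 * failure scatter-gather support is simply disabled.
 */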
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func2;
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}

#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
	}
}

static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func1);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func1);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func1);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func1);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */

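/* Tear down the bus layer, disable both functions and release the resources
 * acquired by brcmf_sdiod_probe(). Also used on suspend when the host cannot
 * keep the card powered.
 */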
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func2);
	sdio_disable_func(sdiodev->func2);
	sdio_release_host(sdiodev->func2);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func1);
	sdio_disable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func1->card->host->parent);
	return 0;
}

static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}

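/* Bring the card to an operational state: set the F1/F2 block sizes (F2 is
 * chip specific), enable function 1 and attach the higher SDIO bus layer.
 * Any failure unwinds through brcmf_sdiod_remove().
 */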
int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;

	sdio_claim_host(sdiodev->func1);

	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	}
	switch (sdiodev->func2->device) {
	case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
		f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
		break;
	case SDIO_DEVICE_ID_BROADCOM_4359:
	case SDIO_DEVICE_ID_BROADCOM_4354:
	case SDIO_DEVICE_ID_BROADCOM_4356:
		f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
		break;
	case SDIO_DEVICE_ID_BROADCOM_4329:
		f2_blksz = SDIO_4329_FUNC2_BLOCKSIZE;
		break;
	default:
		break;
	}

	ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	} else {
		brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
	}

	/* increase F2 timeout */
	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}

#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

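/* Mark the ACPI companion (if any) as not power manageable so ACPI will not
 * power the card down behind the driver's back.
 */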
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
						  int val)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = 0;
#endif
}

static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;
	struct device *dev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	dev = &func->dev;

	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* prohibit ACPI power management for this device */
	brcmf_sdiod_acpi_set_power_manageable(dev, 0);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func1 = func->card->sdio_func[0];
	sdiodev->func2 = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
	sdiodev->dev = &sdiodev->func1->dev;

	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func1->dev, NULL);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func1->dev, NULL);
		dev_set_drvdata(&sdiodev->func2->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}

void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
	sdiodev->wowl_enabled = enabled;
}

#ifdef CONFIG_PM_SLEEP
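/* System suspend callback (invoked for F1 only). If the host can keep the
 * card powered the bus threads are frozen and, with wake-on-wireless
 * enabled, a wakeup source is armed; otherwise the device is removed here
 * and probed again on resume.
 */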
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t pm_caps, sdio_flags;
	int ret = 0;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	pm_caps = sdio_get_host_pm_caps(func);

	if (pm_caps & MMC_PM_KEEP_POWER) {
		/* preserve card power during suspend */
		brcmf_sdiod_freezer_on(sdiodev);
		brcmf_sdio_wd_timer(sdiodev->bus, 0);

		sdio_flags = MMC_PM_KEEP_POWER;
		if (sdiodev->wowl_enabled) {
			if (sdiodev->settings->bus.sdio.oob_irq_supported)
				enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
			else
				sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
		}

		if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);

	} else {
		/* power will be cut so remove device, probe again in resume */
		brcmf_sdiod_intr_unregister(sdiodev);
		ret = brcmf_sdiod_remove(sdiodev);
		if (ret)
			brcmf_err("Failed to remove device on suspend\n");
	}

	return ret;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
	mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
	int ret = 0;

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 2)
		return 0;

	if (!(pm_caps & MMC_PM_KEEP_POWER)) {
		/* bus was powered off and device removed, probe again */
		ret = brcmf_sdiod_probe(sdiodev);
		if (ret)
			brcmf_err("Failed to probe device on resume\n");
	} else {
		if (sdiodev->wowl_enabled &&
		    sdiodev->settings->bus.sdio.oob_irq_supported)
			disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);

		brcmf_sdiod_freezer_off(sdiodev);
	}

	return ret;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
#endif	/* CONFIG_PM_SLEEP */

static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
		.coredump = brcmf_dev_coredump,
	},
};

void brcmf_sdio_register(void)
{
	int ret;

	ret = sdio_register_driver(&brcmf_sdmmc_driver);
	if (ret)
		brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}