/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "htc_hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"

struct ath6kl_sdio {
	struct sdio_func *func;

	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;
	u8 *dma_buffer;

	/* scatter request list head */
	struct list_head scat_req;

	spinlock_t scat_lock;
	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}

/*
 * Checks whether a DMA buffer is word-aligned and DMA-able. Most host
 * controllers assume the buffer is DMA'able and will bug-check
 * otherwise (i.e. buffers on the stack). The virt_addr_valid() check
 * fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
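
/*
 * Buffers for which buf_needs_bounce() returns true are staged through
 * the preallocated ar_sdio->dma_buffer in ath6kl_sdio_read_write_sync().
 */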

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

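/*
 * CMD53 (IO_RW_EXTENDED) argument layout, per the SDIO spec:
 *
 *   bit  31     R/W flag
 *   bits 30:28  function number
 *   bit  27     block/byte mode
 *   bit  26     op code (1 = incrementing address)
 *   bits 25:9   register address
 *   bits 8:0    byte/block count
 *
 * For example, an incrementing-address, block-basis write of N blocks
 * sets bits 31, 27 and 26 and places N in the count field, which is
 * how ath6kl_sdio_scat_rw() below builds its command argument.
 */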
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

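/*
 * CMD52 (IO_RW_DIRECT) argument layout, per the SDIO spec:
 *
 *   bit  31     R/W flag
 *   bits 30:28  function number
 *   bit  27     RAW (read-after-write) flag
 *   bits 25:9   register address
 *   bits 7:0    write data
 *
 * Bits 26 and 8 are additionally set below, matching the original
 * vendor code.
 */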
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

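/*
 * Writes a single byte to a function-0 (card common area) register
 * with a raw CMD52. Used by ath6kl_sdio_probe() to enable 4-bit async
 * interrupt mode via CCCR_SDIO_IRQ_MODE_REG.
 */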
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->lock, flag);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_irqrestore(&ar_sdio->lock, flag);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_irqrestore(&ar_sdio->lock, flag);
	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	unsigned long flag;

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	spin_lock_irqsave(&ar_sdio->lock, flag);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_irqrestore(&ar_sdio->lock, flag);
}

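/*
 * Builds the mmc_data descriptor and scatterlist for one scatter
 * request. scat_req->len is assumed to be a multiple of
 * HIF_MBOX_BLOCK_SIZE, since the block count is derived by integer
 * division below.
 */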
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct hif_scatter_req_priv *s_req_priv,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = s_req_priv->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		if ((unsigned long)scat_req->scat_list[i].buf & 0x3)
			/*
			 * Some scatter engines can handle unaligned
			 * buffers, print this as informational only.
			 */
			ath6kl_dbg(ATH6KL_DBG_SCATTER,
				   "(%s) scatter buffer is unaligned 0x%p\n",
				   scat_req->req & HIF_WRITE ? "WR" : "RD",
				   scat_req->scat_list[i].buf);

		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = s_req_priv->sgentries;
	data->sg_len = scat_req->scat_entries;
}

static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status;

	scat_req = req->scat_req;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, scat_req->req_priv, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/*
	 * Fix up the address so that the last byte will fall on the MBOX
	 * EOM (end of message); the target treats a write that ends at
	 * the end of the mailbox address range as a complete message.
	 */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	status = cmd.error ? cmd.error : data.error;
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter request failed: %d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(scat_req);

	return status;
}

/* scatter/gather read-write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req_priv *req_priv = scat_req->req_priv;
	u32 request = scat_req->req;
	int status = 0;
	unsigned long flags;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		sdio_claim_host(ar_sdio->func);
		status = ath6kl_sdio_scat_rw(ar_sdio, req_priv->busrequest);
		sdio_release_host(ar_sdio->func);
	} else {
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
		list_add_tail(&req_priv->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scat_resource(struct ath6kl_sdio *ar_sdio)
{
	struct hif_scatter_req *s_req, *tmp_req;
	unsigned long flag;

	/* empty the free list */
	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

		if (s_req->req_priv && s_req->req_priv->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio,
						 s_req->req_priv->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->req_priv);
		kfree(s_req);

		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	}
	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_setup_scat_resource(struct ath6kl_sdio *ar_sdio,
					   struct hif_dev_scat_sup_info *pinfo)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("hif-scatter: host only supports scatter of %d entries, need %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		return -EINVAL;
	}

	ath6kl_dbg(ATH6KL_DBG_ANY,
		   "hif-scatter enabled: max scatter req: %d entries: %d\n",
		   MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ);

	scat_list_sz = (MAX_SCATTER_ENTRIES_PER_REQ - 1) *
		       sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			goto fail_setup_scat;

		/* allocate the private request blob */
		s_req->req_priv = kzalloc(sizeof(*s_req->req_priv), GFP_KERNEL);
		if (!s_req->req_priv) {
			kfree(s_req);
			goto fail_setup_scat;
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->req_priv);
			kfree(s_req);
			goto fail_setup_scat;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->req_priv->busrequest = bus_req;
		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	pinfo->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
	pinfo->max_xfer_szper_scatreq = MAX_SCATTER_REQ_TRANSFER_SIZE;

	return 0;

fail_setup_scat:
	ath6kl_err("hif-scatter: failed to alloc scatter resources!\n");
	ath6kl_sdio_cleanup_scat_resource(ar_sdio);

	return -ENOMEM;
}

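/*
 * Synchronous read/write. Block-basis requests have their length
 * rounded down to a multiple of the mailbox block size, and unaligned
 * or non-DMA-able buffers are bounced through ar_sdio->dma_buffer
 * (HIF_DMA_BUFFER_SIZE bytes, so callers are assumed to keep len
 * within that bound).
 */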
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else {
		tbuf = buf;
	}

	sdio_claim_host(ar_sdio->func);
	if (request & HIF_WRITE) {
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(ar_sdio->func, addr, tbuf, len);
		else
			ret = sdio_memcpy_toio(ar_sdio->func, addr, tbuf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(ar_sdio->func, tbuf, addr, len);
		else
			ret = sdio_memcpy_fromio(ar_sdio->func, tbuf,
						 addr, len);
		if (bounced)
			memcpy(buf, tbuf, len);
	}
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kldev_rw_comp_handler(context, status);
	}
}

static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	unsigned long flags;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
	sdio_claim_host(ar_sdio->func);

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	}
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);

	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);
	atomic_set(&ar_sdio->irq_handling, 0);
	WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
{
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
{
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;
	unsigned long flags;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* wait for any pending interrupt handling to finish */
	while (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);
		schedule_timeout_interruptible(HZ / 10);
		sdio_claim_host(ar_sdio->func);
	}

	/* mask our function IRQ */
	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->scat_lock, flag);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);
	}

	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

	return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->scat_lock, flag);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

static int ath6kl_sdio_enable_scatter(struct ath6kl *ar,
				      struct hif_dev_scat_sup_info *info)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	return ath6kl_sdio_setup_scat_resource(ar_sdio, info);
}

static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	ath6kl_sdio_cleanup_scat_resource(ar_sdio);
}

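/* HIF ops exported to the HTC layer; installed as ar->hif_ops in probe */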
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
};

static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
		   __func__, func->num, func->vendor,
		   func->device, func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_alloc(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;

	ath6kl_sdio_set_mbox_info(ar);

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			sdio_release_host(func);
			goto err_dma;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	sdio_release_host(func);

	ret = ath6kl_sdio_power_on(ar_sdio);
	if (ret)
		goto err_dma;

	sdio_claim_host(func);

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		sdio_release_host(func);
		goto err_off;
	}

	sdio_release_host(func);

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_off;
	}

	return ret;

err_off:
	ath6kl_sdio_power_off(ar_sdio);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_unavail_ev(ar_sdio->ar);

	ath6kl_sdio_power_off(ar_sdio);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);