/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "htc_hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"

struct ath6kl_sdio {
	struct sdio_func *func;

	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;
	u8 *dma_buffer;

	/* scatter request list head */
	struct list_head scat_req;

	spinlock_t scat_lock;
	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}

/*
 * Check whether a buffer is WORD-aligned and DMA-able. Most host
 * controllers assume the buffer is DMA'able and will bug-check
 * otherwise (e.g. for buffers on the stack). virt_addr_valid()
 * fails for stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

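/*
 * Build a CMD53 (IO_RW_EXTENDED) argument. Per the SDIO spec the
 * fields are: bit 31 R/W flag, bits 30:28 function number, bit 27
 * block/byte mode, bit 26 op code (fixed vs incrementing address),
 * bits 25:9 register address and bits 8:0 the byte/block count.
 */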
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

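/*
 * Build a CMD52 (IO_RW_DIRECT) argument for function 0: bit 31 R/W
 * flag, bits 30:28 function number, bit 27 RAW (read-after-write)
 * flag, bits 25:9 register address and bits 7:0 the data byte. Bits
 * 26 and 8 appear to be stuff bits in the spec; the hardware
 * evidently tolerates them being set.
 */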
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

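/*
 * Move the buffer contents down to the nearest 4-byte boundary. This
 * assumes the caller reserved enough headroom (up to 3 bytes) in
 * front of *buf for the memmove.
 */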
static void ath6kl_sdio_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	if (request & HIF_WRITE) {
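		/*
		 * Shift the address so the last byte of the transfer
		 * falls on the mailbox end-of-message (EOM) address;
		 * writing the final byte there is what signals the
		 * target that a complete message has arrived.
		 */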
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->lock, flag);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_irqrestore(&ar_sdio->lock, flag);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_irqrestore(&ar_sdio->lock, flag);
	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	unsigned long flag;

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	spin_lock_irqsave(&ar_sdio->lock, flag);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_irqrestore(&ar_sdio->lock, flag);
}

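/*
 * Translate a HIF scatter request into an mmc_data scatter list. The
 * transfer runs on a block basis, so scat_req->len is expected to be
 * a multiple of HIF_MBOX_BLOCK_SIZE here.
 */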
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		/* No header is added to an rx buf, so it should already be aligned */
		if (data->flags == MMC_DATA_WRITE)
			ath6kl_sdio_buf_align(&scat_req->scat_list[i].buf,
					      scat_req->scat_list[i].len);
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}

static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter request failed: %d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}

static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
	u8 *virt_buf;

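	/*
	 * hif_scatter_req presumably embeds one hif_scatter_item in its
	 * trailing scat_list array, hence room for only n - 1 extra
	 * entries is added here.
	 */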
	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		sg_sz = sizeof(struct scatterlist) * n_scat_entry;
	else
		buf_sz = 2 * L1_CACHE_BYTES +
			 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (virt_scat) {
			virt_buf = kzalloc(buf_sz, GFP_KERNEL);
			if (!virt_buf) {
				kfree(s_req);
				return -ENOMEM;
			}

			s_req->virt_dma_buf =
				(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
		} else {
			/* allocate sglist */
			s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		s_req->virt_scat = virt_scat;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}

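/*
 * Synchronous read/write. When the caller's buffer is unaligned or
 * not DMA-able (e.g. it lives on the stack), stage the transfer
 * through the pre-allocated bounce buffer instead.
 */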
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else {
		tbuf = buf;
	}

	sdio_claim_host(ar_sdio->func);
	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kldev_rw_comp_handler(context, status);
	}
}

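/*
 * Work item that drains the async write queue. The spinlock is
 * dropped around each I/O so new requests can be queued while a
 * transfer is in flight.
 */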
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	unsigned long flags;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
	sdio_claim_host(ar_sdio->func);

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	}
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);

	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);
	atomic_set(&ar_sdio->irq_handling, 0);
	WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
{
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
{
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;
	unsigned long flags;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* wait for any in-flight interrupt handling to finish */
	while (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);
		/*
		 * schedule_timeout() without setting the task state
		 * returns immediately, so sleep properly here.
		 */
		schedule_timeout_interruptible(HZ / 10);
		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->scat_lock, flag);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);
	}

	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

	return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	unsigned long flag;

	spin_lock_irqsave(&ar_sdio->scat_lock, flag);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;
	unsigned long flags;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		sdio_claim_host(ar_sdio->func);
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
		sdio_release_host(ar_sdio->func);
	} else {
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;
	unsigned long flag;

	/* empty the free list */
	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	}
	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret = 0;
	bool virt_scat = false;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of %d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_ANY,
				   "hif-scatter enabled: max scatter req: %d entries: %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
						MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources!\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_ANY,
			   "virtual scatter enabled, max_scat_req:%d, entries:%d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
					ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
};

static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
		   __func__, func->num, func->vendor,
		   func->device, func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_alloc(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;

	ath6kl_sdio_set_mbox_info(ar);

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			sdio_release_host(func);
			goto err_dma;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	sdio_release_host(func);

	ret = ath6kl_sdio_power_on(ar_sdio);
	if (ret)
		goto err_dma;

	sdio_claim_host(func);

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		sdio_release_host(func);
		goto err_off;
	}

	sdio_release_host(func);

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_off;
	}

	return ret;

err_off:
	ath6kl_sdio_power_off(ar_sdio);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_unavail_ev(ar_sdio->ar);

	ath6kl_sdio_power_off(ar_sdio);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);