/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "htc_hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"

struct ath6kl_sdio {
	struct sdio_func *func;

	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;
	u8 *dma_buffer;

	/* scatter request list head */
	struct list_head scat_req;

	spinlock_t scat_lock;
	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
/*
 * Check whether a DMA buffer needs bouncing: it must be WORD-aligned
 * and DMA-able. Most host controllers assume the buffer is DMA'able
 * and will bug-check otherwise (i.e. buffers on the stack).
 * virt_addr_valid() fails for stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

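/*
 * Build the 32-bit argument for an SDIO CMD53 (IO_RW_EXTENDED)
 * transaction: bit 31 is the R/W flag, bits 30:28 the function number,
 * bit 27 selects block vs byte mode, bit 26 the op code (incrementing
 * vs fixed address), bits 25:9 the register address and bits 8:0 the
 * byte/block count.
 */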
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

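/*
 * Build the 32-bit argument for an SDIO CMD52 (IO_RW_DIRECT)
 * transaction against function 0: bit 31 is the R/W flag, bits 30:28
 * the function number, bit 27 the RAW (read-after-write) flag,
 * bits 25:9 the register address and bits 7:0 the data byte.
 */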
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

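/*
 * Write a single byte to a function 0 (CCCR) register by issuing a
 * raw CMD52 directly on the host, bypassing the sdio_io helpers.
 */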
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

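/*
 * Perform a synchronous byte or block transfer on the given function.
 * Writes into a mailbox are shifted so that the last byte of the
 * transfer lands on the mailbox's end-of-message (EOM) address, which
 * is what tells the target a complete message has arrived.
 */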
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/* FIXME: looks like ugly workaround for something */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* FIXME: this also looks like ugly workaround */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_bh(&ar_sdio->lock);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_bh(&ar_sdio->lock);
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

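/*
 * Translate a HIF scatter request into an mmc_data descriptor,
 * building a scatterlist with one entry per scatter item.
 */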
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}

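/*
 * Issue a scatter request to the target. Requests using the virtual
 * scatter method are bounced through a single linear buffer with a
 * plain CMD53; native requests are handed to the MMC layer as a raw
 * CMD53 with an attached scatter-gather list. The call itself is
 * synchronous, but the completion callback is still invoked for
 * requests marked HIF_ASYNCHRONOUS.
 */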
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter request failed: %d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}

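/*
 * Pre-allocate a pool of scatter requests. With native scatter-gather
 * each request gets an sg table; with the virtual method it instead
 * gets one large cache-aligned bounce buffer. Each scatter request is
 * paired with a bus request and added to the scatter pool.
 */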
static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
	u8 *virt_buf;

	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		sg_sz = sizeof(struct scatterlist) * n_scat_entry;
	else
		buf_sz = 2 * L1_CACHE_BYTES +
			 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (virt_scat) {
			virt_buf = kzalloc(buf_sz, GFP_KERNEL);
			if (!virt_buf) {
				kfree(s_req);
				return -ENOMEM;
			}

			s_req->virt_dma_buf =
				(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
		} else {
			/* allocate sglist */
			s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		s_req->virt_scat = virt_scat;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}

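/*
 * Synchronous read/write. Buffers that are unaligned or not DMA-able
 * (e.g. on the stack) are bounced through the pre-allocated DMA
 * buffer before/after the transfer.
 */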
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else {
		tbuf = buf;
	}

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	return ret;
}

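/*
 * Process one queued request: scatter requests go through the scatter
 * path, plain requests complete synchronously and then have their
 * completion handler called.
 */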
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kldev_rw_comp_handler(context, status);
	}
}

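/*
 * Worker that drains the async write queue. The lock is dropped
 * around each request since the actual I/O sleeps.
 */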
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);

	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);
	atomic_set(&ar_sdio->irq_handling, 0);
	WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
{
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
{
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

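/*
 * Queue an asynchronous request and kick the worker; the request is
 * completed later from ath6kl_sdio_write_async_work().
 */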
static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Mask our function IRQ */
	while (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);
		/*
		 * schedule_timeout() returns immediately unless the task
		 * state is set to a sleeping state first, so use the
		 * interruptible variant to actually sleep while polling.
		 */
		schedule_timeout_interruptible(HZ / 10);
		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;

	spin_lock_bh(&ar_sdio->scat_lock);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);
	}

	spin_unlock_bh(&ar_sdio->scat_lock);

	return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	} else {
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret;
	bool virt_scat = false;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of %d entries, need %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_SCATTER,
				   "hif-scatter enabled: max scatter req: %d entries: %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
						MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources!\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_SCATTER,
			   "virtual scatter enabled, max_scat_req: %d, entries: %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
					ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}

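/*
 * Suspend only works if the host controller can keep the card powered
 * (MMC_PM_KEEP_POWER); otherwise firmware state would be lost across
 * suspend, so refuse with -EINVAL.
 */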
static int ath6kl_sdio_suspend(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	if (!(flags & MMC_PM_KEEP_POWER)) {
		/* as host doesn't support keep power we need to bail out */
		ath6kl_dbg(ATH6KL_DBG_SDIO,
			   "func %d doesn't support MMC_PM_KEEP_POWER\n",
			   func->num);
		return -EINVAL;
	}

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio pm flags failed: %d\n", ret);
		return ret;
	}

	ath6kl_deep_sleep_enable(ar);

	return 0;
}

static int ath6kl_sdio_resume(struct ath6kl *ar)
{
	if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
		if (ath6kl_wmi_powermode_cmd(ar->wmi,
					     ar->wmi->saved_pwr_mode) != 0)
			ath6kl_warn("ath6kl_sdio_resume: wmi_powermode_cmd failed\n");
	}

	return 0;
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
	.resume = ath6kl_sdio_resume,
};

static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_SDIO,
		   "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_alloc(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;

	ath6kl_sdio_set_mbox_info(ar);

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			sdio_release_host(func);
			goto err_cfg80211;
		}

		ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	sdio_release_host(func);

	ret = ath6kl_sdio_power_on(ar_sdio);
	if (ret)
		goto err_cfg80211;

	sdio_claim_host(func);

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		sdio_release_host(func);
		goto err_off;
	}

	sdio_release_host(func);

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_off;
	}

	return ret;

err_off:
	ath6kl_sdio_power_off(ar_sdio);
err_cfg80211:
	ath6kl_cfg80211_deinit(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO,
		   "removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_unavail_ev(ar_sdio->ar);

	ath6kl_sdio_power_off(ar_sdio);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);