xref: /openbmc/linux/drivers/mmc/core/sdio_irq.c (revision a36954f5)
/*
 * linux/drivers/mmc/core/sdio_irq.c
 *
 * Author:      Nicolas Pitre
 * Created:     June 18, 2007
 * Copyright:   MontaVista Software Inc.
 *
 * Copyright 2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_ops.h"
#include "core.h"
#include "card.h"

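/*
 * Dispatch pending card interrupts to the registered function handlers.
 * Returns the number of interrupts handled (> 0), 0 if nothing was
 * pending, or a negative errno (e.g. if the CCCR read fails or a pending
 * interrupt has no handler).
 */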
static int process_sdio_pending_irqs(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int i, ret, count;
	unsigned char pending;
	struct sdio_func *func;

	/*
	 * Optimization: if there is only one function interrupt registered
	 * and we know an IRQ was signaled, then call the IRQ handler
	 * directly.  Otherwise do the full probe.
	 */
	func = card->sdio_single_irq;
	if (func && host->sdio_irq_pending) {
		func->irq_handler(func);
		return 1;
	}

	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
	if (ret) {
		pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
		       mmc_card_id(card), ret);
		return ret;
	}

	if (pending && mmc_card_broken_irq_polling(card) &&
	    !(host->caps & MMC_CAP_SDIO_IRQ)) {
		unsigned char dummy;

		/*
		 * A fake interrupt can be created when we poll the
		 * SDIO_CCCR_INTx register with a Marvell SD8797 card.
		 * A dummy CMD52 read to function 0 register 0xff can
		 * avoid this.
		 */
		mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
	}

	count = 0;
	for (i = 1; i <= 7; i++) {
		if (pending & (1 << i)) {
			func = card->sdio_func[i - 1];
			if (!func) {
				pr_warn("%s: pending IRQ for non-existent function\n",
					mmc_card_id(card));
				ret = -EINVAL;
			} else if (func->irq_handler) {
				func->irq_handler(func);
				count++;
			} else {
				pr_warn("%s: pending IRQ with no handler\n",
					sdio_func_id(func));
				ret = -EINVAL;
			}
		}
	}

	if (count)
		return count;

	return ret;
}

void sdio_run_irqs(struct mmc_host *host)
{
	mmc_claim_host(host);
	host->sdio_irq_pending = true;
	process_sdio_pending_irqs(host);
	mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);
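
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * hypothetical): a host controller driver that sets
 * MMC_CAP2_SDIO_IRQ_NOTHREAD dispatches card interrupts itself by calling
 * sdio_run_irqs() from a context that may sleep, for instance a threaded
 * interrupt handler:
 *
 *	static irqreturn_t foo_host_thread_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *host = dev_id;
 *
 *		if (host->card_int_pending)
 *			sdio_run_irqs(host->mmc);
 *		return IRQ_HANDLED;
 *	}
 */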

static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow SDIO cards to work even on hosts that are not
	 * SDIO aware.  One thing a non-SDIO host cannot do is provide
	 * asynchronous notification of pending SDIO card interrupts, so
	 * we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on the drivers' behalf for a
		 * couple of reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host);
		host->sdio_irq_pending = false;
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
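		/*
		 * For example (assuming HZ=1000, so idle_period above is
		 * 10 jiffies): a run of polls that each find work halves
		 * the period 10 -> 5 -> 2 -> 1, and once the card goes
		 * quiet the period creeps back up by one jiffy per idle
		 * poll until it reaches idle_period again.
		 */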
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
			atomic_set(&host->sdio_irq_thread_abort, 0);
			host->sdio_irq_thread =
				kthread_run(sdio_irq_thread, host,
					    "ksdioirqd/%s", mmc_hostname(host));
			if (IS_ERR(host->sdio_irq_thread)) {
				int err = PTR_ERR(host->sdio_irq_thread);
				host->sdio_irqs--;
				return err;
			}
		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
			host->ops->enable_sdio_irq(host, 1);
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (host->sdio_irqs < 1)
		return -EINVAL;

	if (!--host->sdio_irqs) {
		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
			atomic_set(&host->sdio_irq_thread_abort, 1);
			kthread_stop(host->sdio_irq_thread);
		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
			host->ops->enable_sdio_irq(host, 0);
		}
	}

	return 0;
}

/*
 * If the host supports SDIO interrupts and only one function has claimed
 * an IRQ, cache that function in sdio_single_irq so its handler can be
 * called directly.
 */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1)
		for (i = 0; i < card->sdio_funcs; i++) {
			func = card->sdio_func[i];
			if (func && func->irq_handler) {
				card->sdio_single_irq = func;
				break;
			}
		}
}

/**
 *	sdio_claim_irq - claim the IRQ for an SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called, so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	if (!func)
		return -EINVAL;

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);
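
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * hypothetical): a function driver would typically claim its IRQ from its
 * probe() callback, with the host claimed around the whole sequence:
 *
 *	static void foo_sdio_irq(struct sdio_func *func)
 *	{
 *		struct foo_dev *dev = sdio_get_drvdata(func);
 *
 *		foo_handle_card_interrupt(dev);
 *	}
 *
 *	static int foo_probe(struct sdio_func *func,
 *			     const struct sdio_device_id *id)
 *	{
 *		int ret;
 *
 *		sdio_claim_host(func);
 *		ret = sdio_enable_func(func);
 *		if (!ret)
 *			ret = sdio_claim_irq(func, foo_sdio_irq);
 *		sdio_release_host(func);
 *		return ret;
 *	}
 */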

/**
 *	sdio_release_irq - release the IRQ for an SDIO function
 *	@func: SDIO function
 *
 *	Disable and release the IRQ for the given SDIO function.
 */
int sdio_release_irq(struct sdio_func *func)
{
	int ret;
	unsigned char reg;

	if (!func)
		return -EINVAL;

	pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		func->irq_handler = NULL;
		sdio_card_irq_put(func->card);
		sdio_single_irq_set(func->card);
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg &= ~(1 << func->num);

	/* Disable master interrupt with the last function interrupt */
	if (!(reg & 0xFE))
		reg = 0;

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);
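
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * hypothetical): the matching teardown in a driver's remove() callback.
 *
 *	static void foo_remove(struct sdio_func *func)
 *	{
 *		sdio_claim_host(func);
 *		sdio_release_irq(func);
 *		sdio_disable_func(func);
 *		sdio_release_host(func);
 *	}
 */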