// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if it is supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

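/*
 * A command "uses the data line(s)" if it either transfers data or signals
 * busy on DAT0 (an R1b-type response), so it must be timed and completed
 * like a data transfer.
 */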
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

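/*
 * Arm the card-insert or card-remove interrupt that matches the opposite of
 * the current present state. Detection is left untouched when it is known
 * broken, when the card is non-removable, or when a GPIO provides card
 * detect instead.
 */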
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

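/*
 * Request a software reset by writing the mask to SDHCI_SOFTWARE_RESET, then
 * poll for up to 100 ms for the controller to clear the bit(s) again.
 */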
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return false;
	}

	host->ops->reset(host, mask);

	return true;
}

static void sdhci_reset_for_all(struct sdhci_host *host)
{
	if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}
		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

enum sdhci_reset_reason {
	SDHCI_RESET_FOR_INIT,
	SDHCI_RESET_FOR_REQUEST_ERROR,
	SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
	SDHCI_RESET_FOR_TUNING_ABORT,
	SDHCI_RESET_FOR_CARD_REMOVED,
	SDHCI_RESET_FOR_CQE_RECOVERY,
};

static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
{
	if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		return;
	}

	switch (reason) {
	case SDHCI_RESET_FOR_INIT:
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR:
	case SDHCI_RESET_FOR_TUNING_ABORT:
	case SDHCI_RESET_FOR_CARD_REMOVED:
	case SDHCI_RESET_FOR_CQE_RECOVERY:
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	}
}

#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

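/*
 * Bring the controller to a known state. A soft init resets only the CMD and
 * DATA circuits and forces a clock reconfiguration via set_ios(); a full init
 * resets everything and re-enables DMA if it was in use.
 */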
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_reset_for(host, INIT);
	else
		sdhci_reset_for_all(host);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		host->reinit_uhs = true;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present state,
	 * refer sdhci_set_card_detection(). A card detect interrupt might have
	 * been missed while the host controller was being reset, so trigger a
	 * rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

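/*
 * With the LED class available the LED is driven through the mmc trigger
 * rather than directly from the request path, so these helpers are no-ops
 * here and only do real work in the !CONFIG_LEDS_CLASS branch below.
 */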
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

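/*
 * Two request timers exist: host->data_timer covers commands that use the
 * data lines (or busy signalling), host->timer covers everything else.
 */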
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

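/*
 * PIO block transfers go through the 32-bit SDHCI_BUFFER port. The
 * scatter-gather miter walks the request one mapping at a time, and the
 * byte-wise inner loop copes with buffers that are not 32-bit aligned
 * or sized.
 */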
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

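/*
 * Map the request for DMA. With a bounce buffer, write data is copied into
 * it and ownership is handed to the device; otherwise the scatterlist is
 * mapped directly. Returns the number of mapped segments or a negative errno.
 */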
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg)
{
	return kmap_local_page(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer)
{
	kunmap_local(buffer);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

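/*
 * Build the ADMA2 descriptor table for a mapped request: unaligned head
 * bytes of each segment are redirected through the pre-allocated align
 * buffer, oversized segments are split, and the table is terminated with
 * an end descriptor.
 */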
static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		/*
		 * The block layer forces a minimum segment size of PAGE_SIZE,
		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
		 * multiple descriptors, noting that the ADMA table is sized
		 * for 4KiB chunks anyway, so it will be big enough.
		 */
		while (len > host->max_adma) {
			int n = 32 * 1024; /* 32 KiB */

			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
			addr += n;
			len -= n;
		}

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

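/*
 * Return the requested timeout in microseconds: the command busy timeout for
 * non-data commands, otherwise timeout_ns plus timeout_clks converted at the
 * current clock rate.
 */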
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned int target_timeout, current_timeout;

	*too_big = false;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use the maximum. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return host->max_timeout_count;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return host->max_timeout_count;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return host->max_timeout_count;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
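	/*
	 * Illustration (assuming host->timeout_clk is in kHz): with
	 * timeout_clk = 48000, the base timeout below is
	 * (1 << 13) * 1000 / 48000 ~= 170 us, and each loop iteration
	 * doubles it, so count = 10 corresponds to roughly 174 ms.
	 */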
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > host->max_timeout_count) {
			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
				DBG("Too large timeout 0x%x requested for CMD%d!\n",
				    count, cmd->opcode);
			count = host->max_timeout_count;
			*too_big = true;
			break;
		}
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	sdhci_config_dma(host);

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);

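/*
 * Helpers for the three ways a multi-block transfer can be bounded or
 * stopped: Auto-CMD12, Auto-CMD23 issued by the controller, or a CMD23
 * sent manually by the driver before the data command.
 */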
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g sdhci-of-dwmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (!mmc_op_tuning(cmd->opcode))
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_reset_for(host, REQUEST_ERROR);
		else
			sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

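/*
 * Write a command to the controller. Returns false without doing anything if
 * the relevant inhibit bits are still set, in which case the caller must
 * retry or defer the command (see sdhci_send_command_retry()).
 */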
static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || mmc_op_tuning(cmd->opcode))
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

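/*
 * Read a 136-bit (R2) response. The controller does not store the CRC byte,
 * so the response sits 8 bits lower in the RESPONSE registers than the core
 * expects and has to be shifted back up, unless the controller has the
 * SDHCI_QUIRK2_RSP_136_HAS_CRC quirk.
 */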
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

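/*
 * Return the preset value register that matches the current timing mode; it
 * encodes the clock divider settings chosen by the hardware.
 */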
sdhci_get_preset_value(struct sdhci_host * host)1848 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1849 {
1850 u16 preset = 0;
1851
1852 switch (host->timing) {
1853 case MMC_TIMING_MMC_HS:
1854 case MMC_TIMING_SD_HS:
1855 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1856 break;
1857 case MMC_TIMING_UHS_SDR12:
1858 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1859 break;
1860 case MMC_TIMING_UHS_SDR25:
1861 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1862 break;
1863 case MMC_TIMING_UHS_SDR50:
1864 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1865 break;
1866 case MMC_TIMING_UHS_SDR104:
1867 case MMC_TIMING_MMC_HS200:
1868 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1869 break;
1870 case MMC_TIMING_UHS_DDR50:
1871 case MMC_TIMING_MMC_DDR52:
1872 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1873 break;
1874 case MMC_TIMING_MMC_HS400:
1875 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1876 break;
1877 default:
1878 pr_warn("%s: Invalid UHS-I mode selected\n",
1879 mmc_hostname(host->mmc));
1880 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1881 break;
1882 }
1883 return preset;
1884 }
1885
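/*
 * Compute the SDHCI_CLOCK_CONTROL divider field for a requested clock.
 * For spec v3.00+ the divided clock is SDCLK = max_clk / (2 * N), where
 * N is the 10-bit register value (N = 0 meaning no division), while
 * programmable clock mode gives SDCLK = max_clk * clk_mul / (N + 1).
 *
 * Illustrative example (assumed numbers): with max_clk = 200 MHz and a
 * requested 400 kHz, the v3.00 loop below picks real_div = 500, writes
 * N = 250 to the register and reports actual_clock = 400 kHz.
 */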
1886 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1887 unsigned int *actual_clock)
1888 {
1889 int div = 0; /* Initialized to silence a compiler warning */
1890 int real_div = div, clk_mul = 1;
1891 u16 clk = 0;
1892 bool switch_base_clk = false;
1893
1894 if (host->version >= SDHCI_SPEC_300) {
1895 if (host->preset_enabled) {
1896 u16 pre_val;
1897
1898 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1899 pre_val = sdhci_get_preset_value(host);
1900 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1901 if (host->clk_mul &&
1902 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1903 clk = SDHCI_PROG_CLOCK_MODE;
1904 real_div = div + 1;
1905 clk_mul = host->clk_mul;
1906 } else {
1907 real_div = max_t(int, 1, div << 1);
1908 }
1909 goto clock_set;
1910 }
1911
1912 /*
1913 * Check if the Host Controller supports Programmable Clock
1914 * Mode.
1915 */
1916 if (host->clk_mul) {
1917 for (div = 1; div <= 1024; div++) {
1918 if ((host->max_clk * host->clk_mul / div)
1919 <= clock)
1920 break;
1921 }
1922 if ((host->max_clk * host->clk_mul / div) <= clock) {
1923 /*
1924 * Set Programmable Clock Mode in the Clock
1925 * Control register.
1926 */
1927 clk = SDHCI_PROG_CLOCK_MODE;
1928 real_div = div;
1929 clk_mul = host->clk_mul;
1930 div--;
1931 } else {
1932 /*
1933 * Divisor can be too small to reach clock
1934 * speed requirement. Then use the base clock.
1935 */
1936 switch_base_clk = true;
1937 }
1938 }
1939
1940 if (!host->clk_mul || switch_base_clk) {
1941 /* Version 3.00 divisors must be a multiple of 2. */
1942 if (host->max_clk <= clock)
1943 div = 1;
1944 else {
1945 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1946 div += 2) {
1947 if ((host->max_clk / div) <= clock)
1948 break;
1949 }
1950 }
1951 real_div = div;
1952 div >>= 1;
1953 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1954 && !div && host->max_clk <= 25000000)
1955 div = 1;
1956 }
1957 } else {
1958 /* Version 2.00 divisors must be a power of 2. */
1959 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1960 if ((host->max_clk / div) <= clock)
1961 break;
1962 }
1963 real_div = div;
1964 div >>= 1;
1965 }
1966
1967 clock_set:
1968 if (real_div)
1969 *actual_clock = (host->max_clk * clk_mul) / real_div;
1970 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1971 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1972 << SDHCI_DIVIDER_HI_SHIFT;
1973
1974 return clk;
1975 }
1976 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1977
1978 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1979 {
1980 ktime_t timeout;
1981
1982 clk |= SDHCI_CLOCK_INT_EN;
1983 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1984
1985 /* Wait max 150 ms */
1986 timeout = ktime_add_ms(ktime_get(), 150);
1987 while (1) {
1988 bool timedout = ktime_after(ktime_get(), timeout);
1989
1990 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1991 if (clk & SDHCI_CLOCK_INT_STABLE)
1992 break;
1993 if (timedout) {
1994 pr_err("%s: Internal clock never stabilised.\n",
1995 mmc_hostname(host->mmc));
1996 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
1997 sdhci_dumpregs(host);
1998 return;
1999 }
2000 udelay(10);
2001 }
2002
2003 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
2004 clk |= SDHCI_CLOCK_PLL_EN;
2005 clk &= ~SDHCI_CLOCK_INT_STABLE;
2006 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2007
2008 /* Wait max 150 ms */
2009 timeout = ktime_add_ms(ktime_get(), 150);
2010 while (1) {
2011 bool timedout = ktime_after(ktime_get(), timeout);
2012
2013 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2014 if (clk & SDHCI_CLOCK_INT_STABLE)
2015 break;
2016 if (timedout) {
2017 pr_err("%s: PLL clock never stabilised.\n",
2018 mmc_hostname(host->mmc));
2019 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
2020 sdhci_dumpregs(host);
2021 return;
2022 }
2023 udelay(10);
2024 }
2025 }
2026
2027 clk |= SDHCI_CLOCK_CARD_EN;
2028 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2029 }
2030 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
2031
2032 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
2033 {
2034 u16 clk;
2035
2036 host->mmc->actual_clock = 0;
2037
2038 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2039
2040 if (clock == 0)
2041 return;
2042
2043 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2044 sdhci_enable_clk(host, clk);
2045 }
2046 EXPORT_SYMBOL_GPL(sdhci_set_clock);
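
/*
 * A minimal sketch (hypothetical driver name) of how a platform driver
 * typically hooks the generic helpers above into its sdhci_ops:
 *
 *	static const struct sdhci_ops sdhci_foo_ops = {
 *		.set_clock		= sdhci_set_clock,
 *		.set_bus_width		= sdhci_set_bus_width,
 *		.reset			= sdhci_reset,
 *		.set_uhs_signaling	= sdhci_set_uhs_signaling,
 *	};
 */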
2047
2048 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2049 unsigned short vdd)
2050 {
2051 struct mmc_host *mmc = host->mmc;
2052
2053 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2054
2055 if (mode != MMC_POWER_OFF)
2056 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2057 else
2058 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2059 }
2060
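/*
 * Program the power register directly. Note that @vdd is the bit number
 * of the selected voltage range in the OCR bitmask (as passed in
 * ios->vdd by the mmc core), which is why it is tested as (1 << vdd).
 */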
2061 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2062 unsigned short vdd)
2063 {
2064 u8 pwr = 0;
2065
2066 if (mode != MMC_POWER_OFF) {
2067 switch (1 << vdd) {
2068 case MMC_VDD_165_195:
2069 /*
2070 * Without a regulator, SDHCI does not support 2.0v
2071 * so we only get here if the driver deliberately
2072 * added the 2.0v range to ocr_avail. Map it to 1.8v
2073 * for the purpose of turning on the power.
2074 */
2075 case MMC_VDD_20_21:
2076 pwr = SDHCI_POWER_180;
2077 break;
2078 case MMC_VDD_29_30:
2079 case MMC_VDD_30_31:
2080 pwr = SDHCI_POWER_300;
2081 break;
2082 case MMC_VDD_32_33:
2083 case MMC_VDD_33_34:
2084 /*
2085 * 3.4 ~ 3.6V are valid only for those platforms where it's
2086 * known that the voltage range is supported by hardware.
2087 */
2088 case MMC_VDD_34_35:
2089 case MMC_VDD_35_36:
2090 pwr = SDHCI_POWER_330;
2091 break;
2092 default:
2093 WARN(1, "%s: Invalid vdd %#x\n",
2094 mmc_hostname(host->mmc), vdd);
2095 break;
2096 }
2097 }
2098
2099 if (host->pwr == pwr)
2100 return;
2101
2102 host->pwr = pwr;
2103
2104 if (pwr == 0) {
2105 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2106 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2107 sdhci_runtime_pm_bus_off(host);
2108 } else {
2109 /*
2110 * Spec says that we should clear the power reg before setting
2111 * a new value. Some controllers don't seem to like this though.
2112 */
2113 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2114 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2115
2116 /*
2117 * At least the Marvell CaFe chip gets confused if we set the
2118 * voltage and turn on the power at the same time, so set the
2119 * voltage first.
2120 */
2121 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2122 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2123
2124 pwr |= SDHCI_POWER_ON;
2125
2126 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2127
2128 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2129 sdhci_runtime_pm_bus_on(host);
2130
2131 /*
2132 * Some controllers need an extra 10ms delay before they
2133 * can apply clock after applying power
2134 */
2135 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2136 mdelay(10);
2137 }
2138 }
2139 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2140
2141 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2142 unsigned short vdd)
2143 {
2144 if (IS_ERR(host->mmc->supply.vmmc))
2145 sdhci_set_power_noreg(host, mode, vdd);
2146 else
2147 sdhci_set_power_reg(host, mode, vdd);
2148 }
2149 EXPORT_SYMBOL_GPL(sdhci_set_power);
2150
2151 /*
2152 * Some controllers need to configure a valid bus voltage on their power
2153 * register regardless of whether an external regulator is taking care of power
2154 * supply. This helper function takes care of it if set as the controller's
2155 * sdhci_ops.set_power callback.
2156 */
2157 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2158 unsigned char mode,
2159 unsigned short vdd)
2160 {
2161 if (!IS_ERR(host->mmc->supply.vmmc)) {
2162 struct mmc_host *mmc = host->mmc;
2163
2164 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2165 }
2166 sdhci_set_power_noreg(host, mode, vdd);
2167 }
2168 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
2169
2170 /*****************************************************************************\
2171 * *
2172 * MMC callbacks *
2173 * *
2174 \*****************************************************************************/
2175
2176 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2177 {
2178 struct sdhci_host *host = mmc_priv(mmc);
2179 struct mmc_command *cmd;
2180 unsigned long flags;
2181 bool present;
2182
2183 /* Firstly check card presence */
2184 present = mmc->ops->get_cd(mmc);
2185
2186 spin_lock_irqsave(&host->lock, flags);
2187
2188 sdhci_led_activate(host);
2189
2190 if (sdhci_present_error(host, mrq->cmd, present))
2191 goto out_finish;
2192
2193 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2194
2195 if (!sdhci_send_command_retry(host, cmd, flags))
2196 goto out_finish;
2197
2198 spin_unlock_irqrestore(&host->lock, flags);
2199
2200 return;
2201
2202 out_finish:
2203 sdhci_finish_mrq(host, mrq);
2204 spin_unlock_irqrestore(&host->lock, flags);
2205 }
2206 EXPORT_SYMBOL_GPL(sdhci_request);
2207
2208 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2209 {
2210 struct sdhci_host *host = mmc_priv(mmc);
2211 struct mmc_command *cmd;
2212 unsigned long flags;
2213 int ret = 0;
2214
2215 spin_lock_irqsave(&host->lock, flags);
2216
2217 if (sdhci_present_error(host, mrq->cmd, true)) {
2218 sdhci_finish_mrq(host, mrq);
2219 goto out_finish;
2220 }
2221
2222 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2223
2224 /*
2225 * The HSQ may send a command in interrupt context without polling
2226 * the busy signaling, which means we should return BUSY if the
2227 * controller has not released the inhibit bits. This allows the HSQ
2228 * to try sending the request again in non-atomic context, so we
2229 * should not finish this request here.
2230 */
2231 if (!sdhci_send_command(host, cmd))
2232 ret = -EBUSY;
2233 else
2234 sdhci_led_activate(host);
2235
2236 out_finish:
2237 spin_unlock_irqrestore(&host->lock, flags);
2238 return ret;
2239 }
2240 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2241
2242 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2243 {
2244 u8 ctrl;
2245
2246 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2247 if (width == MMC_BUS_WIDTH_8) {
2248 ctrl &= ~SDHCI_CTRL_4BITBUS;
2249 ctrl |= SDHCI_CTRL_8BITBUS;
2250 } else {
2251 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2252 ctrl &= ~SDHCI_CTRL_8BITBUS;
2253 if (width == MMC_BUS_WIDTH_4)
2254 ctrl |= SDHCI_CTRL_4BITBUS;
2255 else
2256 ctrl &= ~SDHCI_CTRL_4BITBUS;
2257 }
2258 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2259 }
2260 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2261
2262 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2263 {
2264 u16 ctrl_2;
2265
2266 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2267 /* Select Bus Speed Mode for host */
2268 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2269 if ((timing == MMC_TIMING_MMC_HS200) ||
2270 (timing == MMC_TIMING_UHS_SDR104))
2271 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2272 else if (timing == MMC_TIMING_UHS_SDR12)
2273 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2274 else if (timing == MMC_TIMING_UHS_SDR25)
2275 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2276 else if (timing == MMC_TIMING_UHS_SDR50)
2277 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2278 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2279 (timing == MMC_TIMING_MMC_DDR52))
2280 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2281 else if (timing == MMC_TIMING_MMC_HS400)
2282 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2283 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2284 }
2285 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2286
2287 static bool sdhci_timing_has_preset(unsigned char timing)
2288 {
2289 switch (timing) {
2290 case MMC_TIMING_UHS_SDR12:
2291 case MMC_TIMING_UHS_SDR25:
2292 case MMC_TIMING_UHS_SDR50:
2293 case MMC_TIMING_UHS_SDR104:
2294 case MMC_TIMING_UHS_DDR50:
2295 case MMC_TIMING_MMC_DDR52:
2296 return true;
2297 }
2298 return false;
2299 }
2300
2301 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
2302 {
2303 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2304 sdhci_timing_has_preset(timing);
2305 }
2306
2307 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
2308 {
2309 /*
2310 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
2311 * Frequency. Check if preset values need to be enabled, or the Driver
2312 * Strength needs updating. Note, clock changes are handled separately.
2313 */
2314 return !host->preset_enabled &&
2315 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
2316 }
2317
2318 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2319 {
2320 struct sdhci_host *host = mmc_priv(mmc);
2321 bool reinit_uhs = host->reinit_uhs;
2322 bool turning_on_clk = false;
2323 u8 ctrl;
2324
2325 host->reinit_uhs = false;
2326
2327 if (ios->power_mode == MMC_POWER_UNDEFINED)
2328 return;
2329
2330 if (host->flags & SDHCI_DEVICE_DEAD) {
2331 if (!IS_ERR(mmc->supply.vmmc) &&
2332 ios->power_mode == MMC_POWER_OFF)
2333 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2334 return;
2335 }
2336
2337 /*
2338 * Reset the chip on each power off.
2339 * Should clear out any weird states.
2340 */
2341 if (ios->power_mode == MMC_POWER_OFF) {
2342 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2343 sdhci_reinit(host);
2344 }
2345
2346 if (host->version >= SDHCI_SPEC_300 &&
2347 (ios->power_mode == MMC_POWER_UP) &&
2348 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2349 sdhci_enable_preset_value(host, false);
2350
2351 if (!ios->clock || ios->clock != host->clock) {
2352 turning_on_clk = ios->clock && !host->clock;
2353
2354 host->ops->set_clock(host, ios->clock);
2355 host->clock = ios->clock;
2356
2357 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2358 host->clock) {
2359 host->timeout_clk = mmc->actual_clock ?
2360 mmc->actual_clock / 1000 :
2361 host->clock / 1000;
2362 mmc->max_busy_timeout =
2363 host->ops->get_max_timeout_count ?
2364 host->ops->get_max_timeout_count(host) :
2365 1 << 27;
2366 mmc->max_busy_timeout /= host->timeout_clk;
2367 }
2368 }
2369
2370 if (host->ops->set_power)
2371 host->ops->set_power(host, ios->power_mode, ios->vdd);
2372 else
2373 sdhci_set_power(host, ios->power_mode, ios->vdd);
2374
2375 if (host->ops->platform_send_init_74_clocks)
2376 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2377
2378 host->ops->set_bus_width(host, ios->bus_width);
2379
2380 /*
2381 * Special case to avoid multiple clock changes during voltage
2382 * switching.
2383 */
2384 if (!reinit_uhs &&
2385 turning_on_clk &&
2386 host->timing == ios->timing &&
2387 host->version >= SDHCI_SPEC_300 &&
2388 !sdhci_presetable_values_change(host, ios))
2389 return;
2390
2391 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2392
2393 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2394 if (ios->timing == MMC_TIMING_SD_HS ||
2395 ios->timing == MMC_TIMING_MMC_HS ||
2396 ios->timing == MMC_TIMING_MMC_HS400 ||
2397 ios->timing == MMC_TIMING_MMC_HS200 ||
2398 ios->timing == MMC_TIMING_MMC_DDR52 ||
2399 ios->timing == MMC_TIMING_UHS_SDR50 ||
2400 ios->timing == MMC_TIMING_UHS_SDR104 ||
2401 ios->timing == MMC_TIMING_UHS_DDR50 ||
2402 ios->timing == MMC_TIMING_UHS_SDR25)
2403 ctrl |= SDHCI_CTRL_HISPD;
2404 else
2405 ctrl &= ~SDHCI_CTRL_HISPD;
2406 }
2407
2408 if (host->version >= SDHCI_SPEC_300) {
2409 u16 clk, ctrl_2;
2410
2411 /*
2412 * According to SDHCI Spec v3.00, if the Preset Value
2413 * Enable in the Host Control 2 register is set, we
2414 * need to reset SD Clock Enable before changing High
2415 * Speed Enable to avoid generating clock glitches.
2416 */
2417 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2418 if (clk & SDHCI_CLOCK_CARD_EN) {
2419 clk &= ~SDHCI_CLOCK_CARD_EN;
2420 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2421 }
2422
2423 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2424
2425 if (!host->preset_enabled) {
2426 /*
2427 * We only need to set Driver Strength if the
2428 * preset value enable is not set.
2429 */
2430 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2431 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2432 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2433 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2434 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2435 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2436 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2437 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2438 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2439 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2440 else {
2441 pr_warn("%s: invalid driver type, default to driver type B\n",
2442 mmc_hostname(mmc));
2443 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2444 }
2445
2446 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2447 host->drv_type = ios->drv_type;
2448 }
2449
2450 host->ops->set_uhs_signaling(host, ios->timing);
2451 host->timing = ios->timing;
2452
2453 if (sdhci_preset_needed(host, ios->timing)) {
2454 u16 preset;
2455
2456 sdhci_enable_preset_value(host, true);
2457 preset = sdhci_get_preset_value(host);
2458 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2459 preset);
2460 host->drv_type = ios->drv_type;
2461 }
2462
2463 /* Re-enable SD Clock */
2464 host->ops->set_clock(host, host->clock);
2465 } else
2466 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2467 }
2468 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2469
2470 static int sdhci_get_cd(struct mmc_host *mmc)
2471 {
2472 struct sdhci_host *host = mmc_priv(mmc);
2473 int gpio_cd = mmc_gpio_get_cd(mmc);
2474
2475 if (host->flags & SDHCI_DEVICE_DEAD)
2476 return 0;
2477
2478 /* If nonremovable, assume that the card is always present. */
2479 if (!mmc_card_is_removable(mmc))
2480 return 1;
2481
2482 /*
2483 * Try slot gpio detect: if defined, it takes precedence
2484 * over the built-in controller functionality.
2485 */
2486 if (gpio_cd >= 0)
2487 return !!gpio_cd;
2488
2489 /* If polling, assume that the card is always present. */
2490 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2491 return 1;
2492
2493 /* Host native card detect */
2494 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2495 }
2496
2497 int sdhci_get_cd_nogpio(struct mmc_host *mmc)
2498 {
2499 struct sdhci_host *host = mmc_priv(mmc);
2500 unsigned long flags;
2501 int ret = 0;
2502
2503 spin_lock_irqsave(&host->lock, flags);
2504
2505 if (host->flags & SDHCI_DEVICE_DEAD)
2506 goto out;
2507
2508 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2509 out:
2510 spin_unlock_irqrestore(&host->lock, flags);
2511
2512 return ret;
2513 }
2514 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
2515
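/*
 * Sample the write-protect status once. A host-specific ->get_ro() op
 * takes precedence, then a write-protect GPIO, and finally the
 * WRITE_PROTECT bit in the present-state register; the inverted-WP
 * quirk is applied at most once, whichever source was used.
 */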
2516 static int sdhci_check_ro(struct sdhci_host *host)
2517 {
2518 bool allow_invert = false;
2519 int is_readonly;
2520
2521 if (host->flags & SDHCI_DEVICE_DEAD) {
2522 is_readonly = 0;
2523 } else if (host->ops->get_ro) {
2524 is_readonly = host->ops->get_ro(host);
2525 } else if (mmc_can_gpio_ro(host->mmc)) {
2526 is_readonly = mmc_gpio_get_ro(host->mmc);
2527 /* Do not invert twice */
2528 allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
2529 } else {
2530 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2531 & SDHCI_WRITE_PROTECT);
2532 allow_invert = true;
2533 }
2534
2535 if (is_readonly >= 0 &&
2536 allow_invert &&
2537 (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
2538 is_readonly = !is_readonly;
2539
2540 return is_readonly;
2541 }
2542
2543 #define SAMPLE_COUNT 5
2544
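/*
 * With SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect line is sampled
 * up to SAMPLE_COUNT times, 30 ms apart, and the card is reported
 * read-only only if a majority of the samples agree.
 */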
2545 static int sdhci_get_ro(struct mmc_host *mmc)
2546 {
2547 struct sdhci_host *host = mmc_priv(mmc);
2548 int i, ro_count;
2549
2550 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2551 return sdhci_check_ro(host);
2552
2553 ro_count = 0;
2554 for (i = 0; i < SAMPLE_COUNT; i++) {
2555 if (sdhci_check_ro(host)) {
2556 if (++ro_count > SAMPLE_COUNT / 2)
2557 return 1;
2558 }
2559 msleep(30);
2560 }
2561 return 0;
2562 }
2563
2564 static void sdhci_hw_reset(struct mmc_host *mmc)
2565 {
2566 struct sdhci_host *host = mmc_priv(mmc);
2567
2568 if (host->ops && host->ops->hw_reset)
2569 host->ops->hw_reset(host);
2570 }
2571
2572 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2573 {
2574 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2575 if (enable)
2576 host->ier |= SDHCI_INT_CARD_INT;
2577 else
2578 host->ier &= ~SDHCI_INT_CARD_INT;
2579
2580 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2581 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2582 }
2583 }
2584
2585 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2586 {
2587 struct sdhci_host *host = mmc_priv(mmc);
2588 unsigned long flags;
2589
2590 if (enable)
2591 pm_runtime_get_noresume(mmc_dev(mmc));
2592
2593 spin_lock_irqsave(&host->lock, flags);
2594 sdhci_enable_sdio_irq_nolock(host, enable);
2595 spin_unlock_irqrestore(&host->lock, flags);
2596
2597 if (!enable)
2598 pm_runtime_put_noidle(mmc_dev(mmc));
2599 }
2600 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2601
2602 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2603 {
2604 struct sdhci_host *host = mmc_priv(mmc);
2605 unsigned long flags;
2606
2607 spin_lock_irqsave(&host->lock, flags);
2608 sdhci_enable_sdio_irq_nolock(host, true);
2609 spin_unlock_irqrestore(&host->lock, flags);
2610 }
2611
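/*
 * Switch the I/O signalling voltage: clear (3.3V) or set (1.8V) the
 * 1.8V Signal Enable bit in Host Control 2, reprogram the vqmmc
 * regulator when one is present, and read the bit back to confirm the
 * switch stuck; the spec expects the regulator output to be stable
 * within 5 ms.
 */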
2612 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2613 struct mmc_ios *ios)
2614 {
2615 struct sdhci_host *host = mmc_priv(mmc);
2616 u16 ctrl;
2617 int ret;
2618
2619 /*
2620 * Signal Voltage Switching is only applicable for Host Controllers
2621 * v3.00 and above.
2622 */
2623 if (host->version < SDHCI_SPEC_300)
2624 return 0;
2625
2626 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2627
2628 switch (ios->signal_voltage) {
2629 case MMC_SIGNAL_VOLTAGE_330:
2630 if (!(host->flags & SDHCI_SIGNALING_330))
2631 return -EINVAL;
2632 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2633 ctrl &= ~SDHCI_CTRL_VDD_180;
2634 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2635
2636 if (!IS_ERR(mmc->supply.vqmmc)) {
2637 ret = mmc_regulator_set_vqmmc(mmc, ios);
2638 if (ret < 0) {
2639 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2640 mmc_hostname(mmc));
2641 return -EIO;
2642 }
2643 }
2644 /* Wait for 5ms */
2645 usleep_range(5000, 5500);
2646
2647 /* 3.3V regulator output should be stable within 5 ms */
2648 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2649 if (!(ctrl & SDHCI_CTRL_VDD_180))
2650 return 0;
2651
2652 pr_warn("%s: 3.3V regulator output did not become stable\n",
2653 mmc_hostname(mmc));
2654
2655 return -EAGAIN;
2656 case MMC_SIGNAL_VOLTAGE_180:
2657 if (!(host->flags & SDHCI_SIGNALING_180))
2658 return -EINVAL;
2659 if (!IS_ERR(mmc->supply.vqmmc)) {
2660 ret = mmc_regulator_set_vqmmc(mmc, ios);
2661 if (ret < 0) {
2662 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2663 mmc_hostname(mmc));
2664 return -EIO;
2665 }
2666 }
2667
2668 /*
2669 * Enable 1.8V Signal Enable in the Host Control2
2670 * register
2671 */
2672 ctrl |= SDHCI_CTRL_VDD_180;
2673 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2674
2675 /* Some controllers need to do more when switching */
2676 if (host->ops->voltage_switch)
2677 host->ops->voltage_switch(host);
2678
2679 /* 1.8V regulator output should be stable within 5 ms */
2680 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2681 if (ctrl & SDHCI_CTRL_VDD_180)
2682 return 0;
2683
2684 pr_warn("%s: 1.8V regulator output did not become stable\n",
2685 mmc_hostname(mmc));
2686
2687 return -EAGAIN;
2688 case MMC_SIGNAL_VOLTAGE_120:
2689 if (!(host->flags & SDHCI_SIGNALING_120))
2690 return -EINVAL;
2691 if (!IS_ERR(mmc->supply.vqmmc)) {
2692 ret = mmc_regulator_set_vqmmc(mmc, ios);
2693 if (ret < 0) {
2694 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2695 mmc_hostname(mmc));
2696 return -EIO;
2697 }
2698 }
2699 return 0;
2700 default:
2701 /* No signal voltage switch required */
2702 return 0;
2703 }
2704 }
2705 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2706
2707 static int sdhci_card_busy(struct mmc_host *mmc)
2708 {
2709 struct sdhci_host *host = mmc_priv(mmc);
2710 u32 present_state;
2711
2712 /* Check whether DAT[0] is 0 */
2713 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2714
2715 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2716 }
2717
2718 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2719 {
2720 struct sdhci_host *host = mmc_priv(mmc);
2721 unsigned long flags;
2722
2723 spin_lock_irqsave(&host->lock, flags);
2724 host->flags |= SDHCI_HS400_TUNING;
2725 spin_unlock_irqrestore(&host->lock, flags);
2726
2727 return 0;
2728 }
2729
2730 void sdhci_start_tuning(struct sdhci_host *host)
2731 {
2732 u16 ctrl;
2733
2734 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2735 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2736 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2737 ctrl |= SDHCI_CTRL_TUNED_CLK;
2738 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2739
2740 /*
2741 * As per the Host Controller spec v3.00, tuning command
2742 * generates Buffer Read Ready interrupt, so enable that.
2743 *
2744 * Note: The spec clearly says that when tuning sequence
2745 * is being performed, the controller does not generate
2746 * interrupts other than Buffer Read Ready interrupt. But
2747 * to make sure we don't hit a controller bug, we _only_
2748 * enable Buffer Read Ready interrupt here.
2749 */
2750 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2751 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2752 }
2753 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2754
2755 void sdhci_end_tuning(struct sdhci_host *host)
2756 {
2757 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2758 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2759 }
2760 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2761
2762 void sdhci_reset_tuning(struct sdhci_host *host)
2763 {
2764 u16 ctrl;
2765
2766 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2767 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2768 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2769 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2770 }
2771 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2772
2773 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2774 {
2775 sdhci_reset_tuning(host);
2776
2777 sdhci_reset_for(host, TUNING_ABORT);
2778
2779 sdhci_end_tuning(host);
2780
2781 mmc_send_abort_tuning(host->mmc, opcode);
2782 }
2783 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2784
2785 /*
2786 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2787 * tuning command does not have a data payload (or rather the hardware does it
2788 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2789 * interrupt setup is different to other commands and there is no timeout
2790 * interrupt so special handling is needed.
2791 */
2792 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2793 {
2794 struct mmc_host *mmc = host->mmc;
2795 struct mmc_command cmd = {};
2796 struct mmc_request mrq = {};
2797 unsigned long flags;
2798 u32 b = host->sdma_boundary;
2799
2800 spin_lock_irqsave(&host->lock, flags);
2801
2802 cmd.opcode = opcode;
2803 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2804 cmd.mrq = &mrq;
2805
2806 mrq.cmd = &cmd;
2807 /*
2808 * In response to CMD19, the card sends 64 bytes of tuning
2809 * block to the Host Controller, so the block size is set to
2810 * 64 here; for CMD21 on an 8-bit bus, the block is 128 bytes.
2811 */
2812 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2813 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2814 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2815 else
2816 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2817
2818 /*
2819 * The tuning block is sent by the card to the host controller.
2820 * So we set the TRNS_READ bit in the Transfer Mode register.
2821 * This also takes care of setting DMA Enable and Multi Block
2822 * Select in the same register to 0.
2823 */
2824 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2825
2826 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2827 spin_unlock_irqrestore(&host->lock, flags);
2828 host->tuning_done = 0;
2829 return;
2830 }
2831
2832 host->cmd = NULL;
2833
2834 sdhci_del_timer(host, &mrq);
2835
2836 host->tuning_done = 0;
2837
2838 spin_unlock_irqrestore(&host->lock, flags);
2839
2840 /* Wait for Buffer Read Ready interrupt */
2841 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2842 msecs_to_jiffies(50));
2843
2844 }
2845 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2846
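/*
 * Core tuning loop: keep issuing the tuning command until the
 * controller clears Execute Tuning in Host Control 2, for at most
 * host->tuning_loop_count iterations. Success means Tuned Clock is set
 * as well; anything else falls back to the fixed sampling clock.
 */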
2847 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2848 {
2849 int i;
2850
2851 /*
2852 * Issue the tuning opcode repeatedly until Execute Tuning is cleared
2853 * to 0 or the number of loops reaches the tuning loop count.
2854 */
2855 for (i = 0; i < host->tuning_loop_count; i++) {
2856 u16 ctrl;
2857
2858 sdhci_send_tuning(host, opcode);
2859
2860 if (!host->tuning_done) {
2861 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2862 mmc_hostname(host->mmc));
2863 sdhci_abort_tuning(host, opcode);
2864 return -ETIMEDOUT;
2865 }
2866
2867 /* Spec does not require a delay between tuning cycles */
2868 if (host->tuning_delay > 0)
2869 mdelay(host->tuning_delay);
2870
2871 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2872 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2873 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2874 return 0; /* Success! */
2875 break;
2876 }
2877
2878 }
2879
2880 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2881 mmc_hostname(host->mmc));
2882 sdhci_reset_tuning(host);
2883 return -EAGAIN;
2884 }
2885
2886 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2887 {
2888 struct sdhci_host *host = mmc_priv(mmc);
2889 int err = 0;
2890 unsigned int tuning_count = 0;
2891 bool hs400_tuning;
2892
2893 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2894
2895 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2896 tuning_count = host->tuning_count;
2897
2898 /*
2899 * The Host Controller needs tuning in the SDR104 and DDR50
2900 * modes, and in SDR50 mode when Use Tuning for SDR50 is set in
2901 * the Capabilities register.
2902 * If the Host Controller supports the HS200 mode then the
2903 * tuning function has to be executed.
2904 */
2905 switch (host->timing) {
2906 /* HS400 tuning is done in HS200 mode */
2907 case MMC_TIMING_MMC_HS400:
2908 err = -EINVAL;
2909 goto out;
2910
2911 case MMC_TIMING_MMC_HS200:
2912 /*
2913 * Periodic re-tuning for HS400 is not expected to be needed, so
2914 * disable it here.
2915 */
2916 if (hs400_tuning)
2917 tuning_count = 0;
2918 break;
2919
2920 case MMC_TIMING_UHS_SDR104:
2921 case MMC_TIMING_UHS_DDR50:
2922 break;
2923
2924 case MMC_TIMING_UHS_SDR50:
2925 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2926 break;
2927 fallthrough;
2928
2929 default:
2930 goto out;
2931 }
2932
2933 if (host->ops->platform_execute_tuning) {
2934 err = host->ops->platform_execute_tuning(host, opcode);
2935 goto out;
2936 }
2937
2938 mmc->retune_period = tuning_count;
2939
2940 if (host->tuning_delay < 0)
2941 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2942
2943 sdhci_start_tuning(host);
2944
2945 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2946
2947 sdhci_end_tuning(host);
2948 out:
2949 host->flags &= ~SDHCI_HS400_TUNING;
2950
2951 return err;
2952 }
2953 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2954
2955 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2956 {
2957 /* Host Controller v3.00 defines preset value registers */
2958 if (host->version < SDHCI_SPEC_300)
2959 return;
2960
2961 /*
2962 * Only enable or disable Preset Value if it is not already in the
2963 * requested state; otherwise, bail out.
2964 */
2965 if (host->preset_enabled != enable) {
2966 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2967
2968 if (enable)
2969 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2970 else
2971 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2972
2973 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2974
2975 if (enable)
2976 host->flags |= SDHCI_PV_ENABLED;
2977 else
2978 host->flags &= ~SDHCI_PV_ENABLED;
2979
2980 host->preset_enabled = enable;
2981 }
2982 }
2983
2984 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2985 int err)
2986 {
2987 struct mmc_data *data = mrq->data;
2988
2989 if (data->host_cookie != COOKIE_UNMAPPED)
2990 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2991 mmc_get_dma_dir(data));
2992
2993 data->host_cookie = COOKIE_UNMAPPED;
2994 }
2995
2996 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2997 {
2998 struct sdhci_host *host = mmc_priv(mmc);
2999
3000 mrq->data->host_cookie = COOKIE_UNMAPPED;
3001
3002 /*
3003 * No pre-mapping in the pre hook if we're using the bounce buffer,
3004 * for that we would need two bounce buffers since one buffer is
3005 * in flight when this is getting called.
3006 */
3007 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
3008 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
3009 }
3010
3011 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
3012 {
3013 if (host->data_cmd) {
3014 host->data_cmd->error = err;
3015 sdhci_finish_mrq(host, host->data_cmd->mrq);
3016 }
3017
3018 if (host->cmd) {
3019 host->cmd->error = err;
3020 sdhci_finish_mrq(host, host->cmd->mrq);
3021 }
3022 }
3023
3024 static void sdhci_card_event(struct mmc_host *mmc)
3025 {
3026 struct sdhci_host *host = mmc_priv(mmc);
3027 unsigned long flags;
3028 int present;
3029
3030 /* First check if client has provided their own card event */
3031 if (host->ops->card_event)
3032 host->ops->card_event(host);
3033
3034 present = mmc->ops->get_cd(mmc);
3035
3036 spin_lock_irqsave(&host->lock, flags);
3037
3038 /* Check sdhci_has_requests() first in case we are runtime suspended */
3039 if (sdhci_has_requests(host) && !present) {
3040 pr_err("%s: Card removed during transfer!\n",
3041 mmc_hostname(mmc));
3042 pr_err("%s: Resetting controller.\n",
3043 mmc_hostname(mmc));
3044
3045 sdhci_reset_for(host, CARD_REMOVED);
3046
3047 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3048 }
3049
3050 spin_unlock_irqrestore(&host->lock, flags);
3051 }
3052
3053 static const struct mmc_host_ops sdhci_ops = {
3054 .request = sdhci_request,
3055 .post_req = sdhci_post_req,
3056 .pre_req = sdhci_pre_req,
3057 .set_ios = sdhci_set_ios,
3058 .get_cd = sdhci_get_cd,
3059 .get_ro = sdhci_get_ro,
3060 .card_hw_reset = sdhci_hw_reset,
3061 .enable_sdio_irq = sdhci_enable_sdio_irq,
3062 .ack_sdio_irq = sdhci_ack_sdio_irq,
3063 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
3064 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
3065 .execute_tuning = sdhci_execute_tuning,
3066 .card_event = sdhci_card_event,
3067 .card_busy = sdhci_card_busy,
3068 };
3069
3070 /*****************************************************************************\
3071 * *
3072 * Request done *
3073 * *
3074 \*****************************************************************************/
3075
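/*
 * Complete one finished request, if any. Returns true when nothing more
 * can be done right now, so callers loop until it does. Error recovery
 * resets the controller once the command and data lines are free, and
 * any DMA mappings (including the bounce buffer) are torn down before
 * the request is handed back to the core.
 */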
3076 static bool sdhci_request_done(struct sdhci_host *host)
3077 {
3078 unsigned long flags;
3079 struct mmc_request *mrq;
3080 int i;
3081
3082 spin_lock_irqsave(&host->lock, flags);
3083
3084 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3085 mrq = host->mrqs_done[i];
3086 if (mrq)
3087 break;
3088 }
3089
3090 if (!mrq) {
3091 spin_unlock_irqrestore(&host->lock, flags);
3092 return true;
3093 }
3094
3095 /*
3096 * The controller needs a reset of internal state machines
3097 * upon error conditions.
3098 */
3099 if (sdhci_needs_reset(host, mrq)) {
3100 /*
3101 * Do not finish until command and data lines are available for
3102 * reset. Note there can only be one other mrq, so it cannot
3103 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3104 * would both be null.
3105 */
3106 if (host->cmd || host->data_cmd) {
3107 spin_unlock_irqrestore(&host->lock, flags);
3108 return true;
3109 }
3110
3111 /* Some controllers need this kick or reset won't work here */
3112 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3113 /* This is to force an update */
3114 host->ops->set_clock(host, host->clock);
3115
3116 sdhci_reset_for(host, REQUEST_ERROR);
3117
3118 host->pending_reset = false;
3119 }
3120
3121 /*
3122 * Always unmap the data buffers if they were mapped by
3123 * sdhci_prepare_data() whenever we finish with a request.
3124 * This avoids leaking DMA mappings on error.
3125 */
3126 if (host->flags & SDHCI_REQ_USE_DMA) {
3127 struct mmc_data *data = mrq->data;
3128
3129 if (host->use_external_dma && data &&
3130 (mrq->cmd->error || data->error)) {
3131 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3132
3133 host->mrqs_done[i] = NULL;
3134 spin_unlock_irqrestore(&host->lock, flags);
3135 dmaengine_terminate_sync(chan);
3136 spin_lock_irqsave(&host->lock, flags);
3137 sdhci_set_mrq_done(host, mrq);
3138 }
3139
3140 if (data && data->host_cookie == COOKIE_MAPPED) {
3141 if (host->bounce_buffer) {
3142 /*
3143 * On reads, copy the bounced data into the
3144 * sglist
3145 */
3146 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3147 unsigned int length = data->bytes_xfered;
3148
3149 if (length > host->bounce_buffer_size) {
3150 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3151 mmc_hostname(host->mmc),
3152 host->bounce_buffer_size,
3153 data->bytes_xfered);
3154 /* Cap it down and continue */
3155 length = host->bounce_buffer_size;
3156 }
3157 dma_sync_single_for_cpu(
3158 mmc_dev(host->mmc),
3159 host->bounce_addr,
3160 host->bounce_buffer_size,
3161 DMA_FROM_DEVICE);
3162 sg_copy_from_buffer(data->sg,
3163 data->sg_len,
3164 host->bounce_buffer,
3165 length);
3166 } else {
3167 /* No copying, just switch ownership */
3168 dma_sync_single_for_cpu(
3169 mmc_dev(host->mmc),
3170 host->bounce_addr,
3171 host->bounce_buffer_size,
3172 mmc_get_dma_dir(data));
3173 }
3174 } else {
3175 /* Unmap the raw data */
3176 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3177 data->sg_len,
3178 mmc_get_dma_dir(data));
3179 }
3180 data->host_cookie = COOKIE_UNMAPPED;
3181 }
3182 }
3183
3184 host->mrqs_done[i] = NULL;
3185
3186 spin_unlock_irqrestore(&host->lock, flags);
3187
3188 if (host->ops->request_done)
3189 host->ops->request_done(host, mrq);
3190 else
3191 mmc_request_done(host->mmc, mrq);
3192
3193 return false;
3194 }
3195
3196 static void sdhci_complete_work(struct work_struct *work)
3197 {
3198 struct sdhci_host *host = container_of(work, struct sdhci_host,
3199 complete_work);
3200
3201 while (!sdhci_request_done(host))
3202 ;
3203 }
3204
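/*
 * Software watchdogs: if an expected hardware interrupt never arrives,
 * fail the request with -ETIMEDOUT instead of hanging forever. Commands
 * that do not use the data lines are covered by the first timer,
 * everything else by the data timer.
 */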
3205 static void sdhci_timeout_timer(struct timer_list *t)
3206 {
3207 struct sdhci_host *host;
3208 unsigned long flags;
3209
3210 host = from_timer(host, t, timer);
3211
3212 spin_lock_irqsave(&host->lock, flags);
3213
3214 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3215 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3216 mmc_hostname(host->mmc));
3217 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3218 sdhci_dumpregs(host);
3219
3220 host->cmd->error = -ETIMEDOUT;
3221 sdhci_finish_mrq(host, host->cmd->mrq);
3222 }
3223
3224 spin_unlock_irqrestore(&host->lock, flags);
3225 }
3226
3227 static void sdhci_timeout_data_timer(struct timer_list *t)
3228 {
3229 struct sdhci_host *host;
3230 unsigned long flags;
3231
3232 host = from_timer(host, t, data_timer);
3233
3234 spin_lock_irqsave(&host->lock, flags);
3235
3236 if (host->data || host->data_cmd ||
3237 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3238 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3239 mmc_hostname(host->mmc));
3240 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3241 sdhci_dumpregs(host);
3242
3243 if (host->data) {
3244 host->data->error = -ETIMEDOUT;
3245 __sdhci_finish_data(host, true);
3246 queue_work(host->complete_wq, &host->complete_work);
3247 } else if (host->data_cmd) {
3248 host->data_cmd->error = -ETIMEDOUT;
3249 sdhci_finish_mrq(host, host->data_cmd->mrq);
3250 } else {
3251 host->cmd->error = -ETIMEDOUT;
3252 sdhci_finish_mrq(host, host->cmd->mrq);
3253 }
3254 }
3255
3256 spin_unlock_irqrestore(&host->lock, flags);
3257 }
3258
3259 /*****************************************************************************\
3260 * *
3261 * Interrupt handling *
3262 * *
3263 \*****************************************************************************/
3264
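/*
 * Command interrupt handler: auto-CMD12/23 errors are translated into
 * the equivalent data or command errors, command timeout and CRC/index
 * errors mark the command with -ETIMEDOUT or -EILSEQ respectively, and
 * a response interrupt completes the command.
 */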
3265 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3266 {
3267 /* Handle auto-CMD12 error */
3268 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3269 struct mmc_request *mrq = host->data_cmd->mrq;
3270 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3271 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3272 SDHCI_INT_DATA_TIMEOUT :
3273 SDHCI_INT_DATA_CRC;
3274
3275 /* Treat auto-CMD12 error the same as data error */
3276 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3277 *intmask_p |= data_err_bit;
3278 return;
3279 }
3280 }
3281
3282 if (!host->cmd) {
3283 /*
3284 * SDHCI recovers from errors by resetting the cmd and data
3285 * circuits. Until that is done, there very well might be more
3286 * interrupts, so ignore them in that case.
3287 */
3288 if (host->pending_reset)
3289 return;
3290 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3291 mmc_hostname(host->mmc), (unsigned)intmask);
3292 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3293 sdhci_dumpregs(host);
3294 return;
3295 }
3296
3297 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3298 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3299 if (intmask & SDHCI_INT_TIMEOUT) {
3300 host->cmd->error = -ETIMEDOUT;
3301 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3302 } else {
3303 host->cmd->error = -EILSEQ;
3304 if (!mmc_op_tuning(host->cmd->opcode))
3305 sdhci_err_stats_inc(host, CMD_CRC);
3306 }
3307 /* Treat data command CRC error the same as data CRC error */
3308 if (host->cmd->data &&
3309 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3310 SDHCI_INT_CRC) {
3311 host->cmd = NULL;
3312 *intmask_p |= SDHCI_INT_DATA_CRC;
3313 return;
3314 }
3315
3316 __sdhci_finish_mrq(host, host->cmd->mrq);
3317 return;
3318 }
3319
3320 /* Handle auto-CMD23 error */
3321 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3322 struct mmc_request *mrq = host->cmd->mrq;
3323 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3324 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3325 -ETIMEDOUT :
3326 -EILSEQ;
3327
3328 sdhci_err_stats_inc(host, AUTO_CMD);
3329
3330 if (sdhci_auto_cmd23(host, mrq)) {
3331 mrq->sbc->error = err;
3332 __sdhci_finish_mrq(host, mrq);
3333 return;
3334 }
3335 }
3336
3337 if (intmask & SDHCI_INT_RESPONSE)
3338 sdhci_finish_command(host);
3339 }
3340
3341 static void sdhci_adma_show_error(struct sdhci_host *host)
3342 {
3343 void *desc = host->adma_table;
3344 dma_addr_t dma = host->adma_addr;
3345
3346 sdhci_dumpregs(host);
3347
3348 while (true) {
3349 struct sdhci_adma2_64_desc *dma_desc = desc;
3350
3351 if (host->flags & SDHCI_USE_64_BIT_DMA)
3352 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3353 (unsigned long long)dma,
3354 le32_to_cpu(dma_desc->addr_hi),
3355 le32_to_cpu(dma_desc->addr_lo),
3356 le16_to_cpu(dma_desc->len),
3357 le16_to_cpu(dma_desc->cmd));
3358 else
3359 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3360 (unsigned long long)dma,
3361 le32_to_cpu(dma_desc->addr_lo),
3362 le16_to_cpu(dma_desc->len),
3363 le16_to_cpu(dma_desc->cmd));
3364
3365 desc += host->desc_sz;
3366 dma += host->desc_sz;
3367
3368 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3369 break;
3370 }
3371 }
3372
3373 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3374 {
3375 /*
3376 * CMD19 generates _only_ a Buffer Read Ready interrupt when
3377 * sdhci_send_tuning() is used.
3378 * The case of PIO mode with mmc_send_tuning() must be excluded:
3379 * otherwise sdhci_transfer_pio() would never be called, leaving
3380 * SDHCI_INT_DATA_AVAIL permanently asserted and causing an irq storm.
3381 */
3382 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3383 if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) {
3384 host->tuning_done = 1;
3385 wake_up(&host->buf_ready_int);
3386 return;
3387 }
3388 }
3389
3390 if (!host->data) {
3391 struct mmc_command *data_cmd = host->data_cmd;
3392
3393 /*
3394 * The "data complete" interrupt is also used to
3395 * indicate that a busy state has ended. See comment
3396 * above in sdhci_cmd_irq().
3397 */
3398 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3399 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3400 host->data_cmd = NULL;
3401 data_cmd->error = -ETIMEDOUT;
3402 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3403 __sdhci_finish_mrq(host, data_cmd->mrq);
3404 return;
3405 }
3406 if (intmask & SDHCI_INT_DATA_END) {
3407 host->data_cmd = NULL;
3408 /*
3409 * Some cards handle busy-end interrupt
3410 * before the command completed, so make
3411 * sure we do things in the proper order.
3412 */
3413 if (host->cmd == data_cmd)
3414 return;
3415
3416 __sdhci_finish_mrq(host, data_cmd->mrq);
3417 return;
3418 }
3419 }
3420
3421 /*
3422 * SDHCI recovers from errors by resetting the cmd and data
3423 * circuits. Until that is done, there very well might be more
3424 * interrupts, so ignore them in that case.
3425 */
3426 if (host->pending_reset)
3427 return;
3428
3429 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3430 mmc_hostname(host->mmc), (unsigned)intmask);
3431 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3432 sdhci_dumpregs(host);
3433
3434 return;
3435 }
3436
3437 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3438 host->data->error = -ETIMEDOUT;
3439 sdhci_err_stats_inc(host, DAT_TIMEOUT);
3440 } else if (intmask & SDHCI_INT_DATA_END_BIT) {
3441 host->data->error = -EILSEQ;
3442 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3443 sdhci_err_stats_inc(host, DAT_CRC);
3444 } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
3445 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3446 != MMC_BUS_TEST_R) {
3447 host->data->error = -EILSEQ;
3448 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3449 sdhci_err_stats_inc(host, DAT_CRC);
3450 if (intmask & SDHCI_INT_TUNING_ERROR) {
3451 u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
3452
3453 ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
3454 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
3455 }
3456 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3457 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3458 intmask);
3459 sdhci_adma_show_error(host);
3460 sdhci_err_stats_inc(host, ADMA);
3461 host->data->error = -EIO;
3462 if (host->ops->adma_workaround)
3463 host->ops->adma_workaround(host, intmask);
3464 }
3465
3466 if (host->data->error)
3467 sdhci_finish_data(host);
3468 else {
3469 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3470 sdhci_transfer_pio(host);
3471
3472 /*
3473 * We currently don't do anything fancy with DMA
3474 * boundaries, but as we can't disable the feature
3475 * we need to at least restart the transfer.
3476 *
3477 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3478 * should return a valid address to continue from, but as
3479 * some controllers are faulty, don't trust them.
3480 */
3481 if (intmask & SDHCI_INT_DMA_END) {
3482 dma_addr_t dmastart, dmanow;
3483
3484 dmastart = sdhci_sdma_address(host);
3485 dmanow = dmastart + host->data->bytes_xfered;
3486 /*
3487 * Force update to the next DMA block boundary.
3488 */
3489 dmanow = (dmanow &
3490 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3491 SDHCI_DEFAULT_BOUNDARY_SIZE;
3492 host->data->bytes_xfered = dmanow - dmastart;
3493 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3494 &dmastart, host->data->bytes_xfered, &dmanow);
3495 sdhci_set_sdma_addr(host, dmanow);
3496 }
3497
3498 if (intmask & SDHCI_INT_DATA_END) {
3499 if (host->cmd == host->data_cmd) {
3500 /*
3501 * Data managed to finish before the
3502 * command completed. Make sure we do
3503 * things in the proper order.
3504 */
3505 host->data_early = 1;
3506 } else {
3507 sdhci_finish_data(host);
3508 }
3509 }
3510 }
3511 }
3512
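/*
 * Decide whether request completion must be deferred to the IRQ thread:
 * while a reset is pending, when the host always defers, or when DMA
 * mappings made by the driver still need to be torn down.
 */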
3513 static inline bool sdhci_defer_done(struct sdhci_host *host,
3514 struct mmc_request *mrq)
3515 {
3516 struct mmc_data *data = mrq->data;
3517
3518 return host->pending_reset || host->always_defer_done ||
3519 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3520 data->host_cookie == COOKIE_MAPPED);
3521 }
3522
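/*
 * Hard interrupt handler: read and clear the interrupt status in a loop
 * (bounded to 16 iterations), dispatch command/data interrupts, mask
 * card-detect interrupts to avoid storms, and wake the IRQ thread for
 * card events and any work that needs a sleepable context.
 */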
3523 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3524 {
3525 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3526 irqreturn_t result = IRQ_NONE;
3527 struct sdhci_host *host = dev_id;
3528 u32 intmask, mask, unexpected = 0;
3529 int max_loops = 16;
3530 int i;
3531
3532 spin_lock(&host->lock);
3533
3534 if (host->runtime_suspended) {
3535 spin_unlock(&host->lock);
3536 return IRQ_NONE;
3537 }
3538
3539 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3540 if (!intmask || intmask == 0xffffffff) {
3541 result = IRQ_NONE;
3542 goto out;
3543 }
3544
3545 do {
3546 DBG("IRQ status 0x%08x\n", intmask);
3547
3548 if (host->ops->irq) {
3549 intmask = host->ops->irq(host, intmask);
3550 if (!intmask)
3551 goto cont;
3552 }
3553
3554 /* Clear selected interrupts. */
3555 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3556 SDHCI_INT_BUS_POWER);
3557 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3558
3559 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3560 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3561 SDHCI_CARD_PRESENT;
3562
3563 /*
3564 * There is an observation on i.MX eSDHC: the INSERT
3565 * bit is immediately set again when it gets
3566 * cleared, if a card is inserted. We have to mask
3567 * the irq to prevent an interrupt storm which will
3568 * freeze the system. And the REMOVE bit gets the
3569 * same treatment.
3570 *
3571 * More testing is needed here to ensure it works
3572 * for other platforms though.
3573 */
3574 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3575 SDHCI_INT_CARD_REMOVE);
3576 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3577 SDHCI_INT_CARD_INSERT;
3578 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3579 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3580
3581 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3582 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3583
3584 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3585 SDHCI_INT_CARD_REMOVE);
3586 result = IRQ_WAKE_THREAD;
3587 }
3588
3589 if (intmask & SDHCI_INT_CMD_MASK)
3590 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3591
3592 if (intmask & SDHCI_INT_DATA_MASK)
3593 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3594
3595 if (intmask & SDHCI_INT_BUS_POWER)
3596 pr_err("%s: Card is consuming too much power!\n",
3597 mmc_hostname(host->mmc));
3598
3599 if (intmask & SDHCI_INT_RETUNE)
3600 mmc_retune_needed(host->mmc);
3601
3602 if ((intmask & SDHCI_INT_CARD_INT) &&
3603 (host->ier & SDHCI_INT_CARD_INT)) {
3604 sdhci_enable_sdio_irq_nolock(host, false);
3605 sdio_signal_irq(host->mmc);
3606 }
3607
3608 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3609 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3610 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3611 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3612
3613 if (intmask) {
3614 unexpected |= intmask;
3615 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3616 }
3617 cont:
3618 if (result == IRQ_NONE)
3619 result = IRQ_HANDLED;
3620
3621 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3622 } while (intmask && --max_loops);
3623
3624 /* Determine if mrqs can be completed immediately */
3625 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3626 struct mmc_request *mrq = host->mrqs_done[i];
3627
3628 if (!mrq)
3629 continue;
3630
3631 if (sdhci_defer_done(host, mrq)) {
3632 result = IRQ_WAKE_THREAD;
3633 } else {
3634 mrqs_done[i] = mrq;
3635 host->mrqs_done[i] = NULL;
3636 }
3637 }
3638 out:
3639 if (host->deferred_cmd)
3640 result = IRQ_WAKE_THREAD;
3641
3642 spin_unlock(&host->lock);
3643
3644 /* Process mrqs ready for immediate completion */
3645 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3646 if (!mrqs_done[i])
3647 continue;
3648
3649 if (host->ops->request_done)
3650 host->ops->request_done(host, mrqs_done[i]);
3651 else
3652 mmc_request_done(host->mmc, mrqs_done[i]);
3653 }
3654
3655 if (unexpected) {
3656 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3657 mmc_hostname(host->mmc), unexpected);
3658 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3659 sdhci_dumpregs(host);
3660 }
3661
3662 return result;
3663 }
3664
3665 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3666 {
3667 struct sdhci_host *host = dev_id;
3668 struct mmc_command *cmd;
3669 unsigned long flags;
3670 u32 isr;
3671
3672 while (!sdhci_request_done(host))
3673 ;
3674
3675 spin_lock_irqsave(&host->lock, flags);
3676
3677 isr = host->thread_isr;
3678 host->thread_isr = 0;
3679
3680 cmd = host->deferred_cmd;
3681 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3682 sdhci_finish_mrq(host, cmd->mrq);
3683
3684 spin_unlock_irqrestore(&host->lock, flags);
3685
3686 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3687 struct mmc_host *mmc = host->mmc;
3688
3689 mmc->ops->card_event(mmc);
3690 mmc_detect_change(mmc, msecs_to_jiffies(200));
3691 }
3692
3693 return IRQ_HANDLED;
3694 }

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
{
	return mmc_card_is_removable(host->mmc) &&
	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	       !mmc_can_gpio_cd(host->mmc);
}

/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
		  SDHCI_WAKE_ON_INT;
	u32 irq_val = 0;
	u8 wake_val = 0;
	u8 val;

	if (sdhci_cd_irq_can_wakeup(host)) {
		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
	}

	if (mmc_card_wake_sdio_irq(host->mmc)) {
		wake_val |= SDHCI_WAKE_ON_INT;
		irq_val |= SDHCI_INT_CARD_INT;
	}

	if (!irq_val)
		return false;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	val |= wake_val;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);

	host->irq_wake_enabled = !enable_irq_wake(host->irq);

	return host->irq_wake_enabled;
}

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

	disable_irq_wake(host->irq);

	host->irq_wake_enabled = false;
}

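/*
 * If the device may wake the system and the wakeup IRQs can be armed,
 * the controller is left powered with only the wakeup sources enabled;
 * otherwise all interrupts are masked and the IRQ handler is released
 * until resume.
 */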
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
	    !sdhci_enable_irq_wakeups(host)) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		host->reinit_uhs = true;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (host->irq_wake_enabled) {
		sdhci_disable_irq_wakeups(host);
	} else {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(mmc), host);
		if (ret)
			return ret;
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);

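/*
 * Runtime suspend keeps the card interrupt as the only enabled source
 * (so a SDIO card can still signal the host) and waits for any hard IRQ
 * in flight before marking the host runtime-suspended.
 */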
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

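/*
 * Runtime resume re-initializes the controller and, if the card is
 * still powered, forces clock, power and signal voltage to be
 * reprogrammed before preset values, SDIO IRQs and card detection are
 * re-enabled.
 */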
int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, soft_reset);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
	    mmc->ios.power_mode != MMC_POWER_OFF) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		host->reinit_uhs = true;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (sdio_irq_claimed(mmc))
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine (CQE) helpers                                        *
 *                                                                           *
\*****************************************************************************/

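/*
 * Switch the controller into a state suitable for the Command Queue
 * Engine: pick the widest available ADMA descriptor format, program a
 * 512-byte block size, set the maximum timeout and unmask the CQE
 * interrupt set.
 */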
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	/*
	 * Hosts from v4.10 onwards support the ADMA3 DMA type.
	 * ADMA3 uses integrated descriptors, which suit command
	 * queuing better because command and transfer descriptors
	 * are fetched together.
	 */
	if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
		ctrl |= SDHCI_CTRL_ADMA3;
	else if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_set_timeout(host, NULL);

	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);

void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery)
		sdhci_reset_for(host, CQE_RECOVERY);

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);

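/*
 * Called from a CQE driver's interrupt handler to translate raw SDHCI
 * interrupt status into command/data error codes. Returns true when the
 * interrupt has been consumed here because the CQE is active, false to
 * let the caller handle it.
 */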
bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
		*cmd_error = -EILSEQ;
		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
			sdhci_err_stats_inc(host, CMD_CRC);
	} else if (intmask & SDHCI_INT_TIMEOUT) {
		*cmd_error = -ETIMEDOUT;
		sdhci_err_stats_inc(host, CMD_TIMEOUT);
	} else
		*cmd_error = 0;

	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
		*data_error = -EILSEQ;
		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
			sdhci_err_stats_inc(host, DAT_CRC);
	} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
		*data_error = -ETIMEDOUT;
		sdhci_err_stats_inc(host, DAT_TIMEOUT);
	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
		*data_error = -EIO;
		sdhci_err_stats_inc(host, ADMA);
	} else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

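/*
 * Typical driver usage is alloc -> add -> (remove -> free). A minimal
 * probe sketch, assuming I/O resources have already been mapped; names
 * such as my_priv and my_sdhci_ops are illustrative, not part of this
 * file:
 *
 *	struct sdhci_host *host;
 *	int ret;
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->ioaddr = ioaddr;
 *	host->irq = irq;
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *	return ret;
 */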
struct sdhci_host *sdhci_alloc_host(struct device *dev,
				    size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;
	host->tuning_loop_count = MAX_TUNING_LOOP;

	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

	/*
	 * The DMA table descriptor count is calculated as the maximum
	 * number of segments times 2, to allow for an alignment
	 * descriptor for each segment, plus 1 for a nop end descriptor.
	 */
	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
	host->max_adma = 65536;

	host->max_timeout_count = 0xE;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

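/*
 * The capability registers can be amended from firmware through the
 * "sdhci-caps" and "sdhci-caps-mask" device properties, each a 64-bit
 * value covering CAPABILITIES_1:CAPABILITIES. As an illustrative
 * example (values are hypothetical, not from this file), a device tree
 * node could mask out the DDR50 capability bit with:
 *
 *	sdhci-caps-mask = <0x00000004 0x00000000>;
 *
 * where the first cell holds the upper 32 bits (CAPABILITIES_1).
 */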
void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
		       const u32 *caps, const u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_reset_for_all(host);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	device_property_read_u64(mmc_dev(host->mmc),
				 "sdhci-caps-mask", &dt_caps_mask);
	device_property_read_u64(mmc_dev(host->mmc),
				 "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);

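/*
 * For hosts limited to a single segment (typically SDMA), a bounce
 * buffer lets the core merge a scattered request into one contiguous
 * DMA area, which is usually much faster than issuing one transfer per
 * scatterlist entry.
 */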
static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned int max_blocks;
	unsigned int bounce_size;
	int ret;

	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has diminishing returns; this is probably because SD/MMC
	 * cards are usually optimized to handle this size of requests.
	 */
	bounce_size = SZ_64K;
	/*
	 * Adjust downwards to maximum request size if this is less
	 * than our segment size, else hammer down the maximum
	 * request size to the maximum buffer size.
	 */
	if (mmc->max_req_size < bounce_size)
		bounce_size = mmc->max_req_size;
	max_blocks = bounce_size / 512;

	/*
	 * When we support just one segment, we can get significant
	 * speedups with the help of a bounce buffer that groups
	 * scattered reads/writes together.
	 */
	host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
					   bounce_size,
					   GFP_KERNEL);
	if (!host->bounce_buffer) {
		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
		       mmc_hostname(mmc),
		       bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
		return;
	}

	host->bounce_addr = dma_map_single(mmc_dev(mmc),
					   host->bounce_buffer,
					   bounce_size,
					   DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
	if (ret) {
		devm_kfree(mmc_dev(mmc), host->bounce_buffer);
		host->bounce_buffer = NULL;
		/* Again fall back to max_segs == 1 */
		return;
	}

	host->bounce_buffer_size = bounce_size;

	/* Lie about this since we're bouncing */
	mmc->max_segs = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;

	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
		mmc_hostname(mmc), max_blocks, bounce_size);
}

static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
	/*
	 * According to SD Host Controller spec v4.10, bit[27] added from
	 * version 4.10 in Capabilities Register is used as 64-bit System
	 * Address support for V4 mode.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
		return host->caps & SDHCI_CAN_64BIT_V4;

	return host->caps & SDHCI_CAN_64BIT;
}

int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret = 0;
	bool enable_vqmmc = false;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so
	 * that the host can take the appropriate action if regulators are
	 * not available.
	 */
	if (!mmc->supply.vqmmc) {
		ret = mmc_regulator_get_supply(mmc);
		if (ret)
			return ret;
		enable_vqmmc = true;
	}

	DBG("Version:   0x%08x | Present:  0x%08x\n",
	    sdhci_readw(host, SDHCI_HOST_VERSION),
	    sdhci_readl(host, SDHCI_PRESENT_STATE));
	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
	    sdhci_readl(host, SDHCI_CAPABILITIES),
	    sdhci_readl(host, SDHCI_CAPABILITIES_1));

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_420) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	if (sdhci_can_64bit_dma(host))
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->use_external_dma) {
		ret = sdhci_external_dma_init(host);
		if (ret == -EPROBE_DEFER)
			goto unreg;
		/*
		 * Fall back to use the DMA/PIO integrated in standard SDHCI
		 * instead of external DMA devices.
		 */
		else if (ret)
			sdhci_switch_external_dma(host, false);
		/* Disable internal DMA sources */
		else
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->set_dma_mask)
			ret = host->ops->set_dma_mask(host);
		else
			ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA if v4 mode not set */
	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		else if (!host->alloc_desc_sz)
			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

		host->desc_sz = host->alloc_desc_sz;
		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * Use zalloc to zero the reserved high 32-bits of 128-bit
		 * descriptors so that they never need to be written.
		 */
		buf = dma_alloc_coherent(mmc_dev(mmc),
					 host->align_buffer_sz + host->adma_table_sz,
					 &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
	else
		host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
	    SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul)
			max_clk = host->max_clk * host->clk_mul;
		/*
		 * Divided Clock Mode minimum clock rate is always less than
		 * Programmable Clock Mode minimum clock rate.
		 */
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

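	/*
	 * Worked example (illustrative numbers): a v3.00 controller
	 * reporting a 200 MHz base clock and a Clock Multiplier field of
	 * 3 gets clk_mul = 4, so f_max = 200 MHz * 4 = 800 MHz in
	 * programmable clock mode, while f_min = 200 MHz / 2046 (the
	 * largest v3.00 divider) is roughly 98 kHz.
	 */
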
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (enable_vqmmc) {
			ret = regulator_enable(mmc->supply.vqmmc);
			host->sdhci_core_to_disable_vqmmc = !ret;
		}

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt
		 * property), but if the board is modeled such that the IO
		 * lines are not connected to 1.8V then HS200/HS400 cannot be
		 * supported. Disable HS200/HS400 if the board does not have
		 * 1.8V connected to the IO lines. (Applicable for other
		 * modes in 1.8V)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/*
		 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
				       host->caps1);

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

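	/*
	 * For example, a Re-tuning Timer Count field of 4 yields
	 * 2 ^ (4 - 1) = 8, i.e. re-tuning every 8 seconds (the field
	 * encodes seconds per the SDHCI v3.00 specification).
	 */
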
	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
					  dma_max_mapping_size(mmc_dev(mmc)));
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
			host->max_adma = 65532; /* 32-bit alignment */
			mmc->max_seg_size = 65535;
		} else {
			mmc->max_seg_size = 65536;
		}
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
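	/*
	 * The capabilities field encodes the maximum block size as a
	 * power-of-two multiple of 512: 0 -> 512, 1 -> 1024, 2 -> 2048
	 * bytes. E.g. SDHCI_QUIRK_FORCE_BLK_SZ_2048 pins the field to 2,
	 * giving 512 << 2 = 2048 above.
	 */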

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	if ((mmc->caps2 & MMC_CAP2_CQE) &&
	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
		mmc->caps2 &= ~MMC_CAP2_CQE;
		mmc->cqe_ops = NULL;
	}

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		host->use_external_dma ? "External DMA" :
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_reset_for_all(host);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_reset_for_all(host);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");