1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * Thanks to the following companies for their support:
7 *
8 * - JMicron (hardware and technical support)
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/gpio.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm_qos.h>
26 #include <linux/debugfs.h>
27 #include <linux/acpi.h>
28 #include <linux/dmi.h>
29
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/slot-gpio.h>
33
34 #ifdef CONFIG_X86
35 #include <asm/iosf_mbi.h>
36 #endif
37
38 #include "cqhci.h"
39
40 #include "sdhci.h"
41 #include "sdhci-cqhci.h"
42 #include "sdhci-pci.h"
43
44 static void sdhci_pci_hw_reset(struct sdhci_host *host);
45
46 #ifdef CONFIG_PM_SLEEP
47 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
48 {
49 mmc_pm_flag_t pm_flags = 0;
50 bool cap_cd_wake = false;
51 int i;
52
53 for (i = 0; i < chip->num_slots; i++) {
54 struct sdhci_pci_slot *slot = chip->slots[i];
55
56 if (slot) {
57 pm_flags |= slot->host->mmc->pm_flags;
58 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
59 cap_cd_wake = true;
60 }
61 }
62
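/*
 * Enable device wakeup if any slot keeps power and wants SDIO IRQ wakeups
 * across suspend; only disable it if no slot can wake on card detect either.
 */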
63 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
64 return device_wakeup_enable(&chip->pdev->dev);
65 else if (!cap_cd_wake)
66 return device_wakeup_disable(&chip->pdev->dev);
67
68 return 0;
69 }
70
71 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
72 {
73 int i, ret;
74
75 sdhci_pci_init_wakeup(chip);
76
77 for (i = 0; i < chip->num_slots; i++) {
78 struct sdhci_pci_slot *slot = chip->slots[i];
79 struct sdhci_host *host;
80
81 if (!slot)
82 continue;
83
84 host = slot->host;
85
86 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
87 mmc_retune_needed(host->mmc);
88
89 ret = sdhci_suspend_host(host);
90 if (ret)
91 goto err_pci_suspend;
92
93 if (device_may_wakeup(&chip->pdev->dev))
94 mmc_gpio_set_cd_wake(host->mmc, true);
95 }
96
97 return 0;
98
99 err_pci_suspend:
100 while (--i >= 0)
101 sdhci_resume_host(chip->slots[i]->host);
102 return ret;
103 }
104
105 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
106 {
107 struct sdhci_pci_slot *slot;
108 int i, ret;
109
110 for (i = 0; i < chip->num_slots; i++) {
111 slot = chip->slots[i];
112 if (!slot)
113 continue;
114
115 ret = sdhci_resume_host(slot->host);
116 if (ret)
117 return ret;
118
119 mmc_gpio_set_cd_wake(slot->host->mmc, false);
120 }
121
122 return 0;
123 }
124
125 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
126 {
127 int ret;
128
129 ret = cqhci_suspend(chip->slots[0]->host->mmc);
130 if (ret)
131 return ret;
132
133 return sdhci_pci_suspend_host(chip);
134 }
135
136 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
137 {
138 int ret;
139
140 ret = sdhci_pci_resume_host(chip);
141 if (ret)
142 return ret;
143
144 return cqhci_resume(chip->slots[0]->host->mmc);
145 }
146 #endif
147
148 #ifdef CONFIG_PM
149 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
150 {
151 struct sdhci_pci_slot *slot;
152 struct sdhci_host *host;
153 int i, ret;
154
155 for (i = 0; i < chip->num_slots; i++) {
156 slot = chip->slots[i];
157 if (!slot)
158 continue;
159
160 host = slot->host;
161
162 ret = sdhci_runtime_suspend_host(host);
163 if (ret)
164 goto err_pci_runtime_suspend;
165
166 if (chip->rpm_retune &&
167 host->tuning_mode != SDHCI_TUNING_MODE_3)
168 mmc_retune_needed(host->mmc);
169 }
170
171 return 0;
172
173 err_pci_runtime_suspend:
174 while (--i >= 0)
175 sdhci_runtime_resume_host(chip->slots[i]->host, 0);
176 return ret;
177 }
178
179 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
180 {
181 struct sdhci_pci_slot *slot;
182 int i, ret;
183
184 for (i = 0; i < chip->num_slots; i++) {
185 slot = chip->slots[i];
186 if (!slot)
187 continue;
188
189 ret = sdhci_runtime_resume_host(slot->host, 0);
190 if (ret)
191 return ret;
192 }
193
194 return 0;
195 }
196
197 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
198 {
199 int ret;
200
201 ret = cqhci_suspend(chip->slots[0]->host->mmc);
202 if (ret)
203 return ret;
204
205 return sdhci_pci_runtime_suspend_host(chip);
206 }
207
208 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
209 {
210 int ret;
211
212 ret = sdhci_pci_runtime_resume_host(chip);
213 if (ret)
214 return ret;
215
216 return cqhci_resume(chip->slots[0]->host->mmc);
217 }
218 #endif
219
220 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
221 {
222 int cmd_error = 0;
223 int data_error = 0;
224
225 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
226 return intmask;
227
228 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
229
230 return 0;
231 }
232
233 static void sdhci_pci_dumpregs(struct mmc_host *mmc)
234 {
235 sdhci_dumpregs(mmc_priv(mmc));
236 }
237
238 /*****************************************************************************\
239 * *
240 * Hardware specific quirk handling *
241 * *
242 \*****************************************************************************/
243
244 static int ricoh_probe(struct sdhci_pci_chip *chip)
245 {
246 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
247 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
248 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
249 return 0;
250 }
251
252 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
253 {
254 u32 caps =
255 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
256 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
257 SDHCI_TIMEOUT_CLK_UNIT |
258 SDHCI_CAN_VDD_330 |
259 SDHCI_CAN_DO_HISPD |
260 SDHCI_CAN_DO_SDMA;
261 u32 caps1 = 0;
262
263 __sdhci_read_caps(slot->host, NULL, &caps, &caps1);
264 return 0;
265 }
266
267 #ifdef CONFIG_PM_SLEEP
268 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
269 {
270 /* Apply a delay to allow the controller to settle; otherwise it
271 * becomes confused if the card state changed during suspend.
272 */
273 msleep(500);
274 return sdhci_pci_resume_host(chip);
275 }
276 #endif
277
278 static const struct sdhci_pci_fixes sdhci_ricoh = {
279 .probe = ricoh_probe,
280 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
281 SDHCI_QUIRK_FORCE_DMA |
282 SDHCI_QUIRK_CLOCK_BEFORE_RESET,
283 };
284
285 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
286 .probe_slot = ricoh_mmc_probe_slot,
287 #ifdef CONFIG_PM_SLEEP
288 .resume = ricoh_mmc_resume,
289 #endif
290 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
291 SDHCI_QUIRK_CLOCK_BEFORE_RESET |
292 SDHCI_QUIRK_NO_CARD_NO_RESET,
293 };
294
295 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
296 {
297 struct sdhci_host *host = mmc_priv(mmc);
298
299 sdhci_set_ios(mmc, ios);
300
301 /*
302 * Some (ENE) controllers misbehave on some ios operations,
303 * signalling timeout and CRC errors even on CMD0. Resetting
304 * it on each ios seems to solve the problem.
305 */
306 if (!(host->flags & SDHCI_DEVICE_DEAD))
307 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
308 }
309
310 static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
311 {
312 slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
313 return 0;
314 }
315
316 static const struct sdhci_pci_fixes sdhci_ene_712 = {
317 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
318 SDHCI_QUIRK_BROKEN_DMA,
319 };
320
321 static const struct sdhci_pci_fixes sdhci_ene_714 = {
322 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
323 SDHCI_QUIRK_BROKEN_DMA,
324 .probe_slot = ene_714_probe_slot,
325 };
326
327 static const struct sdhci_pci_fixes sdhci_cafe = {
328 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
329 SDHCI_QUIRK_NO_BUSY_IRQ |
330 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
331 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
332 };
333
334 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
335 .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
336 };
337
338 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
339 {
340 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
341 return 0;
342 }
343
344 /*
345 * ADMA operation is disabled for Moorestown platform due to
346 * hardware bugs.
347 */
348 static int mrst_hc_probe(struct sdhci_pci_chip *chip)
349 {
350 /*
351 * slots number is fixed here for MRST as SDIO3/5 are never used and
352 * have hardware bugs.
353 */
354 chip->num_slots = 1;
355 return 0;
356 }
357
358 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
359 {
360 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
361 return 0;
362 }
363
364 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
365 {
366 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
367 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
368 return 0;
369 }
370
371 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
372 {
373 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
374 return 0;
375 }
376
377 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
378 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
379 .probe_slot = mrst_hc_probe_slot,
380 };
381
382 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
383 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
384 .probe = mrst_hc_probe,
385 };
386
387 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
388 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
389 .allow_runtime_pm = true,
390 .own_cd_for_runtime_pm = true,
391 };
392
393 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
394 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
395 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
396 .allow_runtime_pm = true,
397 .probe_slot = mfd_sdio_probe_slot,
398 };
399
400 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
401 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
402 .allow_runtime_pm = true,
403 .probe_slot = mfd_emmc_probe_slot,
404 };
405
406 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
407 .quirks = SDHCI_QUIRK_BROKEN_ADMA,
408 .probe_slot = pch_hc_probe_slot,
409 };
410
411 #ifdef CONFIG_X86
412
413 #define BYT_IOSF_SCCEP 0x63
414 #define BYT_IOSF_OCP_NETCTRL0 0x1078
415 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
416
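/*
 * Clear the OCP timeout-base field in OCP_NETCTRL0 over the IOSF sideband
 * for the Bay Trail SDHCI functions handled below (presumably so the on-chip
 * fabric does not time out the controller's transactions).
 */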
417 static void byt_ocp_setting(struct pci_dev *pdev)
418 {
419 u32 val = 0;
420
421 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
422 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
423 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
424 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
425 return;
426
427 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
428 &val)) {
429 dev_err(&pdev->dev, "%s read error\n", __func__);
430 return;
431 }
432
433 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
434 return;
435
436 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
437
438 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
439 val)) {
440 dev_err(&pdev->dev, "%s write error\n", __func__);
441 return;
442 }
443
444 dev_dbg(&pdev->dev, "%s completed\n", __func__);
445 }
446
447 #else
448
449 static inline void byt_ocp_setting(struct pci_dev *pdev)
450 {
451 }
452
453 #endif
454
455 enum {
456 INTEL_DSM_FNS = 0,
457 INTEL_DSM_V18_SWITCH = 3,
458 INTEL_DSM_V33_SWITCH = 4,
459 INTEL_DSM_DRV_STRENGTH = 9,
460 INTEL_DSM_D3_RETUNE = 10,
461 };
462
463 struct intel_host {
464 u32 dsm_fns;
465 int drv_strength;
466 bool d3_retune;
467 bool rpm_retune_ok;
468 bool needs_pwr_off;
469 u32 glk_rx_ctrl1;
470 u32 glk_tun_val;
471 u32 active_ltr;
472 u32 idle_ltr;
473 };
474
475 static const guid_t intel_dsm_guid =
476 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
477 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
478
479 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
480 unsigned int fn, u32 *result)
481 {
482 union acpi_object *obj;
483 int err = 0;
484 size_t len;
485
486 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
487 if (!obj)
488 return -EOPNOTSUPP;
489
490 if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
491 err = -EINVAL;
492 goto out;
493 }
494
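/* The _DSM returns a buffer; copy at most its first 4 bytes into the u32 result. */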
495 len = min_t(size_t, obj->buffer.length, 4);
496
497 *result = 0;
498 memcpy(result, obj->buffer.pointer, len);
499 out:
500 ACPI_FREE(obj);
501
502 return err;
503 }
504
505 static int intel_dsm(struct intel_host *intel_host, struct device *dev,
506 unsigned int fn, u32 *result)
507 {
508 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
509 return -EOPNOTSUPP;
510
511 return __intel_dsm(intel_host, dev, fn, result);
512 }
513
514 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
515 struct mmc_host *mmc)
516 {
517 int err;
518 u32 val;
519
520 intel_host->d3_retune = true;
521
522 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
523 if (err) {
524 pr_debug("%s: DSM not supported, error %d\n",
525 mmc_hostname(mmc), err);
526 return;
527 }
528
529 pr_debug("%s: DSM function mask %#x\n",
530 mmc_hostname(mmc), intel_host->dsm_fns);
531
532 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
533 intel_host->drv_strength = err ? 0 : val;
534
535 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
536 intel_host->d3_retune = err ? true : !!val;
537 }
538
539 static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
540 {
541 u8 reg;
542
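/*
 * Bit 4 (0x10) of the power control register is assumed here to drive the
 * eMMC RST_n line on these Intel controllers: assert it, hold briefly, then
 * release and wait for the card to come out of reset.
 */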
543 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
544 reg |= 0x10;
545 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
546 /* For eMMC, minimum is 1us but give it 9us for good measure */
547 udelay(9);
548 reg &= ~0x10;
549 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
550 /* For eMMC, minimum is 200us but give it 300us for good measure */
551 usleep_range(300, 1000);
552 }
553
554 static int intel_select_drive_strength(struct mmc_card *card,
555 unsigned int max_dtr, int host_drv,
556 int card_drv, int *drv_type)
557 {
558 struct sdhci_host *host = mmc_priv(card->host);
559 struct sdhci_pci_slot *slot = sdhci_priv(host);
560 struct intel_host *intel_host = sdhci_pci_priv(slot);
561
562 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
563 return 0;
564
565 return intel_host->drv_strength;
566 }
567
568 static int bxt_get_cd(struct mmc_host *mmc)
569 {
570 int gpio_cd = mmc_gpio_get_cd(mmc);
571
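/* GPIO says no card is present; otherwise confirm via the present-state register. */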
572 if (!gpio_cd)
573 return 0;
574
575 return sdhci_get_cd_nogpio(mmc);
576 }
577
578 static int mrfld_get_cd(struct mmc_host *mmc)
579 {
580 return sdhci_get_cd_nogpio(mmc);
581 }
582
583 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
584 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
585
586 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
587 unsigned short vdd)
588 {
589 struct sdhci_pci_slot *slot = sdhci_priv(host);
590 struct intel_host *intel_host = sdhci_pci_priv(slot);
591 int cntr;
592 u8 reg;
593
594 /*
595 * Bus power may control card power, but a full reset still may not
596 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
597 * That might be needed to initialize correctly, if the card was left
598 * powered on previously.
599 */
600 if (intel_host->needs_pwr_off) {
601 intel_host->needs_pwr_off = false;
602 if (mode != MMC_POWER_OFF) {
603 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
604 usleep_range(10000, 12500);
605 }
606 }
607
608 sdhci_set_power(host, mode, vdd);
609
610 if (mode == MMC_POWER_OFF)
611 return;
612
613 /*
614 * Bus power might not enable after D3 -> D0 transition due to the
615 * present state not yet having propagated. Retry for up to 2ms.
616 */
617 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
618 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
619 if (reg & SDHCI_POWER_ON)
620 break;
621 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
622 reg |= SDHCI_POWER_ON;
623 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
624 }
625 }
626
627 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
628 unsigned int timing)
629 {
630 /* Set UHS timing to SDR25 for High Speed mode */
631 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
632 timing = MMC_TIMING_UHS_SDR25;
633 sdhci_set_uhs_signaling(host, timing);
634 }
635
636 #define INTEL_HS400_ES_REG 0x78
637 #define INTEL_HS400_ES_BIT BIT(0)
638
639 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
640 struct mmc_ios *ios)
641 {
642 struct sdhci_host *host = mmc_priv(mmc);
643 u32 val;
644
645 val = sdhci_readl(host, INTEL_HS400_ES_REG);
646 if (ios->enhanced_strobe)
647 val |= INTEL_HS400_ES_BIT;
648 else
649 val &= ~INTEL_HS400_ES_BIT;
650 sdhci_writel(host, val, INTEL_HS400_ES_REG);
651 }
652
653 static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
654 struct mmc_ios *ios)
655 {
656 struct device *dev = mmc_dev(mmc);
657 struct sdhci_host *host = mmc_priv(mmc);
658 struct sdhci_pci_slot *slot = sdhci_priv(host);
659 struct intel_host *intel_host = sdhci_pci_priv(slot);
660 unsigned int fn;
661 u32 result = 0;
662 int err;
663
664 err = sdhci_start_signal_voltage_switch(mmc, ios);
665 if (err)
666 return err;
667
668 switch (ios->signal_voltage) {
669 case MMC_SIGNAL_VOLTAGE_330:
670 fn = INTEL_DSM_V33_SWITCH;
671 break;
672 case MMC_SIGNAL_VOLTAGE_180:
673 fn = INTEL_DSM_V18_SWITCH;
674 break;
675 default:
676 return 0;
677 }
678
679 err = intel_dsm(intel_host, dev, fn, &result);
680 pr_debug("%s: %s DSM fn %u error %d result %u\n",
681 mmc_hostname(mmc), __func__, fn, err, result);
682
683 return 0;
684 }
685
686 static const struct sdhci_ops sdhci_intel_byt_ops = {
687 .set_clock = sdhci_set_clock,
688 .set_power = sdhci_intel_set_power,
689 .enable_dma = sdhci_pci_enable_dma,
690 .set_bus_width = sdhci_set_bus_width,
691 .reset = sdhci_reset,
692 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
693 .hw_reset = sdhci_pci_hw_reset,
694 };
695
696 static const struct sdhci_ops sdhci_intel_glk_ops = {
697 .set_clock = sdhci_set_clock,
698 .set_power = sdhci_intel_set_power,
699 .enable_dma = sdhci_pci_enable_dma,
700 .set_bus_width = sdhci_set_bus_width,
701 .reset = sdhci_and_cqhci_reset,
702 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
703 .hw_reset = sdhci_pci_hw_reset,
704 .irq = sdhci_cqhci_irq,
705 };
706
707 static void byt_read_dsm(struct sdhci_pci_slot *slot)
708 {
709 struct intel_host *intel_host = sdhci_pci_priv(slot);
710 struct device *dev = &slot->chip->pdev->dev;
711 struct mmc_host *mmc = slot->host->mmc;
712
713 intel_dsm_init(intel_host, dev, mmc);
714 slot->chip->rpm_retune = intel_host->d3_retune;
715 }
716
717 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
718 {
719 int err = sdhci_execute_tuning(mmc, opcode);
720 struct sdhci_host *host = mmc_priv(mmc);
721
722 if (err)
723 return err;
724
725 /*
726 * Tuning can leave the IP in an active state (Buffer Read Enable bit
727 * set) which prevents the entry to low power states (i.e. S0i3). Data
728 * reset will clear it.
729 */
730 sdhci_reset(host, SDHCI_RESET_DATA);
731
732 return 0;
733 }
734
735 #define INTEL_ACTIVELTR 0x804
736 #define INTEL_IDLELTR 0x808
737
738 #define INTEL_LTR_REQ BIT(15)
739 #define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
740 #define INTEL_LTR_SCALE_1US (2 << 10)
741 #define INTEL_LTR_SCALE_32US (3 << 10)
742 #define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
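/*
 * LTR encoding used below: bits 9:0 hold the value, bits 11:10 the scale
 * (1 us or 32 us) and bit 15 marks the requirement as enabled. For example,
 * a requested tolerance of 3000 us does not fit the 10-bit value field, so
 * it is shifted right by 5 (divided by 32) to 93 and programmed with the
 * 32 us scale, i.e. roughly 2976 us.
 */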
743
744 static void intel_cache_ltr(struct sdhci_pci_slot *slot)
745 {
746 struct intel_host *intel_host = sdhci_pci_priv(slot);
747 struct sdhci_host *host = slot->host;
748
749 intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
750 intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
751 }
752
753 static void intel_ltr_set(struct device *dev, s32 val)
754 {
755 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
756 struct sdhci_pci_slot *slot = chip->slots[0];
757 struct intel_host *intel_host = sdhci_pci_priv(slot);
758 struct sdhci_host *host = slot->host;
759 u32 ltr;
760
761 pm_runtime_get_sync(dev);
762
763 /*
764 * Program latency tolerance (LTR) according to what has been asked
765 * by the PM QoS layer, or disable it if we were passed a negative
766 * value or PM_QOS_LATENCY_ANY.
767 */
768 ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
769
770 if (val == PM_QOS_LATENCY_ANY || val < 0) {
771 ltr &= ~INTEL_LTR_REQ;
772 } else {
773 ltr |= INTEL_LTR_REQ;
774 ltr &= ~INTEL_LTR_SCALE_MASK;
775 ltr &= ~INTEL_LTR_VALUE_MASK;
776
777 if (val > INTEL_LTR_VALUE_MASK) {
778 val >>= 5;
779 if (val > INTEL_LTR_VALUE_MASK)
780 val = INTEL_LTR_VALUE_MASK;
781 ltr |= INTEL_LTR_SCALE_32US | val;
782 } else {
783 ltr |= INTEL_LTR_SCALE_1US | val;
784 }
785 }
786
787 if (ltr == intel_host->active_ltr)
788 goto out;
789
790 writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
791 writel(ltr, host->ioaddr + INTEL_IDLELTR);
792
793 /* Cache the values into lpss structure */
794 intel_cache_ltr(slot);
795 out:
796 pm_runtime_put_autosuspend(dev);
797 }
798
799 static bool intel_use_ltr(struct sdhci_pci_chip *chip)
800 {
801 switch (chip->pdev->device) {
802 case PCI_DEVICE_ID_INTEL_BYT_EMMC:
803 case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
804 case PCI_DEVICE_ID_INTEL_BYT_SDIO:
805 case PCI_DEVICE_ID_INTEL_BYT_SD:
806 case PCI_DEVICE_ID_INTEL_BSW_EMMC:
807 case PCI_DEVICE_ID_INTEL_BSW_SDIO:
808 case PCI_DEVICE_ID_INTEL_BSW_SD:
809 return false;
810 default:
811 return true;
812 }
813 }
814
815 static void intel_ltr_expose(struct sdhci_pci_chip *chip)
816 {
817 struct device *dev = &chip->pdev->dev;
818
819 if (!intel_use_ltr(chip))
820 return;
821
822 dev->power.set_latency_tolerance = intel_ltr_set;
823 dev_pm_qos_expose_latency_tolerance(dev);
824 }
825
826 static void intel_ltr_hide(struct sdhci_pci_chip *chip)
827 {
828 struct device *dev = &chip->pdev->dev;
829
830 if (!intel_use_ltr(chip))
831 return;
832
833 dev_pm_qos_hide_latency_tolerance(dev);
834 dev->power.set_latency_tolerance = NULL;
835 }
836
837 static void byt_probe_slot(struct sdhci_pci_slot *slot)
838 {
839 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
840 struct device *dev = &slot->chip->pdev->dev;
841 struct mmc_host *mmc = slot->host->mmc;
842
843 byt_read_dsm(slot);
844
845 byt_ocp_setting(slot->chip->pdev);
846
847 ops->execute_tuning = intel_execute_tuning;
848 ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
849
850 device_property_read_u32(dev, "max-frequency", &mmc->f_max);
851
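/* LTR is a chip-wide setting, so expose the PM QoS latency tolerance handle only once, from slot 0. */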
852 if (!mmc->slotno) {
853 slot->chip->slots[mmc->slotno] = slot;
854 intel_ltr_expose(slot->chip);
855 }
856 }
857
858 static void byt_add_debugfs(struct sdhci_pci_slot *slot)
859 {
860 struct intel_host *intel_host = sdhci_pci_priv(slot);
861 struct mmc_host *mmc = slot->host->mmc;
862 struct dentry *dir = mmc->debugfs_root;
863
864 if (!intel_use_ltr(slot->chip))
865 return;
866
867 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
868 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
869
870 intel_cache_ltr(slot);
871 }
872
873 static int byt_add_host(struct sdhci_pci_slot *slot)
874 {
875 int ret = sdhci_add_host(slot->host);
876
877 if (!ret)
878 byt_add_debugfs(slot);
879 return ret;
880 }
881
882 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
883 {
884 struct mmc_host *mmc = slot->host->mmc;
885
886 if (!mmc->slotno)
887 intel_ltr_hide(slot->chip);
888 }
889
890 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
891 {
892 byt_probe_slot(slot);
893 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
894 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
895 MMC_CAP_CMD_DURING_TFR |
896 MMC_CAP_WAIT_WHILE_BUSY;
897 slot->hw_reset = sdhci_pci_int_hw_reset;
898 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
899 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
900 slot->host->mmc_host_ops.select_drive_strength =
901 intel_select_drive_strength;
902 return 0;
903 }
904
905 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
906 {
907 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
908 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
909 dmi_match(DMI_SYS_VENDOR, "IRBIS"));
910 }
911
912 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
913 {
914 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
915 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
916 }
917
918 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
919 {
920 int ret = byt_emmc_probe_slot(slot);
921
922 if (!glk_broken_cqhci(slot))
923 slot->host->mmc->caps2 |= MMC_CAP2_CQE;
924
925 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
926 if (!jsl_broken_hs400es(slot)) {
927 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
928 slot->host->mmc_host_ops.hs400_enhanced_strobe =
929 intel_hs400_enhanced_strobe;
930 }
931 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
932 }
933
934 return ret;
935 }
936
937 static const struct cqhci_host_ops glk_cqhci_ops = {
938 .enable = sdhci_cqe_enable,
939 .disable = sdhci_cqe_disable,
940 .dumpregs = sdhci_pci_dumpregs,
941 };
942
943 static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
944 {
945 struct device *dev = &slot->chip->pdev->dev;
946 struct sdhci_host *host = slot->host;
947 struct cqhci_host *cq_host;
948 bool dma64;
949 int ret;
950
951 ret = sdhci_setup_host(host);
952 if (ret)
953 return ret;
954
955 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
956 if (!cq_host) {
957 ret = -ENOMEM;
958 goto cleanup;
959 }
960
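/* The CQHCI register block sits at offset 0x200 from the SDHCI base on these controllers. */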
961 cq_host->mmio = host->ioaddr + 0x200;
962 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
963 cq_host->ops = &glk_cqhci_ops;
964
965 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
966 if (dma64)
967 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
968
969 ret = cqhci_init(cq_host, host->mmc, dma64);
970 if (ret)
971 goto cleanup;
972
973 ret = __sdhci_add_host(host);
974 if (ret)
975 goto cleanup;
976
977 byt_add_debugfs(slot);
978
979 return 0;
980
981 cleanup:
982 sdhci_cleanup_host(host);
983 return ret;
984 }
985
986 #ifdef CONFIG_PM
987 #define GLK_RX_CTRL1 0x834
988 #define GLK_TUN_VAL 0x840
989 #define GLK_PATH_PLL GENMASK(13, 8)
990 #define GLK_DLY GENMASK(6, 0)
991 /* Workaround firmware failing to restore the tuning value */
992 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
993 {
994 struct sdhci_pci_slot *slot = chip->slots[0];
995 struct intel_host *intel_host = sdhci_pci_priv(slot);
996 struct sdhci_host *host = slot->host;
997 u32 glk_rx_ctrl1;
998 u32 glk_tun_val;
999 u32 dly;
1000
1001 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
1002 return;
1003
1004 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
1005 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
1006
1007 if (susp) {
1008 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
1009 intel_host->glk_tun_val = glk_tun_val;
1010 return;
1011 }
1012
1013 if (!intel_host->glk_tun_val)
1014 return;
1015
1016 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
1017 intel_host->rpm_retune_ok = true;
1018 return;
1019 }
1020
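/*
 * Compute the delay expected from the PLL path delay plus twice the saved
 * tuning value; if the current delay field already matches, nothing needs to
 * be done. Otherwise program the expected delay and force a re-tune.
 */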
1021 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
1022 (intel_host->glk_tun_val << 1));
1023 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1024 return;
1025
1026 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1027 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1028
1029 intel_host->rpm_retune_ok = true;
1030 chip->rpm_retune = true;
1031 mmc_retune_needed(host->mmc);
1032 pr_info("%s: Requiring re-tune after rpm resume\n", mmc_hostname(host->mmc));
1033 }
1034
1035 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1036 {
1037 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1038 !chip->rpm_retune)
1039 glk_rpm_retune_wa(chip, susp);
1040 }
1041
1042 static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1043 {
1044 glk_rpm_retune_chk(chip, true);
1045
1046 return sdhci_cqhci_runtime_suspend(chip);
1047 }
1048
1049 static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1050 {
1051 glk_rpm_retune_chk(chip, false);
1052
1053 return sdhci_cqhci_runtime_resume(chip);
1054 }
1055 #endif
1056
1057 #ifdef CONFIG_ACPI
1058 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1059 {
1060 acpi_status status;
1061 unsigned long long max_freq;
1062
1063 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1064 "MXFQ", NULL, &max_freq);
1065 if (ACPI_FAILURE(status)) {
1066 dev_err(&slot->chip->pdev->dev,
1067 "MXFQ not found in acpi table\n");
1068 return -EINVAL;
1069 }
1070
1071 slot->host->mmc->f_max = max_freq * 1000000;
1072
1073 return 0;
1074 }
1075 #else
1076 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1077 {
1078 return 0;
1079 }
1080 #endif
1081
1082 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1083 {
1084 int err;
1085
1086 byt_probe_slot(slot);
1087
1088 err = ni_set_max_freq(slot);
1089 if (err)
1090 return err;
1091
1092 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1093 MMC_CAP_WAIT_WHILE_BUSY;
1094 return 0;
1095 }
1096
1097 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1098 {
1099 byt_probe_slot(slot);
1100 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1101 MMC_CAP_WAIT_WHILE_BUSY;
1102 return 0;
1103 }
1104
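/*
 * Record whether firmware left bus power enabled, so that the first power-up
 * performs an explicit power-off first (see sdhci_intel_set_power() above).
 */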
1105 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1106 {
1107 struct intel_host *intel_host = sdhci_pci_priv(slot);
1108 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1109
1110 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1111 }
1112
1113 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1114 {
1115 byt_probe_slot(slot);
1116 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1117 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1118 slot->cd_idx = 0;
1119 slot->cd_override_level = true;
1120 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1121 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1122 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1123 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1124 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1125
1126 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1127 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1128 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1129
1130 byt_needs_pwr_off(slot);
1131
1132 return 0;
1133 }
1134
1135 #ifdef CONFIG_PM_SLEEP
1136
1137 static int byt_resume(struct sdhci_pci_chip *chip)
1138 {
1139 byt_ocp_setting(chip->pdev);
1140
1141 return sdhci_pci_resume_host(chip);
1142 }
1143
1144 #endif
1145
1146 #ifdef CONFIG_PM
1147
1148 static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1149 {
1150 byt_ocp_setting(chip->pdev);
1151
1152 return sdhci_pci_runtime_resume_host(chip);
1153 }
1154
1155 #endif
1156
1157 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1158 #ifdef CONFIG_PM_SLEEP
1159 .resume = byt_resume,
1160 #endif
1161 #ifdef CONFIG_PM
1162 .runtime_resume = byt_runtime_resume,
1163 #endif
1164 .allow_runtime_pm = true,
1165 .probe_slot = byt_emmc_probe_slot,
1166 .add_host = byt_add_host,
1167 .remove_slot = byt_remove_slot,
1168 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1169 SDHCI_QUIRK_NO_LED,
1170 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1171 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1172 SDHCI_QUIRK2_STOP_WITH_TC,
1173 .ops = &sdhci_intel_byt_ops,
1174 .priv_size = sizeof(struct intel_host),
1175 };
1176
1177 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1178 .allow_runtime_pm = true,
1179 .probe_slot = glk_emmc_probe_slot,
1180 .add_host = glk_emmc_add_host,
1181 .remove_slot = byt_remove_slot,
1182 #ifdef CONFIG_PM_SLEEP
1183 .suspend = sdhci_cqhci_suspend,
1184 .resume = sdhci_cqhci_resume,
1185 #endif
1186 #ifdef CONFIG_PM
1187 .runtime_suspend = glk_runtime_suspend,
1188 .runtime_resume = glk_runtime_resume,
1189 #endif
1190 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1191 SDHCI_QUIRK_NO_LED,
1192 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1193 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1194 SDHCI_QUIRK2_STOP_WITH_TC,
1195 .ops = &sdhci_intel_glk_ops,
1196 .priv_size = sizeof(struct intel_host),
1197 };
1198
1199 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1200 #ifdef CONFIG_PM_SLEEP
1201 .resume = byt_resume,
1202 #endif
1203 #ifdef CONFIG_PM
1204 .runtime_resume = byt_runtime_resume,
1205 #endif
1206 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1207 SDHCI_QUIRK_NO_LED,
1208 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1209 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1210 .allow_runtime_pm = true,
1211 .probe_slot = ni_byt_sdio_probe_slot,
1212 .add_host = byt_add_host,
1213 .remove_slot = byt_remove_slot,
1214 .ops = &sdhci_intel_byt_ops,
1215 .priv_size = sizeof(struct intel_host),
1216 };
1217
1218 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1219 #ifdef CONFIG_PM_SLEEP
1220 .resume = byt_resume,
1221 #endif
1222 #ifdef CONFIG_PM
1223 .runtime_resume = byt_runtime_resume,
1224 #endif
1225 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1226 SDHCI_QUIRK_NO_LED,
1227 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1228 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1229 .allow_runtime_pm = true,
1230 .probe_slot = byt_sdio_probe_slot,
1231 .add_host = byt_add_host,
1232 .remove_slot = byt_remove_slot,
1233 .ops = &sdhci_intel_byt_ops,
1234 .priv_size = sizeof(struct intel_host),
1235 };
1236
1237 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1238 #ifdef CONFIG_PM_SLEEP
1239 .resume = byt_resume,
1240 #endif
1241 #ifdef CONFIG_PM
1242 .runtime_resume = byt_runtime_resume,
1243 #endif
1244 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1245 SDHCI_QUIRK_NO_LED,
1246 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1247 SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1248 SDHCI_QUIRK2_STOP_WITH_TC,
1249 .allow_runtime_pm = true,
1250 .own_cd_for_runtime_pm = true,
1251 .probe_slot = byt_sd_probe_slot,
1252 .add_host = byt_add_host,
1253 .remove_slot = byt_remove_slot,
1254 .ops = &sdhci_intel_byt_ops,
1255 .priv_size = sizeof(struct intel_host),
1256 };
1257
1258 /* Define Host controllers for Intel Merrifield platform */
1259 #define INTEL_MRFLD_EMMC_0 0
1260 #define INTEL_MRFLD_EMMC_1 1
1261 #define INTEL_MRFLD_SD 2
1262 #define INTEL_MRFLD_SDIO 3
1263
1264 #ifdef CONFIG_ACPI
1265 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1266 {
1267 struct acpi_device *device;
1268
1269 device = ACPI_COMPANION(&slot->chip->pdev->dev);
1270 if (device)
1271 acpi_device_fix_up_power_extended(device);
1272 }
1273 #else
1274 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1275 #endif
1276
1277 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1278 {
1279 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1280
1281 switch (func) {
1282 case INTEL_MRFLD_EMMC_0:
1283 case INTEL_MRFLD_EMMC_1:
1284 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1285 MMC_CAP_8_BIT_DATA |
1286 MMC_CAP_1_8V_DDR;
1287 break;
1288 case INTEL_MRFLD_SD:
1289 slot->cd_idx = 0;
1290 slot->cd_override_level = true;
1291 /*
1292 * There are two PCB designs of SD card slot with the opposite
1293 * card detection sense. Quirk this out by ignoring GPIO state
1294 * completely in the custom ->get_cd() callback.
1295 */
1296 slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
1297 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1298 break;
1299 case INTEL_MRFLD_SDIO:
1300 /* Advertise 2.0v for compatibility with the SDIO card's OCR */
1301 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1302 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1303 MMC_CAP_POWER_OFF_CARD;
1304 break;
1305 default:
1306 return -ENODEV;
1307 }
1308
1309 intel_mrfld_mmc_fix_up_power_slot(slot);
1310 return 0;
1311 }
1312
1313 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1314 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1315 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
1316 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1317 .allow_runtime_pm = true,
1318 .probe_slot = intel_mrfld_mmc_probe_slot,
1319 };
1320
1321 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1322 {
1323 u8 scratch;
1324 int ret;
1325
1326 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1327 if (ret)
1328 goto fail;
1329
1330 /*
1331 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1332 * [bit 1:2] and enable over current debouncing [bit 6].
1333 */
1334 if (on)
1335 scratch |= 0x47;
1336 else
1337 scratch &= ~0x47;
1338
1339 ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
1340
1341 fail:
1342 return pcibios_err_to_errno(ret);
1343 }
1344
1345 static int jmicron_probe(struct sdhci_pci_chip *chip)
1346 {
1347 int ret;
1348 u16 mmcdev = 0;
1349
1350 if (chip->pdev->revision == 0) {
1351 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1352 SDHCI_QUIRK_32BIT_DMA_SIZE |
1353 SDHCI_QUIRK_32BIT_ADMA_SIZE |
1354 SDHCI_QUIRK_RESET_AFTER_REQUEST |
1355 SDHCI_QUIRK_BROKEN_SMALL_PIO;
1356 }
1357
1358 /*
1359 * JMicron chips can have two interfaces to the same hardware
1360 * in order to work around limitations in Microsoft's driver.
1361 * We need to make sure we only bind to one of them.
1362 *
1363 * This code assumes two things:
1364 *
1365 * 1. The PCI code adds subfunctions in order.
1366 *
1367 * 2. The MMC interface has a lower subfunction number
1368 * than the SD interface.
1369 */
1370 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1371 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1372 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1373 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1374
1375 if (mmcdev) {
1376 struct pci_dev *sd_dev;
1377
1378 sd_dev = NULL;
1379 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1380 mmcdev, sd_dev)) != NULL) {
1381 if ((PCI_SLOT(chip->pdev->devfn) ==
1382 PCI_SLOT(sd_dev->devfn)) &&
1383 (chip->pdev->bus == sd_dev->bus))
1384 break;
1385 }
1386
1387 if (sd_dev) {
1388 pci_dev_put(sd_dev);
1389 dev_info(&chip->pdev->dev, "Refusing to bind to "
1390 "secondary interface.\n");
1391 return -ENODEV;
1392 }
1393 }
1394
1395 /*
1396 * JMicron chips need a bit of a nudge to enable the power
1397 * output pins.
1398 */
1399 ret = jmicron_pmos(chip, 1);
1400 if (ret) {
1401 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1402 return ret;
1403 }
1404
1405 /* quirk for unstable RO-detection on JM388 chips */
1406 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1407 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1408 chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
1409
1410 return 0;
1411 }
1412
1413 static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1414 {
1415 u8 scratch;
1416
1417 scratch = readb(host->ioaddr + 0xC0);
1418
1419 if (on)
1420 scratch |= 0x01;
1421 else
1422 scratch &= ~0x01;
1423
1424 writeb(scratch, host->ioaddr + 0xC0);
1425 }
1426
1427 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1428 {
1429 if (slot->chip->pdev->revision == 0) {
1430 u16 version;
1431
1432 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1433 version = (version & SDHCI_VENDOR_VER_MASK) >>
1434 SDHCI_VENDOR_VER_SHIFT;
1435
1436 /*
1437 * Older versions of the chip have lots of nasty glitches
1438 * in the ADMA engine. It's best just to avoid it
1439 * completely.
1440 */
1441 if (version < 0xAC)
1442 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1443 }
1444
1445 /* JM388 MMC doesn't support 1.8V while SD supports it */
1446 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1447 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1448 MMC_VDD_29_30 | MMC_VDD_30_31 |
1449 MMC_VDD_165_195; /* allow 1.8V */
1450 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1451 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1452 }
1453
1454 /*
1455 * The secondary interface requires a bit set to get the
1456 * interrupts.
1457 */
1458 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1459 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1460 jmicron_enable_mmc(slot->host, 1);
1461
1462 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1463
1464 return 0;
1465 }
1466
1467 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1468 {
1469 if (dead)
1470 return;
1471
1472 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1473 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1474 jmicron_enable_mmc(slot->host, 0);
1475 }
1476
1477 #ifdef CONFIG_PM_SLEEP
1478 static int jmicron_suspend(struct sdhci_pci_chip *chip)
1479 {
1480 int i, ret;
1481
1482 ret = sdhci_pci_suspend_host(chip);
1483 if (ret)
1484 return ret;
1485
1486 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1487 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1488 for (i = 0; i < chip->num_slots; i++)
1489 jmicron_enable_mmc(chip->slots[i]->host, 0);
1490 }
1491
1492 return 0;
1493 }
1494
1495 static int jmicron_resume(struct sdhci_pci_chip *chip)
1496 {
1497 int ret, i;
1498
1499 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1500 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1501 for (i = 0; i < chip->num_slots; i++)
1502 jmicron_enable_mmc(chip->slots[i]->host, 1);
1503 }
1504
1505 ret = jmicron_pmos(chip, 1);
1506 if (ret) {
1507 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1508 return ret;
1509 }
1510
1511 return sdhci_pci_resume_host(chip);
1512 }
1513 #endif
1514
1515 static const struct sdhci_pci_fixes sdhci_jmicron = {
1516 .probe = jmicron_probe,
1517
1518 .probe_slot = jmicron_probe_slot,
1519 .remove_slot = jmicron_remove_slot,
1520
1521 #ifdef CONFIG_PM_SLEEP
1522 .suspend = jmicron_suspend,
1523 .resume = jmicron_resume,
1524 #endif
1525 };
1526
1527 /* SysKonnect CardBus2SDIO extra registers */
1528 #define SYSKT_CTRL 0x200
1529 #define SYSKT_RDFIFO_STAT 0x204
1530 #define SYSKT_WRFIFO_STAT 0x208
1531 #define SYSKT_POWER_DATA 0x20c
1532 #define SYSKT_POWER_330 0xef
1533 #define SYSKT_POWER_300 0xf8
1534 #define SYSKT_POWER_184 0xcc
1535 #define SYSKT_POWER_CMD 0x20d
1536 #define SYSKT_POWER_START (1 << 7)
1537 #define SYSKT_POWER_STATUS 0x20e
1538 #define SYSKT_POWER_STATUS_OK (1 << 0)
1539 #define SYSKT_BOARD_REV 0x210
1540 #define SYSKT_CHIP_REV 0x211
1541 #define SYSKT_CONF_DATA 0x212
1542 #define SYSKT_CONF_DATA_1V8 (1 << 2)
1543 #define SYSKT_CONF_DATA_2V5 (1 << 1)
1544 #define SYSKT_CONF_DATA_3V3 (1 << 0)
1545
1546 static int syskt_probe(struct sdhci_pci_chip *chip)
1547 {
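/*
 * The chip reports a "vendor specific" programming interface; rewrite the
 * in-memory PCI class to "supports DMA" (see the check in
 * sdhci_pci_enable_dma() below).
 */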
1548 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1549 chip->pdev->class &= ~0x0000FF;
1550 chip->pdev->class |= PCI_SDHCI_IFDMA;
1551 }
1552 return 0;
1553 }
1554
1555 static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1556 {
1557 int tm, ps;
1558
1559 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1560 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1561 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1562 "board rev %d.%d, chip rev %d.%d\n",
1563 board_rev >> 4, board_rev & 0xf,
1564 chip_rev >> 4, chip_rev & 0xf);
1565 if (chip_rev >= 0x20)
1566 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1567
1568 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1569 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1570 udelay(50);
1571 tm = 10; /* Wait max 1 ms */
1572 do {
1573 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1574 if (ps & SYSKT_POWER_STATUS_OK)
1575 break;
1576 udelay(100);
1577 } while (--tm);
1578 if (!tm) {
1579 dev_err(&slot->chip->pdev->dev,
1580 "power regulator never stabilized");
1581 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1582 return -ENODEV;
1583 }
1584
1585 return 0;
1586 }
1587
1588 static const struct sdhci_pci_fixes sdhci_syskt = {
1589 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1590 .probe = syskt_probe,
1591 .probe_slot = syskt_probe_slot,
1592 };
1593
1594 static int via_probe(struct sdhci_pci_chip *chip)
1595 {
1596 if (chip->pdev->revision == 0x10)
1597 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1598
1599 return 0;
1600 }
1601
1602 static const struct sdhci_pci_fixes sdhci_via = {
1603 .probe = via_probe,
1604 };
1605
1606 static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1607 {
1608 slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1609 return 0;
1610 }
1611
1612 static const struct sdhci_pci_fixes sdhci_rtsx = {
1613 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1614 SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1615 SDHCI_QUIRK2_BROKEN_DDR50,
1616 .probe_slot = rtsx_probe_slot,
1617 };
1618
1619 /* AMD chipset generation */
1620 enum amd_chipset_gen {
1621 AMD_CHIPSET_BEFORE_ML,
1622 AMD_CHIPSET_CZ,
1623 AMD_CHIPSET_NL,
1624 AMD_CHIPSET_UNKNOWN,
1625 };
1626
1627 /* AMD registers */
1628 #define AMD_SD_AUTO_PATTERN 0xB8
1629 #define AMD_MSLEEP_DURATION 4
1630 #define AMD_SD_MISC_CONTROL 0xD0
1631 #define AMD_MAX_TUNE_VALUE 0x0B
1632 #define AMD_AUTO_TUNE_SEL 0x10800
1633 #define AMD_FIFO_PTR 0x30
1634 #define AMD_BIT_MASK 0x1F
1635
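/*
 * Manual HS200 tuning flow used below: reset the tuning engine, sweep all 12
 * phase settings while counting consecutive passes, program the phase at the
 * centre of the widest passing window, then lock it in with manual tuning.
 */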
1636 static void amd_tuning_reset(struct sdhci_host *host)
1637 {
1638 unsigned int val;
1639
1640 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1641 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1642 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1643
1644 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1645 val &= ~SDHCI_CTRL_EXEC_TUNING;
1646 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1647 }
1648
1649 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1650 {
1651 unsigned int val;
1652
1653 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1654 val &= ~AMD_BIT_MASK;
1655 val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1656 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1657 }
1658
1659 static void amd_enable_manual_tuning(struct pci_dev *pdev)
1660 {
1661 unsigned int val;
1662
1663 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1664 val |= AMD_FIFO_PTR;
1665 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1666 }
1667
1668 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1669 {
1670 struct sdhci_pci_slot *slot = sdhci_priv(host);
1671 struct pci_dev *pdev = slot->chip->pdev;
1672 u8 valid_win = 0;
1673 u8 valid_win_max = 0;
1674 u8 valid_win_end = 0;
1675 u8 ctrl, tune_around;
1676
1677 amd_tuning_reset(host);
1678
1679 for (tune_around = 0; tune_around < 12; tune_around++) {
1680 amd_config_tuning_phase(pdev, tune_around);
1681
1682 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1683 valid_win = 0;
1684 msleep(AMD_MSLEEP_DURATION);
1685 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1686 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1687 } else if (++valid_win > valid_win_max) {
1688 valid_win_max = valid_win;
1689 valid_win_end = tune_around;
1690 }
1691 }
1692
1693 if (!valid_win_max) {
1694 dev_err(&pdev->dev, "no tuning point found\n");
1695 return -EIO;
1696 }
1697
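/* Program the phase at the centre of the widest passing window. */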
1698 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1699
1700 amd_enable_manual_tuning(pdev);
1701
1702 host->mmc->retune_period = 0;
1703
1704 return 0;
1705 }
1706
1707 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1708 {
1709 struct sdhci_host *host = mmc_priv(mmc);
1710
1711 /* AMD requires custom HS200 tuning */
1712 if (host->timing == MMC_TIMING_MMC_HS200)
1713 return amd_execute_tuning_hs200(host, opcode);
1714
1715 /* Otherwise perform standard SDHCI tuning */
1716 return sdhci_execute_tuning(mmc, opcode);
1717 }
1718
1719 static int amd_probe_slot(struct sdhci_pci_slot *slot)
1720 {
1721 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1722
1723 ops->execute_tuning = amd_execute_tuning;
1724
1725 return 0;
1726 }
1727
1728 static int amd_probe(struct sdhci_pci_chip *chip)
1729 {
1730 struct pci_dev *smbus_dev;
1731 enum amd_chipset_gen gen;
1732
1733 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1734 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1735 if (smbus_dev) {
1736 gen = AMD_CHIPSET_BEFORE_ML;
1737 } else {
1738 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1739 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1740 if (smbus_dev) {
1741 if (smbus_dev->revision < 0x51)
1742 gen = AMD_CHIPSET_CZ;
1743 else
1744 gen = AMD_CHIPSET_NL;
1745 } else {
1746 gen = AMD_CHIPSET_UNKNOWN;
1747 }
1748 }
1749
1750 pci_dev_put(smbus_dev);
1751
1752 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1753 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1754
1755 return 0;
1756 }
1757
1758 static u32 sdhci_read_present_state(struct sdhci_host *host)
1759 {
1760 return sdhci_readl(host, SDHCI_PRESENT_STATE);
1761 }
1762
1763 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1764 {
1765 struct sdhci_pci_slot *slot = sdhci_priv(host);
1766 struct pci_dev *pdev = slot->chip->pdev;
1767 u32 present_state;
1768
1769 /*
1770 * SDHC 0x7906 requires a hard reset to clear all internal state.
1771 * Otherwise it can get into a bad state where the DATA lines are always
1772 * read as zeros.
1773 */
1774 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1775 pci_clear_master(pdev);
1776
1777 pci_save_state(pdev);
1778
1779 pci_set_power_state(pdev, PCI_D3cold);
1780 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1781 pdev->current_state);
1782 pci_set_power_state(pdev, PCI_D0);
1783
1784 pci_restore_state(pdev);
1785
1786 /*
1787 * SDHCI_RESET_ALL says the card detect logic should not be
1788 * reset, but since we need to reset the entire controller
1789 * we should wait until the card detect logic has stabilized.
1790 *
1791 * This normally takes about 40ms.
1792 */
1793 readx_poll_timeout(
1794 sdhci_read_present_state,
1795 host,
1796 present_state,
1797 present_state & SDHCI_CD_STABLE,
1798 10000,
1799 100000
1800 );
1801 }
1802
1803 return sdhci_reset(host, mask);
1804 }
1805
1806 static const struct sdhci_ops amd_sdhci_pci_ops = {
1807 .set_clock = sdhci_set_clock,
1808 .enable_dma = sdhci_pci_enable_dma,
1809 .set_bus_width = sdhci_set_bus_width,
1810 .reset = amd_sdhci_reset,
1811 .set_uhs_signaling = sdhci_set_uhs_signaling,
1812 };
1813
1814 static const struct sdhci_pci_fixes sdhci_amd = {
1815 .probe = amd_probe,
1816 .ops = &amd_sdhci_pci_ops,
1817 .probe_slot = amd_probe_slot,
1818 };
1819
1820 static const struct pci_device_id pci_ids[] = {
1821 SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
1822 SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
1823 SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
1824 SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
1825 SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
1826 SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
1827 SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
1828 SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
1829 SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
1830 SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
1831 SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
1832 SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
1833 SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
1834 SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
1835 SDHCI_PCI_DEVICE(VIA, 95D0, via),
1836 SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
1837 SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
1838 SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
1839 SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
1840 SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
1841 SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
1842 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
1843 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
1844 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
1845 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
1846 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
1847 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
1848 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
1849 SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
1850 SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
1851 SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
1852 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
1853 SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
1854 SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
1855 SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
1856 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
1857 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
1858 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
1859 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
1860 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
1861 SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
1862 SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
1863 SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
1864 SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
1865 SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
1866 SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
1867 SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
1868 SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
1869 SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
1870 SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
1871 SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
1872 SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
1873 SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
1874 SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
1875 SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
1876 SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
1877 SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
1878 SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
1879 SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
1880 SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
1881 SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
1882 SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
1883 SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
1884 SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
1885 SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
1886 SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
1887 SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
1888 SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
1889 SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
1890 SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
1891 SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
1892 SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
1893 SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
1894 SDHCI_PCI_DEVICE(O2, 8120, o2),
1895 SDHCI_PCI_DEVICE(O2, 8220, o2),
1896 SDHCI_PCI_DEVICE(O2, 8221, o2),
1897 SDHCI_PCI_DEVICE(O2, 8320, o2),
1898 SDHCI_PCI_DEVICE(O2, 8321, o2),
1899 SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
1900 SDHCI_PCI_DEVICE(O2, SDS0, o2),
1901 SDHCI_PCI_DEVICE(O2, SDS1, o2),
1902 SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
1903 SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
1904 SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
1905 SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
1906 SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
1907 SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
1908 SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
1909 SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
1910 SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
1911 SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
1912 SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
1913 SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
1914 SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
1915 /* Generic SD host controller */
1916 {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
1917 { /* end: all zeroes */ },
1918 };
1919
1920 MODULE_DEVICE_TABLE(pci, pci_ids);
1921
1922 /*****************************************************************************\
1923 * *
1924 * SDHCI core callbacks *
1925 * *
1926 \*****************************************************************************/
1927
1928 int sdhci_pci_enable_dma(struct sdhci_host *host)
1929 {
1930 struct sdhci_pci_slot *slot;
1931 struct pci_dev *pdev;
1932
1933 slot = sdhci_priv(host);
1934 pdev = slot->chip->pdev;
1935
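/*
 * The low byte of the PCI class code is the programming interface.  Warn,
 * but carry on, if the device claims the SDHCI class without the
 * DMA-capable interface while SDMA is about to be used.
 */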
1936 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
1937 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
1938 (host->flags & SDHCI_USE_SDMA)) {
1939 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
1940 "doesn't fully claim to support it.\n");
1941 }
1942
1943 pci_set_master(pdev);
1944
1945 return 0;
1946 }
1947
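/*
 * ->hw_reset is only populated by slot-specific probe code for controllers
 * that expose an eMMC hardware reset mechanism; for everyone else this
 * callback is a no-op.
 */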
1948 static void sdhci_pci_hw_reset(struct sdhci_host *host)
1949 {
1950 struct sdhci_pci_slot *slot = sdhci_priv(host);
1951
1952 if (slot->hw_reset)
1953 slot->hw_reset(host);
1954 }
1955
1956 static const struct sdhci_ops sdhci_pci_ops = {
1957 .set_clock = sdhci_set_clock,
1958 .enable_dma = sdhci_pci_enable_dma,
1959 .set_bus_width = sdhci_set_bus_width,
1960 .reset = sdhci_reset,
1961 .set_uhs_signaling = sdhci_set_uhs_signaling,
1962 .hw_reset = sdhci_pci_hw_reset,
1963 };
1964
1965 /*****************************************************************************\
1966 * *
1967 * Suspend/resume *
1968 * *
1969 \*****************************************************************************/
1970
1971 #ifdef CONFIG_PM_SLEEP
1972 static int sdhci_pci_suspend(struct device *dev)
1973 {
1974 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
1975
1976 if (!chip)
1977 return 0;
1978
1979 if (chip->fixes && chip->fixes->suspend)
1980 return chip->fixes->suspend(chip);
1981
1982 return sdhci_pci_suspend_host(chip);
1983 }
1984
1985 static int sdhci_pci_resume(struct device *dev)
1986 {
1987 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
1988
1989 if (!chip)
1990 return 0;
1991
1992 if (chip->fixes && chip->fixes->resume)
1993 return chip->fixes->resume(chip);
1994
1995 return sdhci_pci_resume_host(chip);
1996 }
1997 #endif
1998
1999 #ifdef CONFIG_PM
2000 static int sdhci_pci_runtime_suspend(struct device *dev)
2001 {
2002 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2003
2004 if (!chip)
2005 return 0;
2006
2007 if (chip->fixes && chip->fixes->runtime_suspend)
2008 return chip->fixes->runtime_suspend(chip);
2009
2010 return sdhci_pci_runtime_suspend_host(chip);
2011 }
2012
2013 static int sdhci_pci_runtime_resume(struct device *dev)
2014 {
2015 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2016
2017 if (!chip)
2018 return 0;
2019
2020 if (chip->fixes && chip->fixes->runtime_resume)
2021 return chip->fixes->runtime_resume(chip);
2022
2023 return sdhci_pci_runtime_resume_host(chip);
2024 }
2025 #endif
2026
2027 static const struct dev_pm_ops sdhci_pci_pm_ops = {
2028 SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
2029 SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
2030 sdhci_pci_runtime_resume, NULL)
2031 };
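/*
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to nothing when
 * CONFIG_PM_SLEEP / CONFIG_PM are disabled, matching the #ifdefs around the
 * callbacks above.
 */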
2032
2033 /*****************************************************************************\
2034 * *
2035 * Device probing/removal *
2036 * *
2037 \*****************************************************************************/
2038
2039 static struct sdhci_pci_slot *sdhci_pci_probe_slot(
2040 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
2041 int slotno)
2042 {
2043 struct sdhci_pci_slot *slot;
2044 struct sdhci_host *host;
2045 int ret, bar = first_bar + slotno;
2046 size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
2047
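/*
 * Each slot is reached through its own memory BAR (first_bar + slotno).
 * A BAR smaller than the 256-byte SDHCI register block is suspicious but
 * not fatal.
 */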
2048 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
2049 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
2050 return ERR_PTR(-ENODEV);
2051 }
2052
2053 if (pci_resource_len(pdev, bar) < 0x100) {
2054 dev_err(&pdev->dev, "Invalid iomem size. You may "
2055 "experience problems.\n");
2056 }
2057
2058 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
2059 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
2060 return ERR_PTR(-ENODEV);
2061 }
2062
2063 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
2064 dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
2065 return ERR_PTR(-ENODEV);
2066 }
2067
2068 host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
2069 if (IS_ERR(host)) {
2070 dev_err(&pdev->dev, "cannot allocate host\n");
2071 return ERR_CAST(host);
2072 }
2073
2074 slot = sdhci_priv(host);
2075
2076 slot->chip = chip;
2077 slot->host = host;
2078 slot->cd_idx = -1;
2079
2080 host->hw_name = "PCI";
2081 host->ops = chip->fixes && chip->fixes->ops ?
2082 chip->fixes->ops :
2083 &sdhci_pci_ops;
2084 host->quirks = chip->quirks;
2085 host->quirks2 = chip->quirks2;
2086
2087 host->irq = pdev->irq;
2088
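/*
 * pcim_iomap_regions() and pcim_iomap_table() are managed (devres) helpers,
 * so the BAR request and mapping are released automatically when the device
 * is unbound.
 */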
2089 ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
2090 if (ret) {
2091 dev_err(&pdev->dev, "cannot request region\n");
2092 goto cleanup;
2093 }
2094
2095 host->ioaddr = pcim_iomap_table(pdev)[bar];
2096
2097 if (chip->fixes && chip->fixes->probe_slot) {
2098 ret = chip->fixes->probe_slot(slot);
2099 if (ret)
2100 goto cleanup;
2101 }
2102
2103 host->mmc->pm_caps = MMC_PM_KEEP_POWER;
2104 host->mmc->slotno = slotno;
2105 host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
2106
2107 if (device_can_wakeup(&pdev->dev))
2108 host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2109
2110 if (host->mmc->caps & MMC_CAP_CD_WAKE)
2111 device_init_wakeup(&pdev->dev, true);
2112
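/*
 * If slot-specific probe code selected a card-detect GPIO (cd_idx >= 0),
 * try the "cd" connection ID first and fall back to an unnamed GPIO.
 * -EPROBE_DEFER is propagated so the probe is retried once the GPIO
 * provider becomes available.
 */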
2113 if (slot->cd_idx >= 0) {
2114 ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
2115 slot->cd_override_level, 0);
2116 if (ret && ret != -EPROBE_DEFER)
2117 ret = mmc_gpiod_request_cd(host->mmc, NULL,
2118 slot->cd_idx,
2119 slot->cd_override_level,
2120 0);
2121 if (ret == -EPROBE_DEFER)
2122 goto remove;
2123
2124 if (ret) {
2125 dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
2126 slot->cd_idx = -1;
2127 }
2128 }
2129
2130 if (chip->fixes && chip->fixes->add_host)
2131 ret = chip->fixes->add_host(slot);
2132 else
2133 ret = sdhci_add_host(host);
2134 if (ret)
2135 goto remove;
2136
2137 /*
2138 * Check if the chip needs a separate GPIO for card detect to wake up
2139 * from runtime suspend. If it is not there, don't allow runtime PM.
2140 */
2141 if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
2142 chip->allow_runtime_pm = false;
2143
2144 return slot;
2145
2146 remove:
2147 if (chip->fixes && chip->fixes->remove_slot)
2148 chip->fixes->remove_slot(slot, 0);
2149
2150 cleanup:
2151 sdhci_free_host(host);
2152
2153 return ERR_PTR(ret);
2154 }
2155
2156 static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
2157 {
2158 int dead;
2159 u32 scratch;
2160
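/*
 * Reading all ones from the interrupt status register usually means the
 * controller is already gone (removed or powered off), so tell the core
 * not to touch the hardware during teardown.
 */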
2161 dead = 0;
2162 scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
2163 if (scratch == (u32)-1)
2164 dead = 1;
2165
2166 sdhci_remove_host(slot->host, dead);
2167
2168 if (slot->chip->fixes && slot->chip->fixes->remove_slot)
2169 slot->chip->fixes->remove_slot(slot, dead);
2170
2171 sdhci_free_host(slot->host);
2172 }
2173
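/*
 * Runtime PM is opted into per chip (chip->allow_runtime_pm).  The short
 * autosuspend delay keeps the controller powered across back-to-back
 * requests instead of suspending after every command.
 */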
2174 static void sdhci_pci_runtime_pm_allow(struct device *dev)
2175 {
2176 pm_suspend_ignore_children(dev, 1);
2177 pm_runtime_set_autosuspend_delay(dev, 50);
2178 pm_runtime_use_autosuspend(dev);
2179 pm_runtime_allow(dev);
2180 /* Stay active until mmc core scans for a card */
2181 pm_runtime_put_noidle(dev);
2182 }
2183
2184 static void sdhci_pci_runtime_pm_forbid(struct device *dev)
2185 {
2186 pm_runtime_forbid(dev);
2187 pm_runtime_get_noresume(dev);
2188 }
2189
2190 static int sdhci_pci_probe(struct pci_dev *pdev,
2191 const struct pci_device_id *ent)
2192 {
2193 struct sdhci_pci_chip *chip;
2194 struct sdhci_pci_slot *slot;
2195
2196 u8 slots, first_bar;
2197 int ret, i;
2198
2199 BUG_ON(pdev == NULL);
2200 BUG_ON(ent == NULL);
2201
2202 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
2203 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
2204
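/*
 * The SDHCI slot information register in PCI config space reports how many
 * slots the controller implements and which BAR the first slot uses; each
 * additional slot occupies the next BAR.
 */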
2205 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
2206 if (ret)
2207 return pcibios_err_to_errno(ret);
2208
2209 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
2210 dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
2211
2212 BUG_ON(slots > MAX_SLOTS);
2213
2214 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
2215 if (ret)
2216 return pcibios_err_to_errno(ret);
2217
2218 first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
2219
2220 if (first_bar > 5) {
2221 dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
2222 return -ENODEV;
2223 }
2224
2225 ret = pcim_enable_device(pdev);
2226 if (ret)
2227 return ret;
2228
2229 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
2230 if (!chip)
2231 return -ENOMEM;
2232
2233 chip->pdev = pdev;
2234 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
2235 if (chip->fixes) {
2236 chip->quirks = chip->fixes->quirks;
2237 chip->quirks2 = chip->fixes->quirks2;
2238 chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
2239 }
2240 chip->num_slots = slots;
2241 chip->pm_retune = true;
2242 chip->rpm_retune = true;
2243
2244 pci_set_drvdata(pdev, chip);
2245
2246 if (chip->fixes && chip->fixes->probe) {
2247 ret = chip->fixes->probe(chip);
2248 if (ret)
2249 return ret;
2250 }
2251
2252 slots = chip->num_slots; /* Quirk may have changed this */
2253
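/* Probe each slot in turn; unwind already-probed slots if one fails. */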
2254 for (i = 0; i < slots; i++) {
2255 slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
2256 if (IS_ERR(slot)) {
2257 for (i--; i >= 0; i--)
2258 sdhci_pci_remove_slot(chip->slots[i]);
2259 return PTR_ERR(slot);
2260 }
2261
2262 chip->slots[i] = slot;
2263 }
2264
2265 if (chip->allow_runtime_pm)
2266 sdhci_pci_runtime_pm_allow(&pdev->dev);
2267
2268 return 0;
2269 }
2270
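/*
 * Slots are torn down explicitly; the chip structure and PCI resources are
 * devres-managed and released by the driver core after this returns.
 */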
2271 static void sdhci_pci_remove(struct pci_dev *pdev)
2272 {
2273 int i;
2274 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
2275
2276 if (chip->allow_runtime_pm)
2277 sdhci_pci_runtime_pm_forbid(&pdev->dev);
2278
2279 for (i = 0; i < chip->num_slots; i++)
2280 sdhci_pci_remove_slot(chip->slots[i]);
2281 }
2282
2283 static struct pci_driver sdhci_driver = {
2284 .name = "sdhci-pci",
2285 .id_table = pci_ids,
2286 .probe = sdhci_pci_probe,
2287 .remove = sdhci_pci_remove,
2288 .driver = {
2289 .pm = &sdhci_pci_pm_ops,
2290 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2291 },
2292 };
2293
2294 module_pci_driver(sdhci_driver);
2295
2296 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2297 MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
2298 MODULE_LICENSE("GPL");
2299