/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/mmc/host.h
 *
 *  Host driver specific definitions.
 */
#ifndef LINUX_MMC_HOST_H
#define LINUX_MMC_HOST_H

#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
#include <linux/blk-crypto-profile.h>

struct mmc_ios {
        unsigned int clock;             /* clock rate */
        unsigned short vdd;
        unsigned int power_delay_ms;    /* waiting for stable power */

/* vdd stores the bit number of the selected voltage range from below. */

        unsigned char bus_mode;         /* command output mode */

#define MMC_BUSMODE_OPENDRAIN 1
#define MMC_BUSMODE_PUSHPULL  2

        unsigned char chip_select;      /* SPI chip select */

#define MMC_CS_DONTCARE 0
#define MMC_CS_HIGH     1
#define MMC_CS_LOW      2

        unsigned char power_mode;       /* power supply mode */

#define MMC_POWER_OFF       0
#define MMC_POWER_UP        1
#define MMC_POWER_ON        2
#define MMC_POWER_UNDEFINED 3

        unsigned char bus_width;        /* data bus width */

#define MMC_BUS_WIDTH_1 0
#define MMC_BUS_WIDTH_4 2
#define MMC_BUS_WIDTH_8 3

        unsigned char timing;           /* timing specification used */

#define MMC_TIMING_LEGACY      0
#define MMC_TIMING_MMC_HS      1
#define MMC_TIMING_SD_HS       2
#define MMC_TIMING_UHS_SDR12   3
#define MMC_TIMING_UHS_SDR25   4
#define MMC_TIMING_UHS_SDR50   5
#define MMC_TIMING_UHS_SDR104  6
#define MMC_TIMING_UHS_DDR50   7
#define MMC_TIMING_MMC_DDR52   8
#define MMC_TIMING_MMC_HS200   9
#define MMC_TIMING_MMC_HS400   10
#define MMC_TIMING_SD_EXP      11
#define MMC_TIMING_SD_EXP_1_2V 12

        unsigned char signal_voltage;   /* signalling voltage (1.8V or 3.3V) */

#define MMC_SIGNAL_VOLTAGE_330 0
#define MMC_SIGNAL_VOLTAGE_180 1
#define MMC_SIGNAL_VOLTAGE_120 2

        unsigned char drv_type;         /* driver type (A, B, C, D) */

#define MMC_SET_DRIVER_TYPE_B 0
#define MMC_SET_DRIVER_TYPE_A 1
#define MMC_SET_DRIVER_TYPE_C 2
#define MMC_SET_DRIVER_TYPE_D 3

        bool enhanced_strobe;           /* hs400es selection */
};

struct mmc_clk_phase {
        bool valid;
        u16 in_deg;
        u16 out_deg;
};

#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
struct mmc_clk_phase_map {
        struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
};

struct mmc_host;

enum mmc_err_stat {
        MMC_ERR_CMD_TIMEOUT,
        MMC_ERR_CMD_CRC,
        MMC_ERR_DAT_TIMEOUT,
        MMC_ERR_DAT_CRC,
        MMC_ERR_AUTO_CMD,
        MMC_ERR_ADMA,
        MMC_ERR_TUNING,
        MMC_ERR_CMDQ_RED,
        MMC_ERR_CMDQ_GCE,
        MMC_ERR_CMDQ_ICCE,
        MMC_ERR_REQ_TIMEOUT,
        MMC_ERR_CMDQ_REQ_TIMEOUT,
        MMC_ERR_ICE_CFG,
        MMC_ERR_CTRL_TIMEOUT,
        MMC_ERR_UNEXPECTED_IRQ,
        MMC_ERR_MAX,
};

struct mmc_host_ops {
        /*
         * It is optional for the host to implement pre_req and post_req in
         * order to support double buffering of requests (prepare one
         * request while another request is active).
         * pre_req() must always be followed by a post_req().
         * To undo a call made to pre_req(), call post_req() with
         * a nonzero err condition.
         */
        void (*post_req)(struct mmc_host *host, struct mmc_request *req,
                         int err);
        void (*pre_req)(struct mmc_host *host, struct mmc_request *req);
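        /*
         * A minimal sketch (hypothetical driver, not part of this header) of
         * a pre_req()/post_req() pair that maps and unmaps the scatterlist
         * outside the critical path; "struct foo_host" and its "dev" member
         * are illustrative only:
         *
         *      static void foo_pre_req(struct mmc_host *host,
         *                              struct mmc_request *mrq)
         *      {
         *              struct foo_host *priv = mmc_priv(host);
         *              struct mmc_data *data = mrq->data;
         *
         *              if (data)
         *                      data->host_cookie = dma_map_sg(priv->dev,
         *                                      data->sg, data->sg_len,
         *                                      mmc_get_dma_dir(data));
         *      }
         *
         *      static void foo_post_req(struct mmc_host *host,
         *                               struct mmc_request *mrq, int err)
         *      {
         *              struct foo_host *priv = mmc_priv(host);
         *              struct mmc_data *data = mrq->data;
         *
         *              if (data && data->host_cookie) {
         *                      dma_unmap_sg(priv->dev, data->sg,
         *                                   data->sg_len,
         *                                   mmc_get_dma_dir(data));
         *                      data->host_cookie = 0;
         *              }
         *      }
         */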
        void (*request)(struct mmc_host *host, struct mmc_request *req);
        /* Submit one request to host in atomic context. */
        int (*request_atomic)(struct mmc_host *host,
                              struct mmc_request *req);

        /*
         * Avoid calling the next three functions too often or in a "fast
         * path", since the underlying controller might implement them in an
         * expensive and/or slow way. Also note that these functions might
         * sleep, so don't call them in atomic contexts!
         */

        /*
         * Notes to the set_ios callback:
         * ios->clock might be 0. Some controllers can treat 0 Hz like any
         * other frequency, but others explicitly need the clock to be
         * disabled. Otherwise e.g. voltage switching might fail because
         * SDCLK is not really quiet.
         */
        void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
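        /*
         * Hypothetical sketch of a set_ios() that gates the clock when
         * ios->clock is 0 so that SDCLK is really quiet (the foo_* helpers
         * are illustrative only):
         *
         *      static void foo_set_ios(struct mmc_host *host,
         *                              struct mmc_ios *ios)
         *      {
         *              struct foo_host *priv = mmc_priv(host);
         *
         *              if (ios->clock)
         *                      foo_set_clock_rate(priv, ios->clock);
         *              else
         *                      foo_gate_clock(priv);
         *
         *              foo_set_bus_width(priv, ios->bus_width);
         *              foo_set_timing(priv, ios->timing);
         *      }
         */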

        /*
         * Return values for the get_ro callback should be:
         *   0 for a read/write card
         *   1 for a read-only card
         *   -ENOSYS when not supported (equal to NULL callback)
         *   or a negative errno value when something bad happened
         */
        int (*get_ro)(struct mmc_host *host);

        /*
         * Return values for the get_cd callback should be:
         *   0 for an absent card
         *   1 for a present card
         *   -ENOSYS when not supported (equal to NULL callback)
         *   or a negative errno value when something bad happened
         */
        int (*get_cd)(struct mmc_host *host);
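        /*
         * Hypothetical get_cd() based on a GPIO descriptor; "cd_gpiod" is an
         * illustrative driver-private field assumed active-high on card
         * presence (drivers using <linux/mmc/slot-gpio.h> can usually just
         * point these callbacks at mmc_gpio_get_cd()/mmc_gpio_get_ro()):
         *
         *      static int foo_get_cd(struct mmc_host *host)
         *      {
         *              struct foo_host *priv = mmc_priv(host);
         *
         *              if (!priv->cd_gpiod)
         *                      return -ENOSYS;
         *
         *              return gpiod_get_value_cansleep(priv->cd_gpiod);
         *      }
         */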

        void (*enable_sdio_irq)(struct mmc_host *host, int enable);
        /* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. */
        void (*ack_sdio_irq)(struct mmc_host *host);
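        /*
         * With MMC_CAP2_SDIO_IRQ_NOTHREAD, the interrupt handler signals a
         * card interrupt with sdio_signal_irq() (declared later in this
         * header), and the core later calls ->ack_sdio_irq() so the driver
         * can re-enable it. Hypothetical sketch (foo_* names are
         * illustrative):
         *
         *      static irqreturn_t foo_irq(int irq, void *dev_id)
         *      {
         *              struct mmc_host *host = dev_id;
         *
         *              if (foo_card_int_raised(mmc_priv(host))) {
         *                      foo_mask_card_int(mmc_priv(host));
         *                      sdio_signal_irq(host);
         *              }
         *
         *              return IRQ_HANDLED;
         *      }
         *
         *      static void foo_ack_sdio_irq(struct mmc_host *host)
         *      {
         *              foo_unmask_card_int(mmc_priv(host));
         *      }
         */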

        /* optional callback for HC quirks */
        void (*init_card)(struct mmc_host *host, struct mmc_card *card);

        int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);

        /* Check if the card is pulling dat[0] low */
        int (*card_busy)(struct mmc_host *host);

        /* The tuning command opcode value is different for SD and eMMC cards */
        int (*execute_tuning)(struct mmc_host *host, u32 opcode);

        /* Prepare the HS400 target operating frequency, depending on the host driver */
        int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);

        /* Execute HS400 tuning, depending on the host driver */
        int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);

        /* Optional callback to prepare for SD high-speed tuning */
        int (*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);

        /* Optional callback to execute SD high-speed tuning */
        int (*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);

        /* Prepare the switch to DDR during the HS400 init sequence */
        int (*hs400_prepare_ddr)(struct mmc_host *host);

        /* Prepare for switching from HS400 to HS200 */
        void (*hs400_downgrade)(struct mmc_host *host);

        /* Complete selection of HS400 */
        void (*hs400_complete)(struct mmc_host *host);

        /* Prepare enhanced strobe, depending on the host driver */
        void (*hs400_enhanced_strobe)(struct mmc_host *host,
                                      struct mmc_ios *ios);
        int (*select_drive_strength)(struct mmc_card *card,
                                     unsigned int max_dtr, int host_drv,
                                     int card_drv, int *drv_type);
        /* Reset the eMMC card via RST_n */
        void (*card_hw_reset)(struct mmc_host *host);
        void (*card_event)(struct mmc_host *host);

        /*
         * Optional callback to support controllers with HW issues for multiple
         * I/O. Returns the number of supported blocks for the request.
         */
        int (*multi_io_quirk)(struct mmc_card *card,
                              unsigned int direction, int blk_size);

        /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
        int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
};
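/*
 * A host driver typically instantiates a const ops table and fills in only
 * the callbacks it supports. Hypothetical example (the foo_* functions are
 * illustrative; mmc_gpio_get_cd()/mmc_gpio_get_ro() come from
 * <linux/mmc/slot-gpio.h>):
 *
 *      static const struct mmc_host_ops foo_ops = {
 *              .request        = foo_request,
 *              .set_ios        = foo_set_ios,
 *              .get_cd         = mmc_gpio_get_cd,
 *              .get_ro         = mmc_gpio_get_ro,
 *              .execute_tuning = foo_execute_tuning,
 *      };
 */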

struct mmc_cqe_ops {
        /* Allocate resources, and make the CQE operational */
        int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card);
        /* Free resources, and make the CQE non-operational */
        void (*cqe_disable)(struct mmc_host *host);
        /*
         * Issue a read, write or DCMD request to the CQE. Also deal with the
         * effect of ->cqe_off().
         */
        int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq);
        /* Free resources (e.g. DMA mapping) associated with the request */
        void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq);
        /*
         * Prepare the CQE and host controller to accept non-CQ commands. There
         * is no corresponding ->cqe_on(), instead ->cqe_request() is required
         * to deal with that.
         */
        void (*cqe_off)(struct mmc_host *host);
        /*
         * Wait for all CQE tasks to complete. Return an error if recovery
         * becomes necessary.
         */
        int (*cqe_wait_for_idle)(struct mmc_host *host);
        /*
         * Notify CQE that a request has timed out. Return false if the request
         * has already completed, or true if it timed out, in which case set
         * *recovery_needed to indicate whether recovery is required.
         */
        bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq,
                            bool *recovery_needed);
        /*
         * Stop all CQE activity and prepare the CQE and host controller to
         * accept recovery commands.
         */
        void (*cqe_recovery_start)(struct mmc_host *host);
        /*
         * Clear the queue and call mmc_cqe_request_done() on all requests.
         * Requests that errored will have the error set on the mmc_request
         * (data->error or cmd->error for DCMD). Requests that did not error
         * will have zero data bytes transferred.
         */
        void (*cqe_recovery_finish)(struct mmc_host *host);
};

struct mmc_async_req {
        /* active mmc request */
        struct mmc_request *mrq;
        /*
         * Check the error status of a completed mmc request.
         * Returns 0 on success, otherwise non-zero.
         */
        enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
};

/**
 * struct mmc_slot - MMC slot functions
 *
 * @cd_irq:             MMC/SD-card slot hotplug detection IRQ or -EINVAL
 * @cd_wake_enabled:    card detect IRQ is configured as a system wakeup source
 * @handler_priv:       MMC/SD-card slot context
 *
 * Some MMC/SD host controllers implement slot-functions like card and
 * write-protect detection natively. However, a large number of controllers
 * leave these functions to the CPU. This struct provides a hook to attach
 * such slot-function drivers.
 */
struct mmc_slot {
        int cd_irq;
        bool cd_wake_enabled;
        void *handler_priv;
};

/**
 * struct mmc_context_info - synchronization details for mmc context
 * @is_done_rcv:        wake up reason was done request
 * @is_new_req:         wake up reason was new request
 * @is_waiting_last_req: mmc context waiting for single running request
 * @wait:               wait queue
 */
struct mmc_context_info {
        bool is_done_rcv;
        bool is_new_req;
        bool is_waiting_last_req;
        wait_queue_head_t wait;
};

struct regulator;
struct mmc_pwrseq;

struct mmc_supply {
        struct regulator *vmmc;         /* Card power supply */
        struct regulator *vqmmc;        /* Optional Vccq supply */
};

struct mmc_ctx {
        struct task_struct *task;
};

struct mmc_host {
        struct device *parent;
        struct device class_dev;
        int index;
        const struct mmc_host_ops *ops;
        struct mmc_pwrseq *pwrseq;
        unsigned int f_min;
        unsigned int f_max;
        unsigned int f_init;
        u32 ocr_avail;
        u32 ocr_avail_sdio;             /* SDIO-specific OCR */
        u32 ocr_avail_sd;               /* SD-specific OCR */
        u32 ocr_avail_mmc;              /* MMC-specific OCR */
        struct wakeup_source *ws;       /* Enable consumption of uevents */
        u32 max_current_330;
        u32 max_current_300;
        u32 max_current_180;

#define MMC_VDD_165_195 0x00000080      /* VDD voltage 1.65 - 1.95 */
#define MMC_VDD_20_21   0x00000100      /* VDD voltage 2.0 ~ 2.1 */
#define MMC_VDD_21_22   0x00000200      /* VDD voltage 2.1 ~ 2.2 */
#define MMC_VDD_22_23   0x00000400      /* VDD voltage 2.2 ~ 2.3 */
#define MMC_VDD_23_24   0x00000800      /* VDD voltage 2.3 ~ 2.4 */
#define MMC_VDD_24_25   0x00001000      /* VDD voltage 2.4 ~ 2.5 */
#define MMC_VDD_25_26   0x00002000      /* VDD voltage 2.5 ~ 2.6 */
#define MMC_VDD_26_27   0x00004000      /* VDD voltage 2.6 ~ 2.7 */
#define MMC_VDD_27_28   0x00008000      /* VDD voltage 2.7 ~ 2.8 */
#define MMC_VDD_28_29   0x00010000      /* VDD voltage 2.8 ~ 2.9 */
#define MMC_VDD_29_30   0x00020000      /* VDD voltage 2.9 ~ 3.0 */
#define MMC_VDD_30_31   0x00040000      /* VDD voltage 3.0 ~ 3.1 */
#define MMC_VDD_31_32   0x00080000      /* VDD voltage 3.1 ~ 3.2 */
#define MMC_VDD_32_33   0x00100000      /* VDD voltage 3.2 ~ 3.3 */
#define MMC_VDD_33_34   0x00200000      /* VDD voltage 3.3 ~ 3.4 */
#define MMC_VDD_34_35   0x00400000      /* VDD voltage 3.4 ~ 3.5 */
#define MMC_VDD_35_36   0x00800000      /* VDD voltage 3.5 ~ 3.6 */
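/*
 * ocr_avail is a mask of the MMC_VDD_* ranges the host can supply; the range
 * eventually selected by the core is reported back to the driver as a bit
 * number in ios->vdd (see struct mmc_ios above). Illustrative example for a
 * host limited to a 3.2V-3.4V supply:
 *
 *      host->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 *
 * after which ->set_ios() would see ios->vdd set to the bit number of the
 * chosen range (20 or 21 here).
 */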

        u32 caps;                       /* Host capabilities */

#define MMC_CAP_4_BIT_DATA      (1 << 0)  /* Can the host do 4 bit transfers */
#define MMC_CAP_MMC_HIGHSPEED   (1 << 1)  /* Can do MMC high-speed timing */
#define MMC_CAP_SD_HIGHSPEED    (1 << 2)  /* Can do SD high-speed timing */
#define MMC_CAP_SDIO_IRQ        (1 << 3)  /* Can signal pending SDIO IRQs */
#define MMC_CAP_SPI             (1 << 4)  /* Talks only SPI protocols */
#define MMC_CAP_NEEDS_POLL      (1 << 5)  /* Needs polling for card-detection */
#define MMC_CAP_8_BIT_DATA      (1 << 6)  /* Can the host do 8 bit transfers */
#define MMC_CAP_AGGRESSIVE_PM   (1 << 7)  /* Suspend (e)MMC/SD at idle */
#define MMC_CAP_NONREMOVABLE    (1 << 8)  /* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9)  /* Waits while card is busy */
#define MMC_CAP_3_3V_DDR        (1 << 11) /* Host supports eMMC DDR 3.3V */
#define MMC_CAP_1_8V_DDR        (1 << 12) /* Host supports eMMC DDR 1.8V */
#define MMC_CAP_1_2V_DDR        (1 << 13) /* Host supports eMMC DDR 1.2V */
#define MMC_CAP_DDR             (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \
                                 MMC_CAP_1_2V_DDR)
#define MMC_CAP_POWER_OFF_CARD  (1 << 14) /* Can power off after boot */
#define MMC_CAP_BUS_WIDTH_TEST  (1 << 15) /* CMD14/CMD19 bus width ok */
#define MMC_CAP_UHS_SDR12       (1 << 16) /* Host supports UHS SDR12 mode */
#define MMC_CAP_UHS_SDR25       (1 << 17) /* Host supports UHS SDR25 mode */
#define MMC_CAP_UHS_SDR50       (1 << 18) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104      (1 << 19) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50       (1 << 20) /* Host supports UHS DDR50 mode */
#define MMC_CAP_UHS             (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \
                                 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
                                 MMC_CAP_UHS_DDR50)
#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
#define MMC_CAP_NEED_RSP_BUSY   (1 << 22) /* Commands with R1B can't use R1. */
#define MMC_CAP_DRIVER_TYPE_A   (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C   (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D   (1 << 25) /* Host supports Driver Type D */
#define MMC_CAP_DONE_COMPLETE   (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE         (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR  (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23           (1 << 30) /* CMD23 supported. */
#define MMC_CAP_HW_RESET        (1 << 31) /* Reset the eMMC card via RST_n */

        u32 caps2;                      /* More host capabilities */

#define MMC_CAP2_BOOTPART_NOACC (1 << 0)  /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2)  /* Can do full power cycle */
#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5)  /* Can support HS200 1.8V SDR */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6)  /* Can support HS200 1.2V SDR */
#define MMC_CAP2_HS200          (MMC_CAP2_HS200_1_8V_SDR | \
                                 MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_SD_EXP         (1 << 7)  /* SD express via PCIe */
#define MMC_CAP2_SD_EXP_1_2V    (1 << 8)  /* SD express 1.2V */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
#define MMC_CAP2_HS400_1_8V     (1 << 15) /* Can support HS400 1.8V */
#define MMC_CAP2_HS400_1_2V     (1 << 16) /* Can support HS400 1.2V */
#define MMC_CAP2_HS400          (MMC_CAP2_HS400_1_8V | \
                                 MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_HSX00_1_8V     (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)
#define MMC_CAP2_HSX00_1_2V     (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) /* Process SDIO IRQs without the sdio_irq_thread */
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
#define MMC_CAP2_NO_SDIO        (1 << 19) /* Do not send SDIO commands during initialization */
#define MMC_CAP2_HS400_ES       (1 << 20) /* Host supports enhanced strobe */
#define MMC_CAP2_NO_SD          (1 << 21) /* Do not send SD commands during initialization */
#define MMC_CAP2_NO_MMC         (1 << 22) /* Do not send (e)MMC commands during initialization */
#define MMC_CAP2_CQE            (1 << 23) /* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD       (1 << 24) /* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V     (1 << 25) /* Host must negotiate down from 3.3V */
#define MMC_CAP2_MERGE_CAPABLE  (1 << 26) /* Host can merge a segment over the segment size */
#ifdef CONFIG_MMC_CRYPTO
#define MMC_CAP2_CRYPTO         (1 << 27) /* Host supports inline encryption */
#else
#define MMC_CAP2_CRYPTO         0
#endif
#define MMC_CAP2_ALT_GPT_TEGRA  (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */

        int fixed_drv_type;             /* fixed driver type for non-removable media */

        mmc_pm_flag_t pm_caps;          /* supported pm features */

        /* host specific block data */
        unsigned int max_seg_size;      /* see blk_queue_max_segment_size */
        unsigned short max_segs;        /* see blk_queue_max_segments */
        unsigned short unused;
        unsigned int max_req_size;      /* maximum number of bytes in one req */
        unsigned int max_blk_size;      /* maximum size of one mmc block */
        unsigned int max_blk_count;     /* maximum number of blocks in one req */
        unsigned int max_busy_timeout;  /* max busy timeout in ms */

        /* private data */
        spinlock_t lock;                /* lock for claim and bus ops */

        struct mmc_ios ios;             /* current io bus settings */

        /* group bitfields together to minimize padding */
        unsigned int use_spi_crc:1;
        unsigned int claimed:1;         /* host exclusively claimed */
        unsigned int doing_init_tune:1; /* initial tuning in progress */
        unsigned int can_retune:1;      /* re-tuning can be used */
        unsigned int doing_retune:1;    /* re-tuning in progress */
        unsigned int retune_now:1;      /* do re-tuning at next req */
        unsigned int retune_paused:1;   /* re-tuning is temporarily disabled */
        unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
        unsigned int can_dma_map_merge:1; /* merging can be used */
        unsigned int vqmmc_enabled:1;   /* vqmmc regulator is enabled */

        int rescan_disable;             /* disable card detection */
        int rescan_entered;             /* used with nonremovable devices */

        int need_retune;                /* re-tuning is needed */
        int hold_retune;                /* hold off re-tuning */
        unsigned int retune_period;     /* re-tuning period in secs */
        struct timer_list retune_timer; /* for periodic re-tuning */

        bool trigger_card_event;        /* card_event necessary */

        struct mmc_card *card;          /* device attached to this host */

        wait_queue_head_t wq;
        struct mmc_ctx *claimer;        /* context that has host claimed */
        int claim_cnt;                  /* "claim" nesting count */
        struct mmc_ctx default_ctx;     /* default context */

        struct delayed_work detect;
        int detect_change;              /* card detect flag */
        struct mmc_slot slot;

        const struct mmc_bus_ops *bus_ops; /* current bus driver */

        unsigned int sdio_irqs;
        struct task_struct *sdio_irq_thread;
        struct work_struct sdio_irq_work;
        bool sdio_irq_pending;
        atomic_t sdio_irq_thread_abort;

        mmc_pm_flag_t pm_flags;         /* requested pm features */

        struct led_trigger *led;        /* activity led */

#ifdef CONFIG_REGULATOR
        bool regulator_enabled;         /* regulator state */
#endif
        struct mmc_supply supply;

        struct dentry *debugfs_root;

        /* Ongoing data transfer that allows commands during transfer */
        struct mmc_request *ongoing_mrq;

#ifdef CONFIG_FAIL_MMC_REQUEST
        struct fault_attr fail_mmc_request;
#endif

        unsigned int actual_clock;      /* Actual HC clock rate */

        unsigned int slotno;            /* used for sdio acpi binding */

        int dsr_req;                    /* DSR value is valid */
        u32 dsr;                        /* optional driver stage (DSR) value */

        /* Command Queue Engine (CQE) support */
        const struct mmc_cqe_ops *cqe_ops;
        void *cqe_private;
        int cqe_qdepth;
        bool cqe_enabled;
        bool cqe_on;

        /* Inline encryption support */
#ifdef CONFIG_MMC_CRYPTO
        struct blk_crypto_profile crypto_profile;
#endif

        /* Host Software Queue support */
        bool hsq_enabled;

        u32 err_stats[MMC_ERR_MAX];
        unsigned long private[] ____cacheline_aligned;
};

struct device_node;

struct mmc_host *mmc_alloc_host(int extra, struct device *);
struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
void mmc_of_parse_clk_phase(struct mmc_host *host,
                            struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);
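/*
 * Typical (hypothetical) probe flow for a platform host driver: allocate the
 * host with room for driver-private data, parse the common DT properties,
 * fill in capabilities and limits, then register the host. The foo_* names
 * and the limit values are illustrative only; devm_mmc_alloc_host() can be
 * used instead to tie the allocation to the device's lifetime.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct mmc_host *mmc;
 *              struct foo_host *priv;
 *              int ret;
 *
 *              mmc = mmc_alloc_host(sizeof(*priv), &pdev->dev);
 *              if (!mmc)
 *                      return -ENOMEM;
 *              priv = mmc_priv(mmc);
 *
 *              ret = mmc_of_parse(mmc);
 *              if (ret)
 *                      goto err_free;
 *
 *              mmc->ops = &foo_ops;
 *              mmc->f_min = 400000;
 *              mmc->f_max = 52000000;
 *              mmc->max_segs = 64;
 *              mmc->max_blk_count = 65535;
 *
 *              ret = mmc_add_host(mmc);
 *              if (ret)
 *                      goto err_free;
 *
 *              return 0;
 *
 *      err_free:
 *              mmc_free_host(mmc);
 *              return ret;
 *      }
 */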

static inline void *mmc_priv(struct mmc_host *host)
{
        return (void *)host->private;
}

static inline struct mmc_host *mmc_from_priv(void *priv)
{
        return container_of(priv, struct mmc_host, private);
}
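/*
 * mmc_priv() and mmc_from_priv() convert between an mmc_host and the
 * driver-private area allocated directly behind it, so code that only holds
 * the private pointer (e.g. a DMA completion callback) can recover the host.
 * Hypothetical sketch ("struct foo_host" and its "mrq" member are
 * illustrative):
 *
 *      static void foo_dma_done(void *param)
 *      {
 *              struct foo_host *priv = param;
 *              struct mmc_host *host = mmc_from_priv(priv);
 *
 *              mmc_request_done(host, priv->mrq);
 *      }
 */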

#define mmc_host_is_spi(host)   ((host)->caps & MMC_CAP_SPI)

#define mmc_dev(x)      ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))

void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);

void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);

/*
 * May be called from host driver's system/runtime suspend/resume callbacks,
 * to know if SDIO IRQs have been claimed.
 */
static inline bool sdio_irq_claimed(struct mmc_host *host)
{
        return host->sdio_irqs > 0;
}

static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
        host->ops->enable_sdio_irq(host, 0);
        host->sdio_irq_pending = true;
        if (host->sdio_irq_thread)
                wake_up_process(host->sdio_irq_thread);
}

void sdio_signal_irq(struct mmc_host *host);

#ifdef CONFIG_REGULATOR
int mmc_regulator_set_ocr(struct mmc_host *mmc,
                          struct regulator *supply,
                          unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
#else
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
                                        struct regulator *supply,
                                        unsigned short vdd_bit)
{
        return 0;
}

static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
                                          struct mmc_ios *ios)
{
        return -EINVAL;
}
#endif

int mmc_regulator_get_supply(struct mmc_host *mmc);
int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
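/*
 * Typical (hypothetical) use of the regulator helpers: call
 * mmc_regulator_get_supply() at probe time to populate host->supply, then
 * drive vmmc from the power_mode transitions seen in ->set_ios():
 *
 *      static void foo_set_power(struct mmc_host *host, struct mmc_ios *ios)
 *      {
 *              if (IS_ERR(host->supply.vmmc))
 *                      return;
 *
 *              switch (ios->power_mode) {
 *              case MMC_POWER_UP:
 *                      mmc_regulator_set_ocr(host, host->supply.vmmc,
 *                                            ios->vdd);
 *                      break;
 *              case MMC_POWER_OFF:
 *                      mmc_regulator_set_ocr(host, host->supply.vmmc, 0);
 *                      break;
 *              }
 *      }
 */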

static inline int mmc_card_is_removable(struct mmc_host *host)
{
        return !(host->caps & MMC_CAP_NONREMOVABLE);
}

static inline int mmc_card_keep_power(struct mmc_host *host)
{
        return host->pm_flags & MMC_PM_KEEP_POWER;
}

static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
{
        return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
}

/* TODO: Move to private header */
static inline int mmc_card_hs(struct mmc_card *card)
{
        return card->host->ios.timing == MMC_TIMING_SD_HS ||
                card->host->ios.timing == MMC_TIMING_MMC_HS;
}

/* TODO: Move to private header */
static inline int mmc_card_uhs(struct mmc_card *card)
{
        return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 &&
                card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
}

void mmc_retune_timer_stop(struct mmc_host *host);

static inline void mmc_retune_needed(struct mmc_host *host)
{
        if (host->can_retune)
                host->need_retune = 1;
}

static inline bool mmc_can_retune(struct mmc_host *host)
{
        return host->can_retune == 1;
}

static inline bool mmc_doing_retune(struct mmc_host *host)
{
        return host->doing_retune == 1;
}

static inline bool mmc_doing_tune(struct mmc_host *host)
{
        return host->doing_retune == 1 || host->doing_init_tune == 1;
}

static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
        return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
                                             enum mmc_err_stat stat)
{
        host->err_stats[stat] += 1;
}
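/*
 * Hosts that want the error counters exposed via debugfs bump the relevant
 * counter wherever they classify an error, e.g. in a (hypothetical)
 * interrupt handler fragment:
 *
 *      if (status & FOO_INT_CMD_CRC_ERR)
 *              mmc_debugfs_err_stats_inc(host, MMC_ERR_CMD_CRC);
 */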

int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);

#endif /* LINUX_MMC_HOST_H */