xref: /openbmc/linux/drivers/mmc/core/core.c (revision 4e1a33b1)
1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
31 #include <linux/of.h>
32 
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/mmc.h>
41 
42 #include "core.h"
43 #include "card.h"
44 #include "bus.h"
45 #include "host.h"
46 #include "sdio_bus.h"
47 #include "pwrseq.h"
48 
49 #include "mmc_ops.h"
50 #include "sd_ops.h"
51 #include "sdio_ops.h"
52 
53 /* If the device is not responding */
54 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
55 
56 /*
57  * Background operations can take a long time, depending on the housekeeping
58  * operations the card has to perform.
59  */
60 #define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
61 
62 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
63 #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
64 
65 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
66 
67 /*
68  * Enabling software CRCs on the data blocks can be a significant (30%)
69  * performance cost, and for other reasons may not always be desired.
70  * So we allow it it to be disabled.
71  */
72 bool use_spi_crc = 1;
73 module_param(use_spi_crc, bool, 0);
74 
75 static int mmc_schedule_delayed_work(struct delayed_work *work,
76 				     unsigned long delay)
77 {
78 	/*
79 	 * We use the system_freezable_wq, because of two reasons.
80 	 * First, it allows several works (not the same work item) to be
81 	 * executed simultaneously. Second, the queue becomes frozen when
82 	 * userspace becomes frozen during system PM.
83 	 */
84 	return queue_delayed_work(system_freezable_wq, work, delay);
85 }
86 
87 #ifdef CONFIG_FAIL_MMC_REQUEST
88 
89 /*
90  * Internal function. Inject random data errors.
91  * If mmc_data is NULL no errors are injected.
92  */
93 static void mmc_should_fail_request(struct mmc_host *host,
94 				    struct mmc_request *mrq)
95 {
96 	struct mmc_command *cmd = mrq->cmd;
97 	struct mmc_data *data = mrq->data;
98 	static const int data_errors[] = {
99 		-ETIMEDOUT,
100 		-EILSEQ,
101 		-EIO,
102 	};
103 
104 	if (!data)
105 		return;
106 
107 	if (cmd->error || data->error ||
108 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
109 		return;
110 
111 	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
112 	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
113 }
114 
115 #else /* CONFIG_FAIL_MMC_REQUEST */
116 
117 static inline void mmc_should_fail_request(struct mmc_host *host,
118 					   struct mmc_request *mrq)
119 {
120 }
121 
122 #endif /* CONFIG_FAIL_MMC_REQUEST */
123 
124 static inline void mmc_complete_cmd(struct mmc_request *mrq)
125 {
126 	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
127 		complete_all(&mrq->cmd_completion);
128 }
129 
130 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
131 {
132 	if (!mrq->cap_cmd_during_tfr)
133 		return;
134 
135 	mmc_complete_cmd(mrq);
136 
137 	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
138 		 mmc_hostname(host), mrq->cmd->opcode);
139 }
140 EXPORT_SYMBOL(mmc_command_done);
141 
142 /**
143  *	mmc_request_done - finish processing an MMC request
144  *	@host: MMC host which completed request
145  *	@mrq: MMC request which request
146  *
147  *	MMC drivers should call this function when they have completed
148  *	their processing of a request.
149  */
150 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
151 {
152 	struct mmc_command *cmd = mrq->cmd;
153 	int err = cmd->error;
154 
155 	/* Flag re-tuning needed on CRC errors */
156 	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
157 	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
158 	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
159 	    (mrq->data && mrq->data->error == -EILSEQ) ||
160 	    (mrq->stop && mrq->stop->error == -EILSEQ)))
161 		mmc_retune_needed(host);
162 
163 	if (err && cmd->retries && mmc_host_is_spi(host)) {
164 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
165 			cmd->retries = 0;
166 	}
167 
168 	if (host->ongoing_mrq == mrq)
169 		host->ongoing_mrq = NULL;
170 
171 	mmc_complete_cmd(mrq);
172 
173 	trace_mmc_request_done(host, mrq);
174 
175 	if (err && cmd->retries && !mmc_card_removed(host->card)) {
176 		/*
177 		 * Request starter must handle retries - see
178 		 * mmc_wait_for_req_done().
179 		 */
180 		if (mrq->done)
181 			mrq->done(mrq);
182 	} else {
183 		mmc_should_fail_request(host, mrq);
184 
185 		if (!host->ongoing_mrq)
186 			led_trigger_event(host->led, LED_OFF);
187 
188 		if (mrq->sbc) {
189 			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
190 				mmc_hostname(host), mrq->sbc->opcode,
191 				mrq->sbc->error,
192 				mrq->sbc->resp[0], mrq->sbc->resp[1],
193 				mrq->sbc->resp[2], mrq->sbc->resp[3]);
194 		}
195 
196 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
197 			mmc_hostname(host), cmd->opcode, err,
198 			cmd->resp[0], cmd->resp[1],
199 			cmd->resp[2], cmd->resp[3]);
200 
201 		if (mrq->data) {
202 			pr_debug("%s:     %d bytes transferred: %d\n",
203 				mmc_hostname(host),
204 				mrq->data->bytes_xfered, mrq->data->error);
205 		}
206 
207 		if (mrq->stop) {
208 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
209 				mmc_hostname(host), mrq->stop->opcode,
210 				mrq->stop->error,
211 				mrq->stop->resp[0], mrq->stop->resp[1],
212 				mrq->stop->resp[2], mrq->stop->resp[3]);
213 		}
214 
215 		if (mrq->done)
216 			mrq->done(mrq);
217 	}
218 }
219 
220 EXPORT_SYMBOL(mmc_request_done);
221 
222 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
223 {
224 	int err;
225 
226 	/* Assumes host controller has been runtime resumed by mmc_claim_host */
227 	err = mmc_retune(host);
228 	if (err) {
229 		mrq->cmd->error = err;
230 		mmc_request_done(host, mrq);
231 		return;
232 	}
233 
234 	/*
235 	 * For sdio rw commands we must wait for card busy otherwise some
236 	 * sdio devices won't work properly.
237 	 */
238 	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
239 		int tries = 500; /* Wait aprox 500ms at maximum */
240 
241 		while (host->ops->card_busy(host) && --tries)
242 			mmc_delay(1);
243 
244 		if (tries == 0) {
245 			mrq->cmd->error = -EBUSY;
246 			mmc_request_done(host, mrq);
247 			return;
248 		}
249 	}
250 
251 	if (mrq->cap_cmd_during_tfr) {
252 		host->ongoing_mrq = mrq;
253 		/*
254 		 * Retry path could come through here without having waiting on
255 		 * cmd_completion, so ensure it is reinitialised.
256 		 */
257 		reinit_completion(&mrq->cmd_completion);
258 	}
259 
260 	trace_mmc_request_start(host, mrq);
261 
262 	host->ops->request(host, mrq);
263 }
264 
265 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
266 {
267 #ifdef CONFIG_MMC_DEBUG
268 	unsigned int i, sz;
269 	struct scatterlist *sg;
270 #endif
271 	mmc_retune_hold(host);
272 
273 	if (mmc_card_removed(host->card))
274 		return -ENOMEDIUM;
275 
276 	if (mrq->sbc) {
277 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
278 			 mmc_hostname(host), mrq->sbc->opcode,
279 			 mrq->sbc->arg, mrq->sbc->flags);
280 	}
281 
282 	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
283 		 mmc_hostname(host), mrq->cmd->opcode,
284 		 mrq->cmd->arg, mrq->cmd->flags);
285 
286 	if (mrq->data) {
287 		pr_debug("%s:     blksz %d blocks %d flags %08x "
288 			"tsac %d ms nsac %d\n",
289 			mmc_hostname(host), mrq->data->blksz,
290 			mrq->data->blocks, mrq->data->flags,
291 			mrq->data->timeout_ns / 1000000,
292 			mrq->data->timeout_clks);
293 	}
294 
295 	if (mrq->stop) {
296 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
297 			 mmc_hostname(host), mrq->stop->opcode,
298 			 mrq->stop->arg, mrq->stop->flags);
299 	}
300 
301 	WARN_ON(!host->claimed);
302 
303 	mrq->cmd->error = 0;
304 	mrq->cmd->mrq = mrq;
305 	if (mrq->sbc) {
306 		mrq->sbc->error = 0;
307 		mrq->sbc->mrq = mrq;
308 	}
309 	if (mrq->data) {
310 		if (mrq->data->blksz > host->max_blk_size ||
311 		    mrq->data->blocks > host->max_blk_count ||
312 		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
313 			return -EINVAL;
314 #ifdef CONFIG_MMC_DEBUG
315 		sz = 0;
316 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
317 			sz += sg->length;
318 		if (sz != mrq->data->blocks * mrq->data->blksz)
319 			return -EINVAL;
320 #endif
321 
322 		mrq->cmd->data = mrq->data;
323 		mrq->data->error = 0;
324 		mrq->data->mrq = mrq;
325 		if (mrq->stop) {
326 			mrq->data->stop = mrq->stop;
327 			mrq->stop->error = 0;
328 			mrq->stop->mrq = mrq;
329 		}
330 	}
331 	led_trigger_event(host->led, LED_FULL);
332 	__mmc_start_request(host, mrq);
333 
334 	return 0;
335 }
336 
337 /**
338  *	mmc_start_bkops - start BKOPS for supported cards
339  *	@card: MMC card to start BKOPS
340  *	@form_exception: A flag to indicate if this function was
341  *			 called due to an exception raised by the card
342  *
343  *	Start background operations whenever requested.
344  *	When the urgent BKOPS bit is set in a R1 command response
345  *	then background operations should be started immediately.
346 */
347 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
348 {
349 	int err;
350 	int timeout;
351 	bool use_busy_signal;
352 
353 	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
354 		return;
355 
356 	err = mmc_read_bkops_status(card);
357 	if (err) {
358 		pr_err("%s: Failed to read bkops status: %d\n",
359 		       mmc_hostname(card->host), err);
360 		return;
361 	}
362 
363 	if (!card->ext_csd.raw_bkops_status)
364 		return;
365 
366 	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
367 	    from_exception)
368 		return;
369 
370 	mmc_claim_host(card->host);
371 	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
372 		timeout = MMC_BKOPS_MAX_TIMEOUT;
373 		use_busy_signal = true;
374 	} else {
375 		timeout = 0;
376 		use_busy_signal = false;
377 	}
378 
379 	mmc_retune_hold(card->host);
380 
381 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
382 			EXT_CSD_BKOPS_START, 1, timeout, 0,
383 			use_busy_signal, true, false);
384 	if (err) {
385 		pr_warn("%s: Error %d starting bkops\n",
386 			mmc_hostname(card->host), err);
387 		mmc_retune_release(card->host);
388 		goto out;
389 	}
390 
391 	/*
392 	 * For urgent bkops status (LEVEL_2 and more)
393 	 * bkops executed synchronously, otherwise
394 	 * the operation is in progress
395 	 */
396 	if (!use_busy_signal)
397 		mmc_card_set_doing_bkops(card);
398 	else
399 		mmc_retune_release(card->host);
400 out:
401 	mmc_release_host(card->host);
402 }
403 EXPORT_SYMBOL(mmc_start_bkops);
404 
405 /*
406  * mmc_wait_data_done() - done callback for data request
407  * @mrq: done data request
408  *
409  * Wakes up mmc context, passed as a callback to host controller driver
410  */
411 static void mmc_wait_data_done(struct mmc_request *mrq)
412 {
413 	struct mmc_context_info *context_info = &mrq->host->context_info;
414 
415 	context_info->is_done_rcv = true;
416 	wake_up_interruptible(&context_info->wait);
417 }
418 
419 static void mmc_wait_done(struct mmc_request *mrq)
420 {
421 	complete(&mrq->completion);
422 }
423 
424 static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
425 {
426 	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
427 
428 	/*
429 	 * If there is an ongoing transfer, wait for the command line to become
430 	 * available.
431 	 */
432 	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
433 		wait_for_completion(&ongoing_mrq->cmd_completion);
434 }
435 
436 /*
437  *__mmc_start_data_req() - starts data request
438  * @host: MMC host to start the request
439  * @mrq: data request to start
440  *
441  * Sets the done callback to be called when request is completed by the card.
442  * Starts data mmc request execution
443  * If an ongoing transfer is already in progress, wait for the command line
444  * to become available before sending another command.
445  */
446 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
447 {
448 	int err;
449 
450 	mmc_wait_ongoing_tfr_cmd(host);
451 
452 	mrq->done = mmc_wait_data_done;
453 	mrq->host = host;
454 
455 	init_completion(&mrq->cmd_completion);
456 
457 	err = mmc_start_request(host, mrq);
458 	if (err) {
459 		mrq->cmd->error = err;
460 		mmc_complete_cmd(mrq);
461 		mmc_wait_data_done(mrq);
462 	}
463 
464 	return err;
465 }
466 
467 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
468 {
469 	int err;
470 
471 	mmc_wait_ongoing_tfr_cmd(host);
472 
473 	init_completion(&mrq->completion);
474 	mrq->done = mmc_wait_done;
475 
476 	init_completion(&mrq->cmd_completion);
477 
478 	err = mmc_start_request(host, mrq);
479 	if (err) {
480 		mrq->cmd->error = err;
481 		mmc_complete_cmd(mrq);
482 		complete(&mrq->completion);
483 	}
484 
485 	return err;
486 }
487 
488 /*
489  * mmc_wait_for_data_req_done() - wait for request completed
490  * @host: MMC host to prepare the command.
491  * @mrq: MMC request to wait for
492  *
493  * Blocks MMC context till host controller will ack end of data request
494  * execution or new request notification arrives from the block layer.
495  * Handles command retries.
496  *
497  * Returns enum mmc_blk_status after checking errors.
498  */
499 static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
500 						      struct mmc_request *mrq)
501 {
502 	struct mmc_command *cmd;
503 	struct mmc_context_info *context_info = &host->context_info;
504 	enum mmc_blk_status status;
505 
506 	while (1) {
507 		wait_event_interruptible(context_info->wait,
508 				(context_info->is_done_rcv ||
509 				 context_info->is_new_req));
510 
511 		if (context_info->is_done_rcv) {
512 			context_info->is_done_rcv = false;
513 			cmd = mrq->cmd;
514 
515 			if (!cmd->error || !cmd->retries ||
516 			    mmc_card_removed(host->card)) {
517 				status = host->areq->err_check(host->card,
518 							       host->areq);
519 				break; /* return status */
520 			} else {
521 				mmc_retune_recheck(host);
522 				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
523 					mmc_hostname(host),
524 					cmd->opcode, cmd->error);
525 				cmd->retries--;
526 				cmd->error = 0;
527 				__mmc_start_request(host, mrq);
528 				continue; /* wait for done/new event again */
529 			}
530 		}
531 
532 		return MMC_BLK_NEW_REQUEST;
533 	}
534 	mmc_retune_release(host);
535 	return status;
536 }
537 
538 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
539 {
540 	struct mmc_command *cmd;
541 
542 	while (1) {
543 		wait_for_completion(&mrq->completion);
544 
545 		cmd = mrq->cmd;
546 
547 		/*
548 		 * If host has timed out waiting for the sanitize
549 		 * to complete, card might be still in programming state
550 		 * so let's try to bring the card out of programming
551 		 * state.
552 		 */
553 		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
554 			if (!mmc_interrupt_hpi(host->card)) {
555 				pr_warn("%s: %s: Interrupted sanitize\n",
556 					mmc_hostname(host), __func__);
557 				cmd->error = 0;
558 				break;
559 			} else {
560 				pr_err("%s: %s: Failed to interrupt sanitize\n",
561 				       mmc_hostname(host), __func__);
562 			}
563 		}
564 		if (!cmd->error || !cmd->retries ||
565 		    mmc_card_removed(host->card))
566 			break;
567 
568 		mmc_retune_recheck(host);
569 
570 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
571 			 mmc_hostname(host), cmd->opcode, cmd->error);
572 		cmd->retries--;
573 		cmd->error = 0;
574 		__mmc_start_request(host, mrq);
575 	}
576 
577 	mmc_retune_release(host);
578 }
579 EXPORT_SYMBOL(mmc_wait_for_req_done);
580 
581 /**
582  *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
583  *	@host: MMC host
584  *	@mrq: MMC request
585  *
586  *	mmc_is_req_done() is used with requests that have
587  *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
588  *	starting a request and before waiting for it to complete. That is,
589  *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
590  *	and before mmc_wait_for_req_done(). If it is called at other times the
591  *	result is not meaningful.
592  */
593 bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
594 {
595 	if (host->areq)
596 		return host->context_info.is_done_rcv;
597 	else
598 		return completion_done(&mrq->completion);
599 }
600 EXPORT_SYMBOL(mmc_is_req_done);
601 
602 /**
603  *	mmc_pre_req - Prepare for a new request
604  *	@host: MMC host to prepare command
605  *	@mrq: MMC request to prepare for
606  *
607  *	mmc_pre_req() is called in prior to mmc_start_req() to let
608  *	host prepare for the new request. Preparation of a request may be
609  *	performed while another request is running on the host.
610  */
611 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
612 {
613 	if (host->ops->pre_req)
614 		host->ops->pre_req(host, mrq);
615 }
616 
617 /**
618  *	mmc_post_req - Post process a completed request
619  *	@host: MMC host to post process command
620  *	@mrq: MMC request to post process for
621  *	@err: Error, if non zero, clean up any resources made in pre_req
622  *
623  *	Let the host post process a completed request. Post processing of
624  *	a request may be performed while another reuqest is running.
625  */
626 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
627 			 int err)
628 {
629 	if (host->ops->post_req)
630 		host->ops->post_req(host, mrq, err);
631 }
632 
633 /**
634  * mmc_finalize_areq() - finalize an asynchronous request
635  * @host: MMC host to finalize any ongoing request on
636  *
637  * Returns the status of the ongoing asynchronous request, but
638  * MMC_BLK_SUCCESS if no request was going on.
639  */
640 static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
641 {
642 	enum mmc_blk_status status;
643 
644 	if (!host->areq)
645 		return MMC_BLK_SUCCESS;
646 
647 	status = mmc_wait_for_data_req_done(host, host->areq->mrq);
648 	if (status == MMC_BLK_NEW_REQUEST)
649 		return status;
650 
651 	/*
652 	 * Check BKOPS urgency for each R1 response
653 	 */
654 	if (host->card && mmc_card_mmc(host->card) &&
655 	    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
656 	     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
657 	    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
658 		mmc_start_bkops(host->card, true);
659 	}
660 
661 	return status;
662 }
663 
664 /**
665  *	mmc_start_areq - start an asynchronous request
666  *	@host: MMC host to start command
667  *	@areq: asynchronous request to start
668  *	@ret_stat: out parameter for status
669  *
670  *	Start a new MMC custom command request for a host.
671  *	If there is on ongoing async request wait for completion
672  *	of that request and start the new one and return.
673  *	Does not wait for the new request to complete.
674  *
675  *      Returns the completed request, NULL in case of none completed.
676  *	Wait for the an ongoing request (previoulsy started) to complete and
677  *	return the completed request. If there is no ongoing request, NULL
678  *	is returned without waiting. NULL is not an error condition.
679  */
680 struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
681 				     struct mmc_async_req *areq,
682 				     enum mmc_blk_status *ret_stat)
683 {
684 	enum mmc_blk_status status;
685 	int start_err = 0;
686 	struct mmc_async_req *data = host->areq;
687 
688 	/* Prepare a new request */
689 	if (areq)
690 		mmc_pre_req(host, areq->mrq);
691 
692 	/* Finalize previous request */
693 	status = mmc_finalize_areq(host);
694 
695 	/* The previous request is still going on... */
696 	if (status == MMC_BLK_NEW_REQUEST) {
697 		if (ret_stat)
698 			*ret_stat = status;
699 		return NULL;
700 	}
701 
702 	/* Fine so far, start the new request! */
703 	if (status == MMC_BLK_SUCCESS && areq)
704 		start_err = __mmc_start_data_req(host, areq->mrq);
705 
706 	/* Postprocess the old request at this point */
707 	if (host->areq)
708 		mmc_post_req(host, host->areq->mrq, 0);
709 
710 	/* Cancel a prepared request if it was not started. */
711 	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
712 		mmc_post_req(host, areq->mrq, -EINVAL);
713 
714 	if (status != MMC_BLK_SUCCESS)
715 		host->areq = NULL;
716 	else
717 		host->areq = areq;
718 
719 	if (ret_stat)
720 		*ret_stat = status;
721 	return data;
722 }
723 EXPORT_SYMBOL(mmc_start_areq);
724 
725 /**
726  *	mmc_wait_for_req - start a request and wait for completion
727  *	@host: MMC host to start command
728  *	@mrq: MMC request to start
729  *
730  *	Start a new MMC custom command request for a host, and wait
731  *	for the command to complete. In the case of 'cap_cmd_during_tfr'
732  *	requests, the transfer is ongoing and the caller can issue further
733  *	commands that do not use the data lines, and then wait by calling
734  *	mmc_wait_for_req_done().
735  *	Does not attempt to parse the response.
736  */
737 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
738 {
739 	__mmc_start_req(host, mrq);
740 
741 	if (!mrq->cap_cmd_during_tfr)
742 		mmc_wait_for_req_done(host, mrq);
743 }
744 EXPORT_SYMBOL(mmc_wait_for_req);
745 
746 /**
747  *	mmc_interrupt_hpi - Issue for High priority Interrupt
748  *	@card: the MMC card associated with the HPI transfer
749  *
750  *	Issued High Priority Interrupt, and check for card status
751  *	until out-of prg-state.
752  */
753 int mmc_interrupt_hpi(struct mmc_card *card)
754 {
755 	int err;
756 	u32 status;
757 	unsigned long prg_wait;
758 
759 	if (!card->ext_csd.hpi_en) {
760 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
761 		return 1;
762 	}
763 
764 	mmc_claim_host(card->host);
765 	err = mmc_send_status(card, &status);
766 	if (err) {
767 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
768 		goto out;
769 	}
770 
771 	switch (R1_CURRENT_STATE(status)) {
772 	case R1_STATE_IDLE:
773 	case R1_STATE_READY:
774 	case R1_STATE_STBY:
775 	case R1_STATE_TRAN:
776 		/*
777 		 * In idle and transfer states, HPI is not needed and the caller
778 		 * can issue the next intended command immediately
779 		 */
780 		goto out;
781 	case R1_STATE_PRG:
782 		break;
783 	default:
784 		/* In all other states, it's illegal to issue HPI */
785 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
786 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
787 		err = -EINVAL;
788 		goto out;
789 	}
790 
791 	err = mmc_send_hpi_cmd(card, &status);
792 	if (err)
793 		goto out;
794 
795 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
796 	do {
797 		err = mmc_send_status(card, &status);
798 
799 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
800 			break;
801 		if (time_after(jiffies, prg_wait))
802 			err = -ETIMEDOUT;
803 	} while (!err);
804 
805 out:
806 	mmc_release_host(card->host);
807 	return err;
808 }
809 EXPORT_SYMBOL(mmc_interrupt_hpi);
810 
811 /**
812  *	mmc_wait_for_cmd - start a command and wait for completion
813  *	@host: MMC host to start command
814  *	@cmd: MMC command to start
815  *	@retries: maximum number of retries
816  *
817  *	Start a new MMC command for a host, and wait for the command
818  *	to complete.  Return any error that occurred while the command
819  *	was executing.  Do not attempt to parse the response.
820  */
821 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
822 {
823 	struct mmc_request mrq = {};
824 
825 	WARN_ON(!host->claimed);
826 
827 	memset(cmd->resp, 0, sizeof(cmd->resp));
828 	cmd->retries = retries;
829 
830 	mrq.cmd = cmd;
831 	cmd->data = NULL;
832 
833 	mmc_wait_for_req(host, &mrq);
834 
835 	return cmd->error;
836 }
837 
838 EXPORT_SYMBOL(mmc_wait_for_cmd);
839 
840 /**
841  *	mmc_stop_bkops - stop ongoing BKOPS
842  *	@card: MMC card to check BKOPS
843  *
844  *	Send HPI command to stop ongoing background operations to
845  *	allow rapid servicing of foreground operations, e.g. read/
846  *	writes. Wait until the card comes out of the programming state
847  *	to avoid errors in servicing read/write requests.
848  */
849 int mmc_stop_bkops(struct mmc_card *card)
850 {
851 	int err = 0;
852 
853 	err = mmc_interrupt_hpi(card);
854 
855 	/*
856 	 * If err is EINVAL, we can't issue an HPI.
857 	 * It should complete the BKOPS.
858 	 */
859 	if (!err || (err == -EINVAL)) {
860 		mmc_card_clr_doing_bkops(card);
861 		mmc_retune_release(card->host);
862 		err = 0;
863 	}
864 
865 	return err;
866 }
867 EXPORT_SYMBOL(mmc_stop_bkops);
868 
869 int mmc_read_bkops_status(struct mmc_card *card)
870 {
871 	int err;
872 	u8 *ext_csd;
873 
874 	mmc_claim_host(card->host);
875 	err = mmc_get_ext_csd(card, &ext_csd);
876 	mmc_release_host(card->host);
877 	if (err)
878 		return err;
879 
880 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
881 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
882 	kfree(ext_csd);
883 	return 0;
884 }
885 EXPORT_SYMBOL(mmc_read_bkops_status);
886 
887 /**
888  *	mmc_set_data_timeout - set the timeout for a data command
889  *	@data: data phase for command
890  *	@card: the MMC card associated with the data transfer
891  *
892  *	Computes the data timeout parameters according to the
893  *	correct algorithm given the card type.
894  */
895 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
896 {
897 	unsigned int mult;
898 
899 	/*
900 	 * SDIO cards only define an upper 1 s limit on access.
901 	 */
902 	if (mmc_card_sdio(card)) {
903 		data->timeout_ns = 1000000000;
904 		data->timeout_clks = 0;
905 		return;
906 	}
907 
908 	/*
909 	 * SD cards use a 100 multiplier rather than 10
910 	 */
911 	mult = mmc_card_sd(card) ? 100 : 10;
912 
913 	/*
914 	 * Scale up the multiplier (and therefore the timeout) by
915 	 * the r2w factor for writes.
916 	 */
917 	if (data->flags & MMC_DATA_WRITE)
918 		mult <<= card->csd.r2w_factor;
919 
920 	data->timeout_ns = card->csd.tacc_ns * mult;
921 	data->timeout_clks = card->csd.tacc_clks * mult;
922 
923 	/*
924 	 * SD cards also have an upper limit on the timeout.
925 	 */
926 	if (mmc_card_sd(card)) {
927 		unsigned int timeout_us, limit_us;
928 
929 		timeout_us = data->timeout_ns / 1000;
930 		if (card->host->ios.clock)
931 			timeout_us += data->timeout_clks * 1000 /
932 				(card->host->ios.clock / 1000);
933 
934 		if (data->flags & MMC_DATA_WRITE)
935 			/*
936 			 * The MMC spec "It is strongly recommended
937 			 * for hosts to implement more than 500ms
938 			 * timeout value even if the card indicates
939 			 * the 250ms maximum busy length."  Even the
940 			 * previous value of 300ms is known to be
941 			 * insufficient for some cards.
942 			 */
943 			limit_us = 3000000;
944 		else
945 			limit_us = 100000;
946 
947 		/*
948 		 * SDHC cards always use these fixed values.
949 		 */
950 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
951 			data->timeout_ns = limit_us * 1000;
952 			data->timeout_clks = 0;
953 		}
954 
955 		/* assign limit value if invalid */
956 		if (timeout_us == 0)
957 			data->timeout_ns = limit_us * 1000;
958 	}
959 
960 	/*
961 	 * Some cards require longer data read timeout than indicated in CSD.
962 	 * Address this by setting the read timeout to a "reasonably high"
963 	 * value. For the cards tested, 600ms has proven enough. If necessary,
964 	 * this value can be increased if other problematic cards require this.
965 	 */
966 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
967 		data->timeout_ns = 600000000;
968 		data->timeout_clks = 0;
969 	}
970 
971 	/*
972 	 * Some cards need very high timeouts if driven in SPI mode.
973 	 * The worst observed timeout was 900ms after writing a
974 	 * continuous stream of data until the internal logic
975 	 * overflowed.
976 	 */
977 	if (mmc_host_is_spi(card->host)) {
978 		if (data->flags & MMC_DATA_WRITE) {
979 			if (data->timeout_ns < 1000000000)
980 				data->timeout_ns = 1000000000;	/* 1s */
981 		} else {
982 			if (data->timeout_ns < 100000000)
983 				data->timeout_ns =  100000000;	/* 100ms */
984 		}
985 	}
986 }
987 EXPORT_SYMBOL(mmc_set_data_timeout);
988 
989 /**
990  *	mmc_align_data_size - pads a transfer size to a more optimal value
991  *	@card: the MMC card associated with the data transfer
992  *	@sz: original transfer size
993  *
994  *	Pads the original data size with a number of extra bytes in
995  *	order to avoid controller bugs and/or performance hits
996  *	(e.g. some controllers revert to PIO for certain sizes).
997  *
998  *	Returns the improved size, which might be unmodified.
999  *
1000  *	Note that this function is only relevant when issuing a
1001  *	single scatter gather entry.
1002  */
1003 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
1004 {
1005 	/*
1006 	 * FIXME: We don't have a system for the controller to tell
1007 	 * the core about its problems yet, so for now we just 32-bit
1008 	 * align the size.
1009 	 */
1010 	sz = ((sz + 3) / 4) * 4;
1011 
1012 	return sz;
1013 }
1014 EXPORT_SYMBOL(mmc_align_data_size);
1015 
1016 /**
1017  *	__mmc_claim_host - exclusively claim a host
1018  *	@host: mmc host to claim
1019  *	@abort: whether or not the operation should be aborted
1020  *
1021  *	Claim a host for a set of operations.  If @abort is non null and
1022  *	dereference a non-zero value then this will return prematurely with
1023  *	that non-zero value without acquiring the lock.  Returns zero
1024  *	with the lock held otherwise.
1025  */
1026 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
1027 {
1028 	DECLARE_WAITQUEUE(wait, current);
1029 	unsigned long flags;
1030 	int stop;
1031 	bool pm = false;
1032 
1033 	might_sleep();
1034 
1035 	add_wait_queue(&host->wq, &wait);
1036 	spin_lock_irqsave(&host->lock, flags);
1037 	while (1) {
1038 		set_current_state(TASK_UNINTERRUPTIBLE);
1039 		stop = abort ? atomic_read(abort) : 0;
1040 		if (stop || !host->claimed || host->claimer == current)
1041 			break;
1042 		spin_unlock_irqrestore(&host->lock, flags);
1043 		schedule();
1044 		spin_lock_irqsave(&host->lock, flags);
1045 	}
1046 	set_current_state(TASK_RUNNING);
1047 	if (!stop) {
1048 		host->claimed = 1;
1049 		host->claimer = current;
1050 		host->claim_cnt += 1;
1051 		if (host->claim_cnt == 1)
1052 			pm = true;
1053 	} else
1054 		wake_up(&host->wq);
1055 	spin_unlock_irqrestore(&host->lock, flags);
1056 	remove_wait_queue(&host->wq, &wait);
1057 
1058 	if (pm)
1059 		pm_runtime_get_sync(mmc_dev(host));
1060 
1061 	return stop;
1062 }
1063 EXPORT_SYMBOL(__mmc_claim_host);
1064 
1065 /**
1066  *	mmc_release_host - release a host
1067  *	@host: mmc host to release
1068  *
1069  *	Release a MMC host, allowing others to claim the host
1070  *	for their operations.
1071  */
1072 void mmc_release_host(struct mmc_host *host)
1073 {
1074 	unsigned long flags;
1075 
1076 	WARN_ON(!host->claimed);
1077 
1078 	spin_lock_irqsave(&host->lock, flags);
1079 	if (--host->claim_cnt) {
1080 		/* Release for nested claim */
1081 		spin_unlock_irqrestore(&host->lock, flags);
1082 	} else {
1083 		host->claimed = 0;
1084 		host->claimer = NULL;
1085 		spin_unlock_irqrestore(&host->lock, flags);
1086 		wake_up(&host->wq);
1087 		pm_runtime_mark_last_busy(mmc_dev(host));
1088 		pm_runtime_put_autosuspend(mmc_dev(host));
1089 	}
1090 }
1091 EXPORT_SYMBOL(mmc_release_host);
1092 
1093 /*
1094  * This is a helper function, which fetches a runtime pm reference for the
1095  * card device and also claims the host.
1096  */
1097 void mmc_get_card(struct mmc_card *card)
1098 {
1099 	pm_runtime_get_sync(&card->dev);
1100 	mmc_claim_host(card->host);
1101 }
1102 EXPORT_SYMBOL(mmc_get_card);
1103 
1104 /*
1105  * This is a helper function, which releases the host and drops the runtime
1106  * pm reference for the card device.
1107  */
1108 void mmc_put_card(struct mmc_card *card)
1109 {
1110 	mmc_release_host(card->host);
1111 	pm_runtime_mark_last_busy(&card->dev);
1112 	pm_runtime_put_autosuspend(&card->dev);
1113 }
1114 EXPORT_SYMBOL(mmc_put_card);
1115 
1116 /*
1117  * Internal function that does the actual ios call to the host driver,
1118  * optionally printing some debug output.
1119  */
1120 static inline void mmc_set_ios(struct mmc_host *host)
1121 {
1122 	struct mmc_ios *ios = &host->ios;
1123 
1124 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1125 		"width %u timing %u\n",
1126 		 mmc_hostname(host), ios->clock, ios->bus_mode,
1127 		 ios->power_mode, ios->chip_select, ios->vdd,
1128 		 1 << ios->bus_width, ios->timing);
1129 
1130 	host->ops->set_ios(host, ios);
1131 }
1132 
1133 /*
1134  * Control chip select pin on a host.
1135  */
1136 void mmc_set_chip_select(struct mmc_host *host, int mode)
1137 {
1138 	host->ios.chip_select = mode;
1139 	mmc_set_ios(host);
1140 }
1141 
1142 /*
1143  * Sets the host clock to the highest possible frequency that
1144  * is below "hz".
1145  */
1146 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1147 {
1148 	WARN_ON(hz && hz < host->f_min);
1149 
1150 	if (hz > host->f_max)
1151 		hz = host->f_max;
1152 
1153 	host->ios.clock = hz;
1154 	mmc_set_ios(host);
1155 }
1156 
1157 int mmc_execute_tuning(struct mmc_card *card)
1158 {
1159 	struct mmc_host *host = card->host;
1160 	u32 opcode;
1161 	int err;
1162 
1163 	if (!host->ops->execute_tuning)
1164 		return 0;
1165 
1166 	if (mmc_card_mmc(card))
1167 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1168 	else
1169 		opcode = MMC_SEND_TUNING_BLOCK;
1170 
1171 	err = host->ops->execute_tuning(host, opcode);
1172 
1173 	if (err)
1174 		pr_err("%s: tuning execution failed: %d\n",
1175 			mmc_hostname(host), err);
1176 	else
1177 		mmc_retune_enable(host);
1178 
1179 	return err;
1180 }
1181 
1182 /*
1183  * Change the bus mode (open drain/push-pull) of a host.
1184  */
1185 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1186 {
1187 	host->ios.bus_mode = mode;
1188 	mmc_set_ios(host);
1189 }
1190 
1191 /*
1192  * Change data bus width of a host.
1193  */
1194 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1195 {
1196 	host->ios.bus_width = width;
1197 	mmc_set_ios(host);
1198 }
1199 
1200 /*
1201  * Set initial state after a power cycle or a hw_reset.
1202  */
1203 void mmc_set_initial_state(struct mmc_host *host)
1204 {
1205 	mmc_retune_disable(host);
1206 
1207 	if (mmc_host_is_spi(host))
1208 		host->ios.chip_select = MMC_CS_HIGH;
1209 	else
1210 		host->ios.chip_select = MMC_CS_DONTCARE;
1211 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1212 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1213 	host->ios.timing = MMC_TIMING_LEGACY;
1214 	host->ios.drv_type = 0;
1215 	host->ios.enhanced_strobe = false;
1216 
1217 	/*
1218 	 * Make sure we are in non-enhanced strobe mode before we
1219 	 * actually enable it in ext_csd.
1220 	 */
1221 	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1222 	     host->ops->hs400_enhanced_strobe)
1223 		host->ops->hs400_enhanced_strobe(host, &host->ios);
1224 
1225 	mmc_set_ios(host);
1226 }
1227 
1228 /**
1229  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1230  * @vdd:	voltage (mV)
1231  * @low_bits:	prefer low bits in boundary cases
1232  *
1233  * This function returns the OCR bit number according to the provided @vdd
1234  * value. If conversion is not possible a negative errno value returned.
1235  *
1236  * Depending on the @low_bits flag the function prefers low or high OCR bits
1237  * on boundary voltages. For example,
1238  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1239  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1240  *
1241  * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1242  */
1243 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1244 {
1245 	const int max_bit = ilog2(MMC_VDD_35_36);
1246 	int bit;
1247 
1248 	if (vdd < 1650 || vdd > 3600)
1249 		return -EINVAL;
1250 
1251 	if (vdd >= 1650 && vdd <= 1950)
1252 		return ilog2(MMC_VDD_165_195);
1253 
1254 	if (low_bits)
1255 		vdd -= 1;
1256 
1257 	/* Base 2000 mV, step 100 mV, bit's base 8. */
1258 	bit = (vdd - 2000) / 100 + 8;
1259 	if (bit > max_bit)
1260 		return max_bit;
1261 	return bit;
1262 }
1263 
1264 /**
1265  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1266  * @vdd_min:	minimum voltage value (mV)
1267  * @vdd_max:	maximum voltage value (mV)
1268  *
1269  * This function returns the OCR mask bits according to the provided @vdd_min
1270  * and @vdd_max values. If conversion is not possible the function returns 0.
1271  *
1272  * Notes wrt boundary cases:
1273  * This function sets the OCR bits for all boundary voltages, for example
1274  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1275  * MMC_VDD_34_35 mask.
1276  */
1277 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1278 {
1279 	u32 mask = 0;
1280 
1281 	if (vdd_max < vdd_min)
1282 		return 0;
1283 
1284 	/* Prefer high bits for the boundary vdd_max values. */
1285 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1286 	if (vdd_max < 0)
1287 		return 0;
1288 
1289 	/* Prefer low bits for the boundary vdd_min values. */
1290 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1291 	if (vdd_min < 0)
1292 		return 0;
1293 
1294 	/* Fill the mask, from max bit to min bit. */
1295 	while (vdd_max >= vdd_min)
1296 		mask |= 1 << vdd_max--;
1297 
1298 	return mask;
1299 }
1300 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1301 
1302 #ifdef CONFIG_OF
1303 
1304 /**
1305  * mmc_of_parse_voltage - return mask of supported voltages
1306  * @np: The device node need to be parsed.
1307  * @mask: mask of voltages available for MMC/SD/SDIO
1308  *
1309  * Parse the "voltage-ranges" DT property, returning zero if it is not
1310  * found, negative errno if the voltage-range specification is invalid,
1311  * or one if the voltage-range is specified and successfully parsed.
1312  */
1313 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1314 {
1315 	const u32 *voltage_ranges;
1316 	int num_ranges, i;
1317 
1318 	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1319 	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1320 	if (!voltage_ranges) {
1321 		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1322 		return 0;
1323 	}
1324 	if (!num_ranges) {
1325 		pr_err("%s: voltage-ranges empty\n", np->full_name);
1326 		return -EINVAL;
1327 	}
1328 
1329 	for (i = 0; i < num_ranges; i++) {
1330 		const int j = i * 2;
1331 		u32 ocr_mask;
1332 
1333 		ocr_mask = mmc_vddrange_to_ocrmask(
1334 				be32_to_cpu(voltage_ranges[j]),
1335 				be32_to_cpu(voltage_ranges[j + 1]));
1336 		if (!ocr_mask) {
1337 			pr_err("%s: voltage-range #%d is invalid\n",
1338 				np->full_name, i);
1339 			return -EINVAL;
1340 		}
1341 		*mask |= ocr_mask;
1342 	}
1343 
1344 	return 1;
1345 }
1346 EXPORT_SYMBOL(mmc_of_parse_voltage);
1347 
1348 #endif /* CONFIG_OF */
1349 
1350 static int mmc_of_get_func_num(struct device_node *node)
1351 {
1352 	u32 reg;
1353 	int ret;
1354 
1355 	ret = of_property_read_u32(node, "reg", &reg);
1356 	if (ret < 0)
1357 		return ret;
1358 
1359 	return reg;
1360 }
1361 
1362 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1363 		unsigned func_num)
1364 {
1365 	struct device_node *node;
1366 
1367 	if (!host->parent || !host->parent->of_node)
1368 		return NULL;
1369 
1370 	for_each_child_of_node(host->parent->of_node, node) {
1371 		if (mmc_of_get_func_num(node) == func_num)
1372 			return node;
1373 	}
1374 
1375 	return NULL;
1376 }
1377 
1378 #ifdef CONFIG_REGULATOR
1379 
1380 /**
1381  * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1382  * @vdd_bit:	OCR bit number
1383  * @min_uV:	minimum voltage value (mV)
1384  * @max_uV:	maximum voltage value (mV)
1385  *
1386  * This function returns the voltage range according to the provided OCR
1387  * bit number. If conversion is not possible a negative errno value returned.
1388  */
1389 static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1390 {
1391 	int		tmp;
1392 
1393 	if (!vdd_bit)
1394 		return -EINVAL;
1395 
1396 	/*
1397 	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1398 	 * bits this regulator doesn't quite support ... don't
1399 	 * be too picky, most cards and regulators are OK with
1400 	 * a 0.1V range goof (it's a small error percentage).
1401 	 */
1402 	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1403 	if (tmp == 0) {
1404 		*min_uV = 1650 * 1000;
1405 		*max_uV = 1950 * 1000;
1406 	} else {
1407 		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1408 		*max_uV = *min_uV + 100 * 1000;
1409 	}
1410 
1411 	return 0;
1412 }
1413 
1414 /**
1415  * mmc_regulator_get_ocrmask - return mask of supported voltages
1416  * @supply: regulator to use
1417  *
1418  * This returns either a negative errno, or a mask of voltages that
1419  * can be provided to MMC/SD/SDIO devices using the specified voltage
1420  * regulator.  This would normally be called before registering the
1421  * MMC host adapter.
1422  */
1423 int mmc_regulator_get_ocrmask(struct regulator *supply)
1424 {
1425 	int			result = 0;
1426 	int			count;
1427 	int			i;
1428 	int			vdd_uV;
1429 	int			vdd_mV;
1430 
1431 	count = regulator_count_voltages(supply);
1432 	if (count < 0)
1433 		return count;
1434 
1435 	for (i = 0; i < count; i++) {
1436 		vdd_uV = regulator_list_voltage(supply, i);
1437 		if (vdd_uV <= 0)
1438 			continue;
1439 
1440 		vdd_mV = vdd_uV / 1000;
1441 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1442 	}
1443 
1444 	if (!result) {
1445 		vdd_uV = regulator_get_voltage(supply);
1446 		if (vdd_uV <= 0)
1447 			return vdd_uV;
1448 
1449 		vdd_mV = vdd_uV / 1000;
1450 		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1451 	}
1452 
1453 	return result;
1454 }
1455 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1456 
1457 /**
1458  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1459  * @mmc: the host to regulate
1460  * @supply: regulator to use
1461  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1462  *
1463  * Returns zero on success, else negative errno.
1464  *
1465  * MMC host drivers may use this to enable or disable a regulator using
1466  * a particular supply voltage.  This would normally be called from the
1467  * set_ios() method.
1468  */
1469 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1470 			struct regulator *supply,
1471 			unsigned short vdd_bit)
1472 {
1473 	int			result = 0;
1474 	int			min_uV, max_uV;
1475 
1476 	if (vdd_bit) {
1477 		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1478 
1479 		result = regulator_set_voltage(supply, min_uV, max_uV);
1480 		if (result == 0 && !mmc->regulator_enabled) {
1481 			result = regulator_enable(supply);
1482 			if (!result)
1483 				mmc->regulator_enabled = true;
1484 		}
1485 	} else if (mmc->regulator_enabled) {
1486 		result = regulator_disable(supply);
1487 		if (result == 0)
1488 			mmc->regulator_enabled = false;
1489 	}
1490 
1491 	if (result)
1492 		dev_err(mmc_dev(mmc),
1493 			"could not set regulator OCR (%d)\n", result);
1494 	return result;
1495 }
1496 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1497 
1498 static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1499 						  int min_uV, int target_uV,
1500 						  int max_uV)
1501 {
1502 	/*
1503 	 * Check if supported first to avoid errors since we may try several
1504 	 * signal levels during power up and don't want to show errors.
1505 	 */
1506 	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1507 		return -EINVAL;
1508 
1509 	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1510 					     max_uV);
1511 }
1512 
1513 /**
1514  * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1515  *
1516  * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1517  * That will match the behavior of old boards where VQMMC and VMMC were supplied
1518  * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1519  * SD card spec also define VQMMC in terms of VMMC.
1520  * If this is not possible we'll try the full 2.7-3.6V of the spec.
1521  *
1522  * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1523  * requested voltage.  This is definitely a good idea for UHS where there's a
1524  * separate regulator on the card that's trying to make 1.8V and it's best if
1525  * we match.
1526  *
1527  * This function is expected to be used by a controller's
1528  * start_signal_voltage_switch() function.
1529  */
1530 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1531 {
1532 	struct device *dev = mmc_dev(mmc);
1533 	int ret, volt, min_uV, max_uV;
1534 
1535 	/* If no vqmmc supply then we can't change the voltage */
1536 	if (IS_ERR(mmc->supply.vqmmc))
1537 		return -EINVAL;
1538 
1539 	switch (ios->signal_voltage) {
1540 	case MMC_SIGNAL_VOLTAGE_120:
1541 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1542 						1100000, 1200000, 1300000);
1543 	case MMC_SIGNAL_VOLTAGE_180:
1544 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1545 						1700000, 1800000, 1950000);
1546 	case MMC_SIGNAL_VOLTAGE_330:
1547 		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1548 		if (ret < 0)
1549 			return ret;
1550 
1551 		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1552 			__func__, volt, max_uV);
1553 
1554 		min_uV = max(volt - 300000, 2700000);
1555 		max_uV = min(max_uV + 200000, 3600000);
1556 
1557 		/*
1558 		 * Due to a limitation in the current implementation of
1559 		 * regulator_set_voltage_triplet() which is taking the lowest
1560 		 * voltage possible if below the target, search for a suitable
1561 		 * voltage in two steps and try to stay close to vmmc
1562 		 * with a 0.3V tolerance at first.
1563 		 */
1564 		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1565 						min_uV, volt, max_uV))
1566 			return 0;
1567 
1568 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1569 						2700000, volt, 3600000);
1570 	default:
1571 		return -EINVAL;
1572 	}
1573 }
1574 EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1575 
1576 #endif /* CONFIG_REGULATOR */
1577 
1578 int mmc_regulator_get_supply(struct mmc_host *mmc)
1579 {
1580 	struct device *dev = mmc_dev(mmc);
1581 	int ret;
1582 
1583 	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1584 	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1585 
1586 	if (IS_ERR(mmc->supply.vmmc)) {
1587 		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1588 			return -EPROBE_DEFER;
1589 		dev_dbg(dev, "No vmmc regulator found\n");
1590 	} else {
1591 		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1592 		if (ret > 0)
1593 			mmc->ocr_avail = ret;
1594 		else
1595 			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1596 	}
1597 
1598 	if (IS_ERR(mmc->supply.vqmmc)) {
1599 		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1600 			return -EPROBE_DEFER;
1601 		dev_dbg(dev, "No vqmmc regulator found\n");
1602 	}
1603 
1604 	return 0;
1605 }
1606 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1607 
1608 /*
1609  * Mask off any voltages we don't support and select
1610  * the lowest voltage
1611  */
1612 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1613 {
1614 	int bit;
1615 
1616 	/*
1617 	 * Sanity check the voltages that the card claims to
1618 	 * support.
1619 	 */
1620 	if (ocr & 0x7F) {
1621 		dev_warn(mmc_dev(host),
1622 		"card claims to support voltages below defined range\n");
1623 		ocr &= ~0x7F;
1624 	}
1625 
1626 	ocr &= host->ocr_avail;
1627 	if (!ocr) {
1628 		dev_warn(mmc_dev(host), "no support for card's volts\n");
1629 		return 0;
1630 	}
1631 
1632 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1633 		bit = ffs(ocr) - 1;
1634 		ocr &= 3 << bit;
1635 		mmc_power_cycle(host, ocr);
1636 	} else {
1637 		bit = fls(ocr) - 1;
1638 		ocr &= 3 << bit;
1639 		if (bit != host->ios.vdd)
1640 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1641 	}
1642 
1643 	return ocr;
1644 }
1645 
1646 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1647 {
1648 	int err = 0;
1649 	int old_signal_voltage = host->ios.signal_voltage;
1650 
1651 	host->ios.signal_voltage = signal_voltage;
1652 	if (host->ops->start_signal_voltage_switch)
1653 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1654 
1655 	if (err)
1656 		host->ios.signal_voltage = old_signal_voltage;
1657 
1658 	return err;
1659 
1660 }
1661 
1662 int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1663 {
1664 	struct mmc_command cmd = {};
1665 	int err = 0;
1666 	u32 clock;
1667 
1668 	/*
1669 	 * If we cannot switch voltages, return failure so the caller
1670 	 * can continue without UHS mode
1671 	 */
1672 	if (!host->ops->start_signal_voltage_switch)
1673 		return -EPERM;
1674 	if (!host->ops->card_busy)
1675 		pr_warn("%s: cannot verify signal voltage switch\n",
1676 			mmc_hostname(host));
1677 
1678 	cmd.opcode = SD_SWITCH_VOLTAGE;
1679 	cmd.arg = 0;
1680 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1681 
1682 	err = mmc_wait_for_cmd(host, &cmd, 0);
1683 	if (err)
1684 		return err;
1685 
1686 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1687 		return -EIO;
1688 
1689 	/*
1690 	 * The card should drive cmd and dat[0:3] low immediately
1691 	 * after the response of cmd11, but wait 1 ms to be sure
1692 	 */
1693 	mmc_delay(1);
1694 	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1695 		err = -EAGAIN;
1696 		goto power_cycle;
1697 	}
1698 	/*
1699 	 * During a signal voltage level switch, the clock must be gated
1700 	 * for 5 ms according to the SD spec
1701 	 */
1702 	clock = host->ios.clock;
1703 	host->ios.clock = 0;
1704 	mmc_set_ios(host);
1705 
1706 	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
1707 		/*
1708 		 * Voltages may not have been switched, but we've already
1709 		 * sent CMD11, so a power cycle is required anyway
1710 		 */
1711 		err = -EAGAIN;
1712 		goto power_cycle;
1713 	}
1714 
1715 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1716 	mmc_delay(10);
1717 	host->ios.clock = clock;
1718 	mmc_set_ios(host);
1719 
1720 	/* Wait for at least 1 ms according to spec */
1721 	mmc_delay(1);
1722 
1723 	/*
1724 	 * Failure to switch is indicated by the card holding
1725 	 * dat[0:3] low
1726 	 */
1727 	if (host->ops->card_busy && host->ops->card_busy(host))
1728 		err = -EAGAIN;
1729 
1730 power_cycle:
1731 	if (err) {
1732 		pr_debug("%s: Signal voltage switch failed, "
1733 			"power cycling card\n", mmc_hostname(host));
1734 		mmc_power_cycle(host, ocr);
1735 	}
1736 
1737 	return err;
1738 }
1739 
1740 /*
1741  * Select timing parameters for host.
1742  */
1743 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1744 {
1745 	host->ios.timing = timing;
1746 	mmc_set_ios(host);
1747 }
1748 
1749 /*
1750  * Select appropriate driver type for host.
1751  */
1752 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1753 {
1754 	host->ios.drv_type = drv_type;
1755 	mmc_set_ios(host);
1756 }
1757 
1758 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1759 			      int card_drv_type, int *drv_type)
1760 {
1761 	struct mmc_host *host = card->host;
1762 	int host_drv_type = SD_DRIVER_TYPE_B;
1763 
1764 	*drv_type = 0;
1765 
1766 	if (!host->ops->select_drive_strength)
1767 		return 0;
1768 
1769 	/* Use SD definition of driver strength for hosts */
1770 	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1771 		host_drv_type |= SD_DRIVER_TYPE_A;
1772 
1773 	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1774 		host_drv_type |= SD_DRIVER_TYPE_C;
1775 
1776 	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1777 		host_drv_type |= SD_DRIVER_TYPE_D;
1778 
1779 	/*
1780 	 * The drive strength that the hardware can support
1781 	 * depends on the board design.  Pass the appropriate
1782 	 * information and let the hardware specific code
1783 	 * return what is possible given the options
1784 	 */
1785 	return host->ops->select_drive_strength(card, max_dtr,
1786 						host_drv_type,
1787 						card_drv_type,
1788 						drv_type);
1789 }
1790 
1791 /*
1792  * Apply power to the MMC stack.  This is a two-stage process.
1793  * First, we enable power to the card without the clock running.
1794  * We then wait a bit for the power to stabilise.  Finally,
1795  * enable the bus drivers and clock to the card.
1796  *
1797  * We must _NOT_ enable the clock prior to power stablising.
1798  *
1799  * If a host does all the power sequencing itself, ignore the
1800  * initial MMC_POWER_UP stage.
1801  */
1802 void mmc_power_up(struct mmc_host *host, u32 ocr)
1803 {
1804 	if (host->ios.power_mode == MMC_POWER_ON)
1805 		return;
1806 
1807 	mmc_pwrseq_pre_power_on(host);
1808 
1809 	host->ios.vdd = fls(ocr) - 1;
1810 	host->ios.power_mode = MMC_POWER_UP;
1811 	/* Set initial state and call mmc_set_ios */
1812 	mmc_set_initial_state(host);
1813 
1814 	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1815 	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1816 		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1817 	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1818 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1819 	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1820 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1821 
1822 	/*
1823 	 * This delay should be sufficient to allow the power supply
1824 	 * to reach the minimum voltage.
1825 	 */
1826 	mmc_delay(10);
1827 
1828 	mmc_pwrseq_post_power_on(host);
1829 
1830 	host->ios.clock = host->f_init;
1831 
1832 	host->ios.power_mode = MMC_POWER_ON;
1833 	mmc_set_ios(host);
1834 
1835 	/*
1836 	 * This delay must be at least 74 clock sizes, or 1 ms, or the
1837 	 * time required to reach a stable voltage.
1838 	 */
1839 	mmc_delay(10);
1840 }
1841 
1842 void mmc_power_off(struct mmc_host *host)
1843 {
1844 	if (host->ios.power_mode == MMC_POWER_OFF)
1845 		return;
1846 
1847 	mmc_pwrseq_power_off(host);
1848 
1849 	host->ios.clock = 0;
1850 	host->ios.vdd = 0;
1851 
1852 	host->ios.power_mode = MMC_POWER_OFF;
1853 	/* Set initial state and call mmc_set_ios */
1854 	mmc_set_initial_state(host);
1855 
1856 	/*
1857 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1858 	 * XO-1.5, require a short delay after poweroff before the card
1859 	 * can be successfully turned on again.
1860 	 */
1861 	mmc_delay(1);
1862 }
1863 
1864 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1865 {
1866 	mmc_power_off(host);
1867 	/* Wait at least 1 ms according to SD spec */
1868 	mmc_delay(1);
1869 	mmc_power_up(host, ocr);
1870 }
1871 
1872 /*
1873  * Cleanup when the last reference to the bus operator is dropped.
1874  */
1875 static void __mmc_release_bus(struct mmc_host *host)
1876 {
1877 	WARN_ON(!host->bus_dead);
1878 
1879 	host->bus_ops = NULL;
1880 }
1881 
1882 /*
1883  * Increase reference count of bus operator
1884  */
1885 static inline void mmc_bus_get(struct mmc_host *host)
1886 {
1887 	unsigned long flags;
1888 
1889 	spin_lock_irqsave(&host->lock, flags);
1890 	host->bus_refs++;
1891 	spin_unlock_irqrestore(&host->lock, flags);
1892 }
1893 
1894 /*
1895  * Decrease reference count of bus operator and free it if
1896  * it is the last reference.
1897  */
1898 static inline void mmc_bus_put(struct mmc_host *host)
1899 {
1900 	unsigned long flags;
1901 
1902 	spin_lock_irqsave(&host->lock, flags);
1903 	host->bus_refs--;
1904 	if ((host->bus_refs == 0) && host->bus_ops)
1905 		__mmc_release_bus(host);
1906 	spin_unlock_irqrestore(&host->lock, flags);
1907 }
1908 
1909 /*
1910  * Assign a mmc bus handler to a host. Only one bus handler may control a
1911  * host at any given time.
1912  */
1913 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1914 {
1915 	unsigned long flags;
1916 
1917 	WARN_ON(!host->claimed);
1918 
1919 	spin_lock_irqsave(&host->lock, flags);
1920 
1921 	WARN_ON(host->bus_ops);
1922 	WARN_ON(host->bus_refs);
1923 
1924 	host->bus_ops = ops;
1925 	host->bus_refs = 1;
1926 	host->bus_dead = 0;
1927 
1928 	spin_unlock_irqrestore(&host->lock, flags);
1929 }
1930 
1931 /*
1932  * Remove the current bus handler from a host.
1933  */
1934 void mmc_detach_bus(struct mmc_host *host)
1935 {
1936 	unsigned long flags;
1937 
1938 	WARN_ON(!host->claimed);
1939 	WARN_ON(!host->bus_ops);
1940 
1941 	spin_lock_irqsave(&host->lock, flags);
1942 
1943 	host->bus_dead = 1;
1944 
1945 	spin_unlock_irqrestore(&host->lock, flags);
1946 
1947 	mmc_bus_put(host);
1948 }
1949 
1950 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1951 				bool cd_irq)
1952 {
1953 #ifdef CONFIG_MMC_DEBUG
1954 	unsigned long flags;
1955 	spin_lock_irqsave(&host->lock, flags);
1956 	WARN_ON(host->removed);
1957 	spin_unlock_irqrestore(&host->lock, flags);
1958 #endif
1959 
1960 	/*
1961 	 * If the device is configured as wakeup, we prevent a new sleep for
1962 	 * 5 s to give provision for user space to consume the event.
1963 	 */
1964 	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1965 		device_can_wakeup(mmc_dev(host)))
1966 		pm_wakeup_event(mmc_dev(host), 5000);
1967 
1968 	host->detect_change = 1;
1969 	mmc_schedule_delayed_work(&host->detect, delay);
1970 }
1971 
1972 /**
1973  *	mmc_detect_change - process change of state on a MMC socket
1974  *	@host: host which changed state.
1975  *	@delay: optional delay to wait before detection (jiffies)
1976  *
1977  *	MMC drivers should call this when they detect a card has been
1978  *	inserted or removed. The MMC layer will confirm that any
1979  *	present card is still functional, and initialize any newly
1980  *	inserted.
1981  */
1982 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1983 {
1984 	_mmc_detect_change(host, delay, true);
1985 }
1986 EXPORT_SYMBOL(mmc_detect_change);
1987 
1988 void mmc_init_erase(struct mmc_card *card)
1989 {
1990 	unsigned int sz;
1991 
1992 	if (is_power_of_2(card->erase_size))
1993 		card->erase_shift = ffs(card->erase_size) - 1;
1994 	else
1995 		card->erase_shift = 0;
1996 
1997 	/*
1998 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1999 	 * card.  That is not desirable because it can take a long time
2000 	 * (minutes) potentially delaying more important I/O, and also the
2001 	 * timeout calculations become increasingly hugely over-estimated.
2002 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
2003 	 * to that size and alignment.
2004 	 *
2005 	 * For SD cards that define Allocation Unit size, limit erases to one
2006 	 * Allocation Unit at a time.
2007 	 * For MMC, have a stab at ai good value and for modern cards it will
2008 	 * end up being 4MiB. Note that if the value is too small, it can end
2009 	 * up taking longer to erase. Also note, erase_size is already set to
2010 	 * High Capacity Erase Size if available when this function is called.
2011 	 */
2012 	if (mmc_card_sd(card) && card->ssr.au) {
2013 		card->pref_erase = card->ssr.au;
2014 		card->erase_shift = ffs(card->ssr.au) - 1;
2015 	} else if (card->erase_size) {
2016 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2017 		if (sz < 128)
2018 			card->pref_erase = 512 * 1024 / 512;
2019 		else if (sz < 512)
2020 			card->pref_erase = 1024 * 1024 / 512;
2021 		else if (sz < 1024)
2022 			card->pref_erase = 2 * 1024 * 1024 / 512;
2023 		else
2024 			card->pref_erase = 4 * 1024 * 1024 / 512;
2025 		if (card->pref_erase < card->erase_size)
2026 			card->pref_erase = card->erase_size;
2027 		else {
2028 			sz = card->pref_erase % card->erase_size;
2029 			if (sz)
2030 				card->pref_erase += card->erase_size - sz;
2031 		}
2032 	} else
2033 		card->pref_erase = 0;
2034 }
2035 
2036 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2037 				          unsigned int arg, unsigned int qty)
2038 {
2039 	unsigned int erase_timeout;
2040 
2041 	if (arg == MMC_DISCARD_ARG ||
2042 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2043 		erase_timeout = card->ext_csd.trim_timeout;
2044 	} else if (card->ext_csd.erase_group_def & 1) {
2045 		/* High Capacity Erase Group Size uses HC timeouts */
2046 		if (arg == MMC_TRIM_ARG)
2047 			erase_timeout = card->ext_csd.trim_timeout;
2048 		else
2049 			erase_timeout = card->ext_csd.hc_erase_timeout;
2050 	} else {
2051 		/* CSD Erase Group Size uses write timeout */
2052 		unsigned int mult = (10 << card->csd.r2w_factor);
2053 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
2054 		unsigned int timeout_us;
2055 
2056 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
2057 		if (card->csd.tacc_ns < 1000000)
2058 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
2059 		else
2060 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
2061 
2062 		/*
2063 		 * ios.clock is only a target.  The real clock rate might be
2064 		 * less but not that much less, so fudge it by multiplying by 2.
2065 		 */
2066 		timeout_clks <<= 1;
2067 		timeout_us += (timeout_clks * 1000) /
2068 			      (card->host->ios.clock / 1000);
2069 
2070 		erase_timeout = timeout_us / 1000;
2071 
2072 		/*
2073 		 * Theoretically, the calculation could underflow so round up
2074 		 * to 1ms in that case.
2075 		 */
2076 		if (!erase_timeout)
2077 			erase_timeout = 1;
2078 	}
2079 
2080 	/* Multiplier for secure operations */
2081 	if (arg & MMC_SECURE_ARGS) {
2082 		if (arg == MMC_SECURE_ERASE_ARG)
2083 			erase_timeout *= card->ext_csd.sec_erase_mult;
2084 		else
2085 			erase_timeout *= card->ext_csd.sec_trim_mult;
2086 	}
2087 
2088 	erase_timeout *= qty;
2089 
2090 	/*
2091 	 * Ensure at least a 1 second timeout for SPI as per
2092 	 * 'mmc_set_data_timeout()'
2093 	 */
2094 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2095 		erase_timeout = 1000;
2096 
2097 	return erase_timeout;
2098 }
2099 
2100 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2101 					 unsigned int arg,
2102 					 unsigned int qty)
2103 {
2104 	unsigned int erase_timeout;
2105 
2106 	if (card->ssr.erase_timeout) {
2107 		/* Erase timeout specified in SD Status Register (SSR) */
2108 		erase_timeout = card->ssr.erase_timeout * qty +
2109 				card->ssr.erase_offset;
2110 	} else {
2111 		/*
2112 		 * Erase timeout not specified in SD Status Register (SSR) so
2113 		 * use 250ms per write block.
2114 		 */
2115 		erase_timeout = 250 * qty;
2116 	}
2117 
2118 	/* Must not be less than 1 second */
2119 	if (erase_timeout < 1000)
2120 		erase_timeout = 1000;
2121 
2122 	return erase_timeout;
2123 }
2124 
2125 static unsigned int mmc_erase_timeout(struct mmc_card *card,
2126 				      unsigned int arg,
2127 				      unsigned int qty)
2128 {
2129 	if (mmc_card_sd(card))
2130 		return mmc_sd_erase_timeout(card, arg, qty);
2131 	else
2132 		return mmc_mmc_erase_timeout(card, arg, qty);
2133 }
2134 
2135 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2136 			unsigned int to, unsigned int arg)
2137 {
2138 	struct mmc_command cmd = {};
2139 	unsigned int qty = 0, busy_timeout = 0;
2140 	bool use_r1b_resp = false;
2141 	unsigned long timeout;
2142 	int err;
2143 
2144 	mmc_retune_hold(card->host);
2145 
2146 	/*
2147 	 * qty is used to calculate the erase timeout which depends on how many
2148 	 * erase groups (or allocation units in SD terminology) are affected.
2149 	 * We count erasing part of an erase group as one erase group.
2150 	 * For SD, the allocation units are always a power of 2.  For MMC, the
2151 	 * erase group size is almost certainly also power of 2, but it does not
2152 	 * seem to insist on that in the JEDEC standard, so we fall back to
2153 	 * division in that case.  SD may not specify an allocation unit size,
2154 	 * in which case the timeout is based on the number of write blocks.
2155 	 *
2156 	 * Note that the timeout for secure trim 2 will only be correct if the
2157 	 * number of erase groups specified is the same as the total of all
2158 	 * preceding secure trim 1 commands.  Since the power may have been
2159 	 * lost since the secure trim 1 commands occurred, it is generally
2160 	 * impossible to calculate the secure trim 2 timeout correctly.
2161 	 */
2162 	if (card->erase_shift)
2163 		qty += ((to >> card->erase_shift) -
2164 			(from >> card->erase_shift)) + 1;
2165 	else if (mmc_card_sd(card))
2166 		qty += to - from + 1;
2167 	else
2168 		qty += ((to / card->erase_size) -
2169 			(from / card->erase_size)) + 1;
2170 
2171 	if (!mmc_card_blockaddr(card)) {
2172 		from <<= 9;
2173 		to <<= 9;
2174 	}
2175 
2176 	if (mmc_card_sd(card))
2177 		cmd.opcode = SD_ERASE_WR_BLK_START;
2178 	else
2179 		cmd.opcode = MMC_ERASE_GROUP_START;
2180 	cmd.arg = from;
2181 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2182 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2183 	if (err) {
2184 		pr_err("mmc_erase: group start error %d, "
2185 		       "status %#x\n", err, cmd.resp[0]);
2186 		err = -EIO;
2187 		goto out;
2188 	}
2189 
2190 	memset(&cmd, 0, sizeof(struct mmc_command));
2191 	if (mmc_card_sd(card))
2192 		cmd.opcode = SD_ERASE_WR_BLK_END;
2193 	else
2194 		cmd.opcode = MMC_ERASE_GROUP_END;
2195 	cmd.arg = to;
2196 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2197 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2198 	if (err) {
2199 		pr_err("mmc_erase: group end error %d, status %#x\n",
2200 		       err, cmd.resp[0]);
2201 		err = -EIO;
2202 		goto out;
2203 	}
2204 
2205 	memset(&cmd, 0, sizeof(struct mmc_command));
2206 	cmd.opcode = MMC_ERASE;
2207 	cmd.arg = arg;
2208 	busy_timeout = mmc_erase_timeout(card, arg, qty);
2209 	/*
2210 	 * If the host controller supports busy signalling and the timeout for
2211 	 * the erase operation does not exceed the max_busy_timeout, we should
2212 	 * use R1B response. Or we need to prevent the host from doing hw busy
2213 	 * detection, which is done by converting to a R1 response instead.
2214 	 */
2215 	if (card->host->max_busy_timeout &&
2216 	    busy_timeout > card->host->max_busy_timeout) {
2217 		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2218 	} else {
2219 		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2220 		cmd.busy_timeout = busy_timeout;
2221 		use_r1b_resp = true;
2222 	}
2223 
2224 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2225 	if (err) {
2226 		pr_err("mmc_erase: erase error %d, status %#x\n",
2227 		       err, cmd.resp[0]);
2228 		err = -EIO;
2229 		goto out;
2230 	}
2231 
2232 	if (mmc_host_is_spi(card->host))
2233 		goto out;
2234 
2235 	/*
2236 	 * In case of when R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
2237 	 * shall be avoided.
2238 	 */
2239 	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2240 		goto out;
2241 
2242 	timeout = jiffies + msecs_to_jiffies(busy_timeout);
2243 	do {
2244 		memset(&cmd, 0, sizeof(struct mmc_command));
2245 		cmd.opcode = MMC_SEND_STATUS;
2246 		cmd.arg = card->rca << 16;
2247 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2248 		/* Do not retry else we can't see errors */
2249 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2250 		if (err || (cmd.resp[0] & 0xFDF92000)) {
2251 			pr_err("error %d requesting status %#x\n",
2252 				err, cmd.resp[0]);
2253 			err = -EIO;
2254 			goto out;
2255 		}
2256 
2257 		/* Timeout if the device never becomes ready for data and
2258 		 * never leaves the program state.
2259 		 */
2260 		if (time_after(jiffies, timeout)) {
2261 			pr_err("%s: Card stuck in programming state! %s\n",
2262 				mmc_hostname(card->host), __func__);
2263 			err =  -EIO;
2264 			goto out;
2265 		}
2266 
2267 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2268 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2269 out:
2270 	mmc_retune_release(card->host);
2271 	return err;
2272 }
2273 
2274 static unsigned int mmc_align_erase_size(struct mmc_card *card,
2275 					 unsigned int *from,
2276 					 unsigned int *to,
2277 					 unsigned int nr)
2278 {
2279 	unsigned int from_new = *from, nr_new = nr, rem;
2280 
2281 	/*
2282 	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
2283 	 * to align the erase size efficiently.
2284 	 */
2285 	if (is_power_of_2(card->erase_size)) {
2286 		unsigned int temp = from_new;
2287 
2288 		from_new = round_up(temp, card->erase_size);
2289 		rem = from_new - temp;
2290 
2291 		if (nr_new > rem)
2292 			nr_new -= rem;
2293 		else
2294 			return 0;
2295 
2296 		nr_new = round_down(nr_new, card->erase_size);
2297 	} else {
2298 		rem = from_new % card->erase_size;
2299 		if (rem) {
2300 			rem = card->erase_size - rem;
2301 			from_new += rem;
2302 			if (nr_new > rem)
2303 				nr_new -= rem;
2304 			else
2305 				return 0;
2306 		}
2307 
2308 		rem = nr_new % card->erase_size;
2309 		if (rem)
2310 			nr_new -= rem;
2311 	}
2312 
2313 	if (nr_new == 0)
2314 		return 0;
2315 
2316 	*to = from_new + nr_new;
2317 	*from = from_new;
2318 
2319 	return nr_new;
2320 }
2321 
2322 /**
2323  * mmc_erase - erase sectors.
2324  * @card: card to erase
2325  * @from: first sector to erase
2326  * @nr: number of sectors to erase
2327  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2328  *
2329  * Caller must claim host before calling this function.
2330  */
2331 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2332 	      unsigned int arg)
2333 {
2334 	unsigned int rem, to = from + nr;
2335 	int err;
2336 
2337 	if (!(card->host->caps & MMC_CAP_ERASE) ||
2338 	    !(card->csd.cmdclass & CCC_ERASE))
2339 		return -EOPNOTSUPP;
2340 
2341 	if (!card->erase_size)
2342 		return -EOPNOTSUPP;
2343 
2344 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2345 		return -EOPNOTSUPP;
2346 
2347 	if ((arg & MMC_SECURE_ARGS) &&
2348 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2349 		return -EOPNOTSUPP;
2350 
2351 	if ((arg & MMC_TRIM_ARGS) &&
2352 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2353 		return -EOPNOTSUPP;
2354 
2355 	if (arg == MMC_SECURE_ERASE_ARG) {
2356 		if (from % card->erase_size || nr % card->erase_size)
2357 			return -EINVAL;
2358 	}
2359 
2360 	if (arg == MMC_ERASE_ARG)
2361 		nr = mmc_align_erase_size(card, &from, &to, nr);
2362 
2363 	if (nr == 0)
2364 		return 0;
2365 
2366 	if (to <= from)
2367 		return -EINVAL;
2368 
2369 	/* 'from' and 'to' are inclusive */
2370 	to -= 1;
2371 
2372 	/*
2373 	 * Special case where only one erase-group fits in the timeout budget:
2374 	 * If the region crosses an erase-group boundary on this particular
2375 	 * case, we will be trimming more than one erase-group which, does not
2376 	 * fit in the timeout budget of the controller, so we need to split it
2377 	 * and call mmc_do_erase() twice if necessary. This special case is
2378 	 * identified by the card->eg_boundary flag.
2379 	 */
2380 	rem = card->erase_size - (from % card->erase_size);
2381 	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2382 		err = mmc_do_erase(card, from, from + rem - 1, arg);
2383 		from += rem;
2384 		if ((err) || (to <= from))
2385 			return err;
2386 	}
2387 
2388 	return mmc_do_erase(card, from, to, arg);
2389 }
2390 EXPORT_SYMBOL(mmc_erase);
2391 
2392 int mmc_can_erase(struct mmc_card *card)
2393 {
2394 	if ((card->host->caps & MMC_CAP_ERASE) &&
2395 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2396 		return 1;
2397 	return 0;
2398 }
2399 EXPORT_SYMBOL(mmc_can_erase);
2400 
2401 int mmc_can_trim(struct mmc_card *card)
2402 {
2403 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2404 	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2405 		return 1;
2406 	return 0;
2407 }
2408 EXPORT_SYMBOL(mmc_can_trim);
2409 
2410 int mmc_can_discard(struct mmc_card *card)
2411 {
2412 	/*
2413 	 * As there's no way to detect the discard support bit at v4.5
2414 	 * use the s/w feature support filed.
2415 	 */
2416 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2417 		return 1;
2418 	return 0;
2419 }
2420 EXPORT_SYMBOL(mmc_can_discard);
2421 
2422 int mmc_can_sanitize(struct mmc_card *card)
2423 {
2424 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2425 		return 0;
2426 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2427 		return 1;
2428 	return 0;
2429 }
2430 EXPORT_SYMBOL(mmc_can_sanitize);
2431 
2432 int mmc_can_secure_erase_trim(struct mmc_card *card)
2433 {
2434 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2435 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2436 		return 1;
2437 	return 0;
2438 }
2439 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2440 
2441 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2442 			    unsigned int nr)
2443 {
2444 	if (!card->erase_size)
2445 		return 0;
2446 	if (from % card->erase_size || nr % card->erase_size)
2447 		return 0;
2448 	return 1;
2449 }
2450 EXPORT_SYMBOL(mmc_erase_group_aligned);
2451 
2452 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2453 					    unsigned int arg)
2454 {
2455 	struct mmc_host *host = card->host;
2456 	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2457 	unsigned int last_timeout = 0;
2458 	unsigned int max_busy_timeout = host->max_busy_timeout ?
2459 			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2460 
2461 	if (card->erase_shift) {
2462 		max_qty = UINT_MAX >> card->erase_shift;
2463 		min_qty = card->pref_erase >> card->erase_shift;
2464 	} else if (mmc_card_sd(card)) {
2465 		max_qty = UINT_MAX;
2466 		min_qty = card->pref_erase;
2467 	} else {
2468 		max_qty = UINT_MAX / card->erase_size;
2469 		min_qty = card->pref_erase / card->erase_size;
2470 	}
2471 
2472 	/*
2473 	 * We should not only use 'host->max_busy_timeout' as the limitation
2474 	 * when deciding the max discard sectors. We should set a balance value
2475 	 * to improve the erase speed, and it can not get too long timeout at
2476 	 * the same time.
2477 	 *
2478 	 * Here we set 'card->pref_erase' as the minimal discard sectors no
2479 	 * matter what size of 'host->max_busy_timeout', but if the
2480 	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2481 	 * then we can continue to increase the max discard sectors until we
2482 	 * get a balance value. In cases when the 'host->max_busy_timeout'
2483 	 * isn't specified, use the default max erase timeout.
2484 	 */
2485 	do {
2486 		y = 0;
2487 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2488 			timeout = mmc_erase_timeout(card, arg, qty + x);
2489 
2490 			if (qty + x > min_qty && timeout > max_busy_timeout)
2491 				break;
2492 
2493 			if (timeout < last_timeout)
2494 				break;
2495 			last_timeout = timeout;
2496 			y = x;
2497 		}
2498 		qty += y;
2499 	} while (y);
2500 
2501 	if (!qty)
2502 		return 0;
2503 
2504 	/*
2505 	 * When specifying a sector range to trim, chances are we might cross
2506 	 * an erase-group boundary even if the amount of sectors is less than
2507 	 * one erase-group.
2508 	 * If we can only fit one erase-group in the controller timeout budget,
2509 	 * we have to care that erase-group boundaries are not crossed by a
2510 	 * single trim operation. We flag that special case with "eg_boundary".
2511 	 * In all other cases we can just decrement qty and pretend that we
2512 	 * always touch (qty + 1) erase-groups as a simple optimization.
2513 	 */
2514 	if (qty == 1)
2515 		card->eg_boundary = 1;
2516 	else
2517 		qty--;
2518 
2519 	/* Convert qty to sectors */
2520 	if (card->erase_shift)
2521 		max_discard = qty << card->erase_shift;
2522 	else if (mmc_card_sd(card))
2523 		max_discard = qty + 1;
2524 	else
2525 		max_discard = qty * card->erase_size;
2526 
2527 	return max_discard;
2528 }
2529 
2530 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2531 {
2532 	struct mmc_host *host = card->host;
2533 	unsigned int max_discard, max_trim;
2534 
2535 	/*
2536 	 * Without erase_group_def set, MMC erase timeout depends on clock
2537 	 * frequence which can change.  In that case, the best choice is
2538 	 * just the preferred erase size.
2539 	 */
2540 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2541 		return card->pref_erase;
2542 
2543 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2544 	if (mmc_can_trim(card)) {
2545 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2546 		if (max_trim < max_discard)
2547 			max_discard = max_trim;
2548 	} else if (max_discard < card->erase_size) {
2549 		max_discard = 0;
2550 	}
2551 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2552 		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2553 		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2554 	return max_discard;
2555 }
2556 EXPORT_SYMBOL(mmc_calc_max_discard);
2557 
2558 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2559 {
2560 	struct mmc_command cmd = {};
2561 
2562 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2563 	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2564 		return 0;
2565 
2566 	cmd.opcode = MMC_SET_BLOCKLEN;
2567 	cmd.arg = blocklen;
2568 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2569 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2570 }
2571 EXPORT_SYMBOL(mmc_set_blocklen);
2572 
2573 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2574 			bool is_rel_write)
2575 {
2576 	struct mmc_command cmd = {};
2577 
2578 	cmd.opcode = MMC_SET_BLOCK_COUNT;
2579 	cmd.arg = blockcount & 0x0000FFFF;
2580 	if (is_rel_write)
2581 		cmd.arg |= 1 << 31;
2582 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2583 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2584 }
2585 EXPORT_SYMBOL(mmc_set_blockcount);
2586 
2587 static void mmc_hw_reset_for_init(struct mmc_host *host)
2588 {
2589 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2590 		return;
2591 	host->ops->hw_reset(host);
2592 }
2593 
2594 int mmc_hw_reset(struct mmc_host *host)
2595 {
2596 	int ret;
2597 
2598 	if (!host->card)
2599 		return -EINVAL;
2600 
2601 	mmc_bus_get(host);
2602 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2603 		mmc_bus_put(host);
2604 		return -EOPNOTSUPP;
2605 	}
2606 
2607 	ret = host->bus_ops->reset(host);
2608 	mmc_bus_put(host);
2609 
2610 	if (ret)
2611 		pr_warn("%s: tried to reset card, got error %d\n",
2612 			mmc_hostname(host), ret);
2613 
2614 	return ret;
2615 }
2616 EXPORT_SYMBOL(mmc_hw_reset);
2617 
2618 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2619 {
2620 	host->f_init = freq;
2621 
2622 #ifdef CONFIG_MMC_DEBUG
2623 	pr_info("%s: %s: trying to init card at %u Hz\n",
2624 		mmc_hostname(host), __func__, host->f_init);
2625 #endif
2626 	mmc_power_up(host, host->ocr_avail);
2627 
2628 	/*
2629 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2630 	 * do a hardware reset if possible.
2631 	 */
2632 	mmc_hw_reset_for_init(host);
2633 
2634 	/*
2635 	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2636 	 * if the card is being re-initialized, just send it.  CMD52
2637 	 * should be ignored by SD/eMMC cards.
2638 	 * Skip it if we already know that we do not support SDIO commands
2639 	 */
2640 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2641 		sdio_reset(host);
2642 
2643 	mmc_go_idle(host);
2644 
2645 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2646 		mmc_send_if_cond(host, host->ocr_avail);
2647 
2648 	/* Order's important: probe SDIO, then SD, then MMC */
2649 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2650 		if (!mmc_attach_sdio(host))
2651 			return 0;
2652 
2653 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2654 		if (!mmc_attach_sd(host))
2655 			return 0;
2656 
2657 	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2658 		if (!mmc_attach_mmc(host))
2659 			return 0;
2660 
2661 	mmc_power_off(host);
2662 	return -EIO;
2663 }
2664 
2665 int _mmc_detect_card_removed(struct mmc_host *host)
2666 {
2667 	int ret;
2668 
2669 	if (!host->card || mmc_card_removed(host->card))
2670 		return 1;
2671 
2672 	ret = host->bus_ops->alive(host);
2673 
2674 	/*
2675 	 * Card detect status and alive check may be out of sync if card is
2676 	 * removed slowly, when card detect switch changes while card/slot
2677 	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2678 	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2679 	 * detect work 200ms later for this case.
2680 	 */
2681 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2682 		mmc_detect_change(host, msecs_to_jiffies(200));
2683 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2684 	}
2685 
2686 	if (ret) {
2687 		mmc_card_set_removed(host->card);
2688 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2689 	}
2690 
2691 	return ret;
2692 }
2693 
2694 int mmc_detect_card_removed(struct mmc_host *host)
2695 {
2696 	struct mmc_card *card = host->card;
2697 	int ret;
2698 
2699 	WARN_ON(!host->claimed);
2700 
2701 	if (!card)
2702 		return 1;
2703 
2704 	if (!mmc_card_is_removable(host))
2705 		return 0;
2706 
2707 	ret = mmc_card_removed(card);
2708 	/*
2709 	 * The card will be considered unchanged unless we have been asked to
2710 	 * detect a change or host requires polling to provide card detection.
2711 	 */
2712 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2713 		return ret;
2714 
2715 	host->detect_change = 0;
2716 	if (!ret) {
2717 		ret = _mmc_detect_card_removed(host);
2718 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2719 			/*
2720 			 * Schedule a detect work as soon as possible to let a
2721 			 * rescan handle the card removal.
2722 			 */
2723 			cancel_delayed_work(&host->detect);
2724 			_mmc_detect_change(host, 0, false);
2725 		}
2726 	}
2727 
2728 	return ret;
2729 }
2730 EXPORT_SYMBOL(mmc_detect_card_removed);
2731 
2732 void mmc_rescan(struct work_struct *work)
2733 {
2734 	struct mmc_host *host =
2735 		container_of(work, struct mmc_host, detect.work);
2736 	int i;
2737 
2738 	if (host->rescan_disable)
2739 		return;
2740 
2741 	/* If there is a non-removable card registered, only scan once */
2742 	if (!mmc_card_is_removable(host) && host->rescan_entered)
2743 		return;
2744 	host->rescan_entered = 1;
2745 
2746 	if (host->trigger_card_event && host->ops->card_event) {
2747 		mmc_claim_host(host);
2748 		host->ops->card_event(host);
2749 		mmc_release_host(host);
2750 		host->trigger_card_event = false;
2751 	}
2752 
2753 	mmc_bus_get(host);
2754 
2755 	/*
2756 	 * if there is a _removable_ card registered, check whether it is
2757 	 * still present
2758 	 */
2759 	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2760 		host->bus_ops->detect(host);
2761 
2762 	host->detect_change = 0;
2763 
2764 	/*
2765 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2766 	 * the card is no longer present.
2767 	 */
2768 	mmc_bus_put(host);
2769 	mmc_bus_get(host);
2770 
2771 	/* if there still is a card present, stop here */
2772 	if (host->bus_ops != NULL) {
2773 		mmc_bus_put(host);
2774 		goto out;
2775 	}
2776 
2777 	/*
2778 	 * Only we can add a new handler, so it's safe to
2779 	 * release the lock here.
2780 	 */
2781 	mmc_bus_put(host);
2782 
2783 	mmc_claim_host(host);
2784 	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2785 			host->ops->get_cd(host) == 0) {
2786 		mmc_power_off(host);
2787 		mmc_release_host(host);
2788 		goto out;
2789 	}
2790 
2791 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2792 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2793 			break;
2794 		if (freqs[i] <= host->f_min)
2795 			break;
2796 	}
2797 	mmc_release_host(host);
2798 
2799  out:
2800 	if (host->caps & MMC_CAP_NEEDS_POLL)
2801 		mmc_schedule_delayed_work(&host->detect, HZ);
2802 }
2803 
2804 void mmc_start_host(struct mmc_host *host)
2805 {
2806 	host->f_init = max(freqs[0], host->f_min);
2807 	host->rescan_disable = 0;
2808 	host->ios.power_mode = MMC_POWER_UNDEFINED;
2809 
2810 	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2811 		mmc_claim_host(host);
2812 		mmc_power_up(host, host->ocr_avail);
2813 		mmc_release_host(host);
2814 	}
2815 
2816 	mmc_gpiod_request_cd_irq(host);
2817 	_mmc_detect_change(host, 0, false);
2818 }
2819 
2820 void mmc_stop_host(struct mmc_host *host)
2821 {
2822 #ifdef CONFIG_MMC_DEBUG
2823 	unsigned long flags;
2824 	spin_lock_irqsave(&host->lock, flags);
2825 	host->removed = 1;
2826 	spin_unlock_irqrestore(&host->lock, flags);
2827 #endif
2828 	if (host->slot.cd_irq >= 0)
2829 		disable_irq(host->slot.cd_irq);
2830 
2831 	host->rescan_disable = 1;
2832 	cancel_delayed_work_sync(&host->detect);
2833 
2834 	/* clear pm flags now and let card drivers set them as needed */
2835 	host->pm_flags = 0;
2836 
2837 	mmc_bus_get(host);
2838 	if (host->bus_ops && !host->bus_dead) {
2839 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2840 		host->bus_ops->remove(host);
2841 		mmc_claim_host(host);
2842 		mmc_detach_bus(host);
2843 		mmc_power_off(host);
2844 		mmc_release_host(host);
2845 		mmc_bus_put(host);
2846 		return;
2847 	}
2848 	mmc_bus_put(host);
2849 
2850 	mmc_claim_host(host);
2851 	mmc_power_off(host);
2852 	mmc_release_host(host);
2853 }
2854 
2855 int mmc_power_save_host(struct mmc_host *host)
2856 {
2857 	int ret = 0;
2858 
2859 #ifdef CONFIG_MMC_DEBUG
2860 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2861 #endif
2862 
2863 	mmc_bus_get(host);
2864 
2865 	if (!host->bus_ops || host->bus_dead) {
2866 		mmc_bus_put(host);
2867 		return -EINVAL;
2868 	}
2869 
2870 	if (host->bus_ops->power_save)
2871 		ret = host->bus_ops->power_save(host);
2872 
2873 	mmc_bus_put(host);
2874 
2875 	mmc_power_off(host);
2876 
2877 	return ret;
2878 }
2879 EXPORT_SYMBOL(mmc_power_save_host);
2880 
2881 int mmc_power_restore_host(struct mmc_host *host)
2882 {
2883 	int ret;
2884 
2885 #ifdef CONFIG_MMC_DEBUG
2886 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2887 #endif
2888 
2889 	mmc_bus_get(host);
2890 
2891 	if (!host->bus_ops || host->bus_dead) {
2892 		mmc_bus_put(host);
2893 		return -EINVAL;
2894 	}
2895 
2896 	mmc_power_up(host, host->card->ocr);
2897 	ret = host->bus_ops->power_restore(host);
2898 
2899 	mmc_bus_put(host);
2900 
2901 	return ret;
2902 }
2903 EXPORT_SYMBOL(mmc_power_restore_host);
2904 
2905 /*
2906  * Flush the cache to the non-volatile storage.
2907  */
2908 int mmc_flush_cache(struct mmc_card *card)
2909 {
2910 	int err = 0;
2911 
2912 	if (mmc_card_mmc(card) &&
2913 			(card->ext_csd.cache_size > 0) &&
2914 			(card->ext_csd.cache_ctrl & 1)) {
2915 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2916 				EXT_CSD_FLUSH_CACHE, 1, 0);
2917 		if (err)
2918 			pr_err("%s: cache flush error %d\n",
2919 					mmc_hostname(card->host), err);
2920 	}
2921 
2922 	return err;
2923 }
2924 EXPORT_SYMBOL(mmc_flush_cache);
2925 
2926 #ifdef CONFIG_PM_SLEEP
2927 /* Do the card removal on suspend if card is assumed removeable
2928  * Do that in pm notifier while userspace isn't yet frozen, so we will be able
2929    to sync the card.
2930 */
2931 static int mmc_pm_notify(struct notifier_block *notify_block,
2932 			unsigned long mode, void *unused)
2933 {
2934 	struct mmc_host *host = container_of(
2935 		notify_block, struct mmc_host, pm_notify);
2936 	unsigned long flags;
2937 	int err = 0;
2938 
2939 	switch (mode) {
2940 	case PM_HIBERNATION_PREPARE:
2941 	case PM_SUSPEND_PREPARE:
2942 	case PM_RESTORE_PREPARE:
2943 		spin_lock_irqsave(&host->lock, flags);
2944 		host->rescan_disable = 1;
2945 		spin_unlock_irqrestore(&host->lock, flags);
2946 		cancel_delayed_work_sync(&host->detect);
2947 
2948 		if (!host->bus_ops)
2949 			break;
2950 
2951 		/* Validate prerequisites for suspend */
2952 		if (host->bus_ops->pre_suspend)
2953 			err = host->bus_ops->pre_suspend(host);
2954 		if (!err)
2955 			break;
2956 
2957 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2958 		host->bus_ops->remove(host);
2959 		mmc_claim_host(host);
2960 		mmc_detach_bus(host);
2961 		mmc_power_off(host);
2962 		mmc_release_host(host);
2963 		host->pm_flags = 0;
2964 		break;
2965 
2966 	case PM_POST_SUSPEND:
2967 	case PM_POST_HIBERNATION:
2968 	case PM_POST_RESTORE:
2969 
2970 		spin_lock_irqsave(&host->lock, flags);
2971 		host->rescan_disable = 0;
2972 		spin_unlock_irqrestore(&host->lock, flags);
2973 		_mmc_detect_change(host, 0, false);
2974 
2975 	}
2976 
2977 	return 0;
2978 }
2979 
2980 void mmc_register_pm_notifier(struct mmc_host *host)
2981 {
2982 	host->pm_notify.notifier_call = mmc_pm_notify;
2983 	register_pm_notifier(&host->pm_notify);
2984 }
2985 
2986 void mmc_unregister_pm_notifier(struct mmc_host *host)
2987 {
2988 	unregister_pm_notifier(&host->pm_notify);
2989 }
2990 #endif
2991 
2992 /**
2993  * mmc_init_context_info() - init synchronization context
2994  * @host: mmc host
2995  *
2996  * Init struct context_info needed to implement asynchronous
2997  * request mechanism, used by mmc core, host driver and mmc requests
2998  * supplier.
2999  */
3000 void mmc_init_context_info(struct mmc_host *host)
3001 {
3002 	host->context_info.is_new_req = false;
3003 	host->context_info.is_done_rcv = false;
3004 	host->context_info.is_waiting_last_req = false;
3005 	init_waitqueue_head(&host->context_info.wait);
3006 }
3007 
3008 static int __init mmc_init(void)
3009 {
3010 	int ret;
3011 
3012 	ret = mmc_register_bus();
3013 	if (ret)
3014 		return ret;
3015 
3016 	ret = mmc_register_host_class();
3017 	if (ret)
3018 		goto unregister_bus;
3019 
3020 	ret = sdio_register_bus();
3021 	if (ret)
3022 		goto unregister_host_class;
3023 
3024 	return 0;
3025 
3026 unregister_host_class:
3027 	mmc_unregister_host_class();
3028 unregister_bus:
3029 	mmc_unregister_bus();
3030 	return ret;
3031 }
3032 
3033 static void __exit mmc_exit(void)
3034 {
3035 	sdio_unregister_bus();
3036 	mmc_unregister_host_class();
3037 	mmc_unregister_bus();
3038 }
3039 
3040 subsys_initcall(mmc_init);
3041 module_exit(mmc_exit);
3042 
3043 MODULE_LICENSE("GPL");
3044