1 /*
2  *  linux/drivers/mmc/core/core.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
31 #include <linux/of.h>
32 
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/mmc.h>
41 
42 #include "core.h"
43 #include "card.h"
44 #include "bus.h"
45 #include "host.h"
46 #include "sdio_bus.h"
47 #include "pwrseq.h"
48 
49 #include "mmc_ops.h"
50 #include "sd_ops.h"
51 #include "sdio_ops.h"
52 
53 /* If the device is not responding */
54 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
55 
56 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
57 #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
58 
59 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
60 
61 /*
62  * Enabling software CRCs on the data blocks can be a significant (30%)
63  * performance cost, and for other reasons may not always be desired.
64  * So we allow it to be disabled.
65  */
66 bool use_spi_crc = 1;
67 module_param(use_spi_crc, bool, 0);
68 
69 static int mmc_schedule_delayed_work(struct delayed_work *work,
70 				     unsigned long delay)
71 {
72 	/*
73 	 * We use the system_freezable_wq for two reasons.
74 	 * First, it allows several work items (not the same one) to be
75 	 * executed simultaneously. Second, the queue becomes frozen when
76 	 * userspace becomes frozen during system PM.
77 	 */
78 	return queue_delayed_work(system_freezable_wq, work, delay);
79 }
80 
81 #ifdef CONFIG_FAIL_MMC_REQUEST
82 
83 /*
84  * Internal function. Inject random data errors.
85  * If mmc_data is NULL no errors are injected.
86  */
87 static void mmc_should_fail_request(struct mmc_host *host,
88 				    struct mmc_request *mrq)
89 {
90 	struct mmc_command *cmd = mrq->cmd;
91 	struct mmc_data *data = mrq->data;
92 	static const int data_errors[] = {
93 		-ETIMEDOUT,
94 		-EILSEQ,
95 		-EIO,
96 	};
97 
98 	if (!data)
99 		return;
100 
101 	if (cmd->error || data->error ||
102 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
103 		return;
104 
105 	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
106 	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
107 }
108 
109 #else /* CONFIG_FAIL_MMC_REQUEST */
110 
111 static inline void mmc_should_fail_request(struct mmc_host *host,
112 					   struct mmc_request *mrq)
113 {
114 }
115 
116 #endif /* CONFIG_FAIL_MMC_REQUEST */
117 
118 static inline void mmc_complete_cmd(struct mmc_request *mrq)
119 {
120 	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
121 		complete_all(&mrq->cmd_completion);
122 }
123 
124 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
125 {
126 	if (!mrq->cap_cmd_during_tfr)
127 		return;
128 
129 	mmc_complete_cmd(mrq);
130 
131 	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
132 		 mmc_hostname(host), mrq->cmd->opcode);
133 }
134 EXPORT_SYMBOL(mmc_command_done);
135 
136 /**
137  *	mmc_request_done - finish processing an MMC request
138  *	@host: MMC host which completed the request
139  *	@mrq: MMC request which completed
140  *
141  *	MMC drivers should call this function when they have completed
142  *	their processing of a request.
143  */
144 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
145 {
146 	struct mmc_command *cmd = mrq->cmd;
147 	int err = cmd->error;
148 
149 	/* Flag re-tuning needed on CRC errors */
150 	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
151 	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
152 	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
153 	    (mrq->data && mrq->data->error == -EILSEQ) ||
154 	    (mrq->stop && mrq->stop->error == -EILSEQ)))
155 		mmc_retune_needed(host);
156 
157 	if (err && cmd->retries && mmc_host_is_spi(host)) {
158 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
159 			cmd->retries = 0;
160 	}
161 
162 	if (host->ongoing_mrq == mrq)
163 		host->ongoing_mrq = NULL;
164 
165 	mmc_complete_cmd(mrq);
166 
167 	trace_mmc_request_done(host, mrq);
168 
169 	/*
170 	 * We list various conditions for the command to be considered
171 	 * properly done:
172 	 *
173 	 * - There was no error, OK fine then
174 	 * - We are not doing some kind of retry
175 	 * - The card was removed (...so just complete everything no matter
176 	 *   if there are errors or retries)
177 	 */
178 	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
179 		mmc_should_fail_request(host, mrq);
180 
181 		if (!host->ongoing_mrq)
182 			led_trigger_event(host->led, LED_OFF);
183 
184 		if (mrq->sbc) {
185 			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
186 				mmc_hostname(host), mrq->sbc->opcode,
187 				mrq->sbc->error,
188 				mrq->sbc->resp[0], mrq->sbc->resp[1],
189 				mrq->sbc->resp[2], mrq->sbc->resp[3]);
190 		}
191 
192 		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
193 			mmc_hostname(host), cmd->opcode, err,
194 			cmd->resp[0], cmd->resp[1],
195 			cmd->resp[2], cmd->resp[3]);
196 
197 		if (mrq->data) {
198 			pr_debug("%s:     %d bytes transferred: %d\n",
199 				mmc_hostname(host),
200 				mrq->data->bytes_xfered, mrq->data->error);
201 		}
202 
203 		if (mrq->stop) {
204 			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
205 				mmc_hostname(host), mrq->stop->opcode,
206 				mrq->stop->error,
207 				mrq->stop->resp[0], mrq->stop->resp[1],
208 				mrq->stop->resp[2], mrq->stop->resp[3]);
209 		}
210 	}
211 	/*
212 	 * Request starter must handle retries - see
213 	 * mmc_wait_for_req_done().
214 	 */
215 	if (mrq->done)
216 		mrq->done(mrq);
217 }
218 
219 EXPORT_SYMBOL(mmc_request_done);
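/*
 * Illustrative sketch, not part of this file: a host controller driver
 * typically calls mmc_request_done() from its interrupt handler once the
 * hardware has finished with a request. All foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
 *	{
 *		struct foo_mmc_host *foo = dev_id;
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		foo->mrq = NULL;
 *		mrq->cmd->error = foo_check_errors(foo);
 *		mmc_request_done(foo->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */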
220 
221 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
222 {
223 	int err;
224 
225 	/* Assumes host controller has been runtime resumed by mmc_claim_host */
226 	err = mmc_retune(host);
227 	if (err) {
228 		mrq->cmd->error = err;
229 		mmc_request_done(host, mrq);
230 		return;
231 	}
232 
233 	/*
234 	 * For sdio rw commands we must wait for card busy otherwise some
235 	 * sdio devices won't work properly.
236 	 * And bypass I/O abort, reset and bus suspend operations.
237 	 */
238 	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
239 	    host->ops->card_busy) {
240 		int tries = 500; /* Wait approx 500ms at maximum */
241 
242 		while (host->ops->card_busy(host) && --tries)
243 			mmc_delay(1);
244 
245 		if (tries == 0) {
246 			mrq->cmd->error = -EBUSY;
247 			mmc_request_done(host, mrq);
248 			return;
249 		}
250 	}
251 
252 	if (mrq->cap_cmd_during_tfr) {
253 		host->ongoing_mrq = mrq;
254 		/*
255 		 * Retry path could come through here without having waited on
256 		 * cmd_completion, so ensure it is reinitialised.
257 		 */
258 		reinit_completion(&mrq->cmd_completion);
259 	}
260 
261 	trace_mmc_request_start(host, mrq);
262 
263 	host->ops->request(host, mrq);
264 }
265 
266 static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
267 {
268 	if (mrq->sbc) {
269 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
270 			 mmc_hostname(host), mrq->sbc->opcode,
271 			 mrq->sbc->arg, mrq->sbc->flags);
272 	}
273 
274 	if (mrq->cmd) {
275 		pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
276 			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
277 			 mrq->cmd->flags);
278 	}
279 
280 	if (mrq->data) {
281 		pr_debug("%s:     blksz %d blocks %d flags %08x "
282 			"tsac %d ms nsac %d\n",
283 			mmc_hostname(host), mrq->data->blksz,
284 			mrq->data->blocks, mrq->data->flags,
285 			mrq->data->timeout_ns / 1000000,
286 			mrq->data->timeout_clks);
287 	}
288 
289 	if (mrq->stop) {
290 		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
291 			 mmc_hostname(host), mrq->stop->opcode,
292 			 mrq->stop->arg, mrq->stop->flags);
293 	}
294 }
295 
296 static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
297 {
298 #ifdef CONFIG_MMC_DEBUG
299 	unsigned int i, sz;
300 	struct scatterlist *sg;
301 #endif
302 
303 	if (mrq->cmd) {
304 		mrq->cmd->error = 0;
305 		mrq->cmd->mrq = mrq;
306 		mrq->cmd->data = mrq->data;
307 	}
308 	if (mrq->sbc) {
309 		mrq->sbc->error = 0;
310 		mrq->sbc->mrq = mrq;
311 	}
312 	if (mrq->data) {
313 		if (mrq->data->blksz > host->max_blk_size ||
314 		    mrq->data->blocks > host->max_blk_count ||
315 		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
316 			return -EINVAL;
317 #ifdef CONFIG_MMC_DEBUG
318 		sz = 0;
319 		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
320 			sz += sg->length;
321 		if (sz != mrq->data->blocks * mrq->data->blksz)
322 			return -EINVAL;
323 #endif
324 		mrq->data->error = 0;
325 		mrq->data->mrq = mrq;
326 		if (mrq->stop) {
327 			mrq->data->stop = mrq->stop;
328 			mrq->stop->error = 0;
329 			mrq->stop->mrq = mrq;
330 		}
331 	}
332 
333 	return 0;
334 }
335 
336 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
337 {
338 	int err;
339 
340 	mmc_retune_hold(host);
341 
342 	if (mmc_card_removed(host->card))
343 		return -ENOMEDIUM;
344 
345 	mmc_mrq_pr_debug(host, mrq);
346 
347 	WARN_ON(!host->claimed);
348 
349 	err = mmc_mrq_prep(host, mrq);
350 	if (err)
351 		return err;
352 
353 	led_trigger_event(host->led, LED_FULL);
354 	__mmc_start_request(host, mrq);
355 
356 	return 0;
357 }
358 
359 /*
360  * mmc_wait_data_done() - done callback for data request
361  * @mrq: done data request
362  *
363  * Wakes up mmc context, passed as a callback to host controller driver
364  */
365 static void mmc_wait_data_done(struct mmc_request *mrq)
366 {
367 	struct mmc_context_info *context_info = &mrq->host->context_info;
368 
369 	context_info->is_done_rcv = true;
370 	wake_up_interruptible(&context_info->wait);
371 }
372 
373 static void mmc_wait_done(struct mmc_request *mrq)
374 {
375 	complete(&mrq->completion);
376 }
377 
378 static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
379 {
380 	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
381 
382 	/*
383 	 * If there is an ongoing transfer, wait for the command line to become
384 	 * available.
385 	 */
386 	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
387 		wait_for_completion(&ongoing_mrq->cmd_completion);
388 }
389 
390 /*
391  * __mmc_start_data_req() - start a data request
392  * @host: MMC host to start the request
393  * @mrq: data request to start
394  *
395  * Sets the done callback to be called when the request is completed by the
396  * card, and starts execution of the data request.
397  * If an ongoing transfer is already in progress, wait for the command line
398  * to become available before sending another command.
399  */
400 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
401 {
402 	int err;
403 
404 	mmc_wait_ongoing_tfr_cmd(host);
405 
406 	mrq->done = mmc_wait_data_done;
407 	mrq->host = host;
408 
409 	init_completion(&mrq->cmd_completion);
410 
411 	err = mmc_start_request(host, mrq);
412 	if (err) {
413 		mrq->cmd->error = err;
414 		mmc_complete_cmd(mrq);
415 		mmc_wait_data_done(mrq);
416 	}
417 
418 	return err;
419 }
420 
421 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
422 {
423 	int err;
424 
425 	mmc_wait_ongoing_tfr_cmd(host);
426 
427 	init_completion(&mrq->completion);
428 	mrq->done = mmc_wait_done;
429 
430 	init_completion(&mrq->cmd_completion);
431 
432 	err = mmc_start_request(host, mrq);
433 	if (err) {
434 		mrq->cmd->error = err;
435 		mmc_complete_cmd(mrq);
436 		complete(&mrq->completion);
437 	}
438 
439 	return err;
440 }
441 
442 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
443 {
444 	struct mmc_command *cmd;
445 
446 	while (1) {
447 		wait_for_completion(&mrq->completion);
448 
449 		cmd = mrq->cmd;
450 
451 		/*
452 		 * If host has timed out waiting for the sanitize
453 		 * to complete, card might be still in programming state
454 		 * so let's try to bring the card out of programming
455 		 * state.
456 		 */
457 		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
458 			if (!mmc_interrupt_hpi(host->card)) {
459 				pr_warn("%s: %s: Interrupted sanitize\n",
460 					mmc_hostname(host), __func__);
461 				cmd->error = 0;
462 				break;
463 			} else {
464 				pr_err("%s: %s: Failed to interrupt sanitize\n",
465 				       mmc_hostname(host), __func__);
466 			}
467 		}
468 		if (!cmd->error || !cmd->retries ||
469 		    mmc_card_removed(host->card))
470 			break;
471 
472 		mmc_retune_recheck(host);
473 
474 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
475 			 mmc_hostname(host), cmd->opcode, cmd->error);
476 		cmd->retries--;
477 		cmd->error = 0;
478 		__mmc_start_request(host, mrq);
479 	}
480 
481 	mmc_retune_release(host);
482 }
483 EXPORT_SYMBOL(mmc_wait_for_req_done);
484 
485 /**
486  *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
487  *	@host: MMC host
488  *	@mrq: MMC request
489  *
490  *	mmc_is_req_done() is used with requests that have
491  *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
492  *	starting a request and before waiting for it to complete. That is,
493  *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
494  *	and before mmc_wait_for_req_done(). If it is called at other times the
495  *	result is not meaningful.
496  */
497 bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
498 {
499 	if (host->areq)
500 		return host->context_info.is_done_rcv;
501 	else
502 		return completion_done(&mrq->completion);
503 }
504 EXPORT_SYMBOL(mmc_is_req_done);
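/*
 * Illustrative sketch of the 'cap_cmd_during_tfr' pattern described above
 * (hypothetical caller, not part of this file):
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);	(returns once the command is done)
 *	...issue further commands that do not use the data lines...
 *	while (!mmc_is_req_done(host, mrq))
 *		...do other work...
 *	mmc_wait_for_req_done(host, mrq);
 */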
505 
506 /**
507  *	mmc_pre_req - Prepare for a new request
508  *	@host: MMC host to prepare command
509  *	@mrq: MMC request to prepare for
510  *
511  *	mmc_pre_req() is called prior to mmc_start_req() to let the
512  *	host prepare for the new request. Preparation of a request may be
513  *	performed while another request is running on the host.
514  */
515 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
516 {
517 	if (host->ops->pre_req)
518 		host->ops->pre_req(host, mrq);
519 }
520 
521 /**
522  *	mmc_post_req - Post process a completed request
523  *	@host: MMC host to post process command
524  *	@mrq: MMC request to post process for
525  *	@err: Error, if non zero, clean up any resources made in pre_req
526  *
527  *	Let the host post process a completed request. Post processing of
528  *	a request may be performed while another request is running.
529  */
530 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
531 			 int err)
532 {
533 	if (host->ops->post_req)
534 		host->ops->post_req(host, mrq, err);
535 }
536 
537 /**
538  * mmc_finalize_areq() - finalize an asynchronous request
539  * @host: MMC host to finalize any ongoing request on
540  *
541  * Returns the status of the ongoing asynchronous request, or
542  * MMC_BLK_SUCCESS if no request was going on.
543  */
544 static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
545 {
546 	struct mmc_context_info *context_info = &host->context_info;
547 	enum mmc_blk_status status;
548 
549 	if (!host->areq)
550 		return MMC_BLK_SUCCESS;
551 
552 	while (1) {
553 		wait_event_interruptible(context_info->wait,
554 				(context_info->is_done_rcv ||
555 				 context_info->is_new_req));
556 
557 		if (context_info->is_done_rcv) {
558 			struct mmc_command *cmd;
559 
560 			context_info->is_done_rcv = false;
561 			cmd = host->areq->mrq->cmd;
562 
563 			if (!cmd->error || !cmd->retries ||
564 			    mmc_card_removed(host->card)) {
565 				status = host->areq->err_check(host->card,
566 							       host->areq);
567 				break; /* return status */
568 			} else {
569 				mmc_retune_recheck(host);
570 				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
571 					mmc_hostname(host),
572 					cmd->opcode, cmd->error);
573 				cmd->retries--;
574 				cmd->error = 0;
575 				__mmc_start_request(host, host->areq->mrq);
576 				continue; /* wait for done/new event again */
577 			}
578 		}
579 
580 		return MMC_BLK_NEW_REQUEST;
581 	}
582 
583 	mmc_retune_release(host);
584 
585 	/*
586 	 * Check BKOPS urgency for each R1 response
587 	 */
588 	if (host->card && mmc_card_mmc(host->card) &&
589 	    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
590 	     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
591 	    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
592 		mmc_start_bkops(host->card, true);
593 	}
594 
595 	return status;
596 }
597 
598 /**
599  *	mmc_start_areq - start an asynchronous request
600  *	@host: MMC host to start command
601  *	@areq: asynchronous request to start
602  *	@ret_stat: out parameter for status
603  *
604  *	Start a new MMC custom command request for a host.
605  *	If there is an ongoing async request, wait for its completion,
606  *	then start the new one and return.
607  *	Does not wait for the new request to complete.
608  *
609  *	Returns the completed request, or NULL if none completed: it waits
610  *	for any ongoing request (previously started) to complete and
611  *	returns the completed request. If there is no ongoing request, NULL
612  *	is returned without waiting. NULL is not an error condition.
613  */
614 struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
615 				     struct mmc_async_req *areq,
616 				     enum mmc_blk_status *ret_stat)
617 {
618 	enum mmc_blk_status status;
619 	int start_err = 0;
620 	struct mmc_async_req *previous = host->areq;
621 
622 	/* Prepare a new request */
623 	if (areq)
624 		mmc_pre_req(host, areq->mrq);
625 
626 	/* Finalize previous request */
627 	status = mmc_finalize_areq(host);
628 	if (ret_stat)
629 		*ret_stat = status;
630 
631 	/* The previous request is still going on... */
632 	if (status == MMC_BLK_NEW_REQUEST)
633 		return NULL;
634 
635 	/* Fine so far, start the new request! */
636 	if (status == MMC_BLK_SUCCESS && areq)
637 		start_err = __mmc_start_data_req(host, areq->mrq);
638 
639 	/* Postprocess the old request at this point */
640 	if (host->areq)
641 		mmc_post_req(host, host->areq->mrq, 0);
642 
643 	/* Cancel a prepared request if it was not started. */
644 	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
645 		mmc_post_req(host, areq->mrq, -EINVAL);
646 
647 	if (status != MMC_BLK_SUCCESS)
648 		host->areq = NULL;
649 	else
650 		host->areq = areq;
651 
652 	return previous;
653 }
654 EXPORT_SYMBOL(mmc_start_areq);
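/*
 * Illustrative sketch, not part of this file: a block driver pipelines
 * requests by handing each newly prepared request to mmc_start_areq() and
 * post-processing whatever completed request it returns:
 *
 *	enum mmc_blk_status status;
 *	struct mmc_async_req *completed;
 *
 *	completed = mmc_start_areq(host, next_areq, &status);
 *	if (status != MMC_BLK_SUCCESS)
 *		...handle the error reported for 'completed'...
 */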
655 
656 /**
657  *	mmc_wait_for_req - start a request and wait for completion
658  *	@host: MMC host to start command
659  *	@mrq: MMC request to start
660  *
661  *	Start a new MMC custom command request for a host, and wait
662  *	for the command to complete. In the case of 'cap_cmd_during_tfr'
663  *	requests, the transfer is ongoing and the caller can issue further
664  *	commands that do not use the data lines, and then wait by calling
665  *	mmc_wait_for_req_done().
666  *	Does not attempt to parse the response.
667  */
668 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
669 {
670 	__mmc_start_req(host, mrq);
671 
672 	if (!mrq->cap_cmd_during_tfr)
673 		mmc_wait_for_req_done(host, mrq);
674 }
675 EXPORT_SYMBOL(mmc_wait_for_req);
676 
677 /**
678  *	mmc_wait_for_cmd - start a command and wait for completion
679  *	@host: MMC host to start command
680  *	@cmd: MMC command to start
681  *	@retries: maximum number of retries
682  *
683  *	Start a new MMC command for a host, and wait for the command
684  *	to complete.  Return any error that occurred while the command
685  *	was executing.  Do not attempt to parse the response.
686  */
687 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
688 {
689 	struct mmc_request mrq = {};
690 
691 	WARN_ON(!host->claimed);
692 
693 	memset(cmd->resp, 0, sizeof(cmd->resp));
694 	cmd->retries = retries;
695 
696 	mrq.cmd = cmd;
697 	cmd->data = NULL;
698 
699 	mmc_wait_for_req(host, &mrq);
700 
701 	return cmd->error;
702 }
703 
704 EXPORT_SYMBOL(mmc_wait_for_cmd);
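/*
 * Illustrative sketch, not part of this file: resetting a card to idle
 * state via mmc_wait_for_cmd(), loosely mirroring mmc_go_idle() in
 * mmc_ops.c. The host must already be claimed.
 *
 *	struct mmc_command cmd = {};
 *
 *	cmd.opcode = MMC_GO_IDLE_STATE;
 *	cmd.arg = 0;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
 *	err = mmc_wait_for_cmd(host, &cmd, 0);
 */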
705 
706 /**
707  *	mmc_set_data_timeout - set the timeout for a data command
708  *	@data: data phase for command
709  *	@card: the MMC card associated with the data transfer
710  *
711  *	Computes the data timeout parameters according to the
712  *	correct algorithm given the card type.
713  */
714 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
715 {
716 	unsigned int mult;
717 
718 	/*
719 	 * SDIO cards only define an upper 1 s limit on access.
720 	 */
721 	if (mmc_card_sdio(card)) {
722 		data->timeout_ns = 1000000000;
723 		data->timeout_clks = 0;
724 		return;
725 	}
726 
727 	/*
728 	 * SD cards use a 100 multiplier rather than 10
729 	 */
730 	mult = mmc_card_sd(card) ? 100 : 10;
731 
732 	/*
733 	 * Scale up the multiplier (and therefore the timeout) by
734 	 * the r2w factor for writes.
735 	 */
736 	if (data->flags & MMC_DATA_WRITE)
737 		mult <<= card->csd.r2w_factor;
738 
739 	data->timeout_ns = card->csd.tacc_ns * mult;
740 	data->timeout_clks = card->csd.tacc_clks * mult;
741 
742 	/*
743 	 * SD cards also have an upper limit on the timeout.
744 	 */
745 	if (mmc_card_sd(card)) {
746 		unsigned int timeout_us, limit_us;
747 
748 		timeout_us = data->timeout_ns / 1000;
749 		if (card->host->ios.clock)
750 			timeout_us += data->timeout_clks * 1000 /
751 				(card->host->ios.clock / 1000);
752 
753 		if (data->flags & MMC_DATA_WRITE)
754 			/*
755 			 * The MMC spec says "It is strongly recommended
756 			 * for hosts to implement more than 500ms
757 			 * timeout value even if the card indicates
758 			 * the 250ms maximum busy length."  Even the
759 			 * previous value of 300ms is known to be
760 			 * insufficient for some cards.
761 			 */
762 			limit_us = 3000000;
763 		else
764 			limit_us = 100000;
765 
766 		/*
767 		 * SDHC cards always use these fixed values.
768 		 */
769 		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
770 			data->timeout_ns = limit_us * 1000;
771 			data->timeout_clks = 0;
772 		}
773 
774 		/* assign limit value if invalid */
775 		if (timeout_us == 0)
776 			data->timeout_ns = limit_us * 1000;
777 	}
778 
779 	/*
780 	 * Some cards require longer data read timeout than indicated in CSD.
781 	 * Address this by setting the read timeout to a "reasonably high"
782 	 * value. For the cards tested, 600ms has proven enough. If necessary,
783  * value. For the cards tested, 600ms has proven enough. If necessary,
784  * this value can be increased further for other problematic cards.
785 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
786 		data->timeout_ns = 600000000;
787 		data->timeout_clks = 0;
788 	}
789 
790 	/*
791 	 * Some cards need very high timeouts if driven in SPI mode.
792 	 * The worst observed timeout was 900ms after writing a
793 	 * continuous stream of data until the internal logic
794 	 * overflowed.
795 	 */
796 	if (mmc_host_is_spi(card->host)) {
797 		if (data->flags & MMC_DATA_WRITE) {
798 			if (data->timeout_ns < 1000000000)
799 				data->timeout_ns = 1000000000;	/* 1s */
800 		} else {
801 			if (data->timeout_ns < 100000000)
802 				data->timeout_ns =  100000000;	/* 100ms */
803 		}
804 	}
805 }
806 EXPORT_SYMBOL(mmc_set_data_timeout);
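/*
 * Worked example for the calculation above, with illustrative CSD values:
 * an SD card write with csd.tacc_ns = 1000000, csd.tacc_clks = 0 and
 * csd.r2w_factor = 2 gives mult = 100 << 2 = 400, hence timeout_ns =
 * 400000000 (400 ms), safely below the 3 s write limit. SDHC cards skip
 * the CSD arithmetic and always get the fixed limit values.
 */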
807 
808 /**
809  *	mmc_align_data_size - pads a transfer size to a more optimal value
810  *	@card: the MMC card associated with the data transfer
811  *	@sz: original transfer size
812  *
813  *	Pads the original data size with a number of extra bytes in
814  *	order to avoid controller bugs and/or performance hits
815  *	(e.g. some controllers revert to PIO for certain sizes).
816  *
817  *	Returns the improved size, which might be unmodified.
818  *
819  *	Note that this function is only relevant when issuing a
820  *	single scatter gather entry.
821  */
822 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
823 {
824 	/*
825 	 * FIXME: We don't have a system for the controller to tell
826 	 * the core about its problems yet, so for now we just 32-bit
827 	 * align the size.
828 	 */
829 	sz = ((sz + 3) / 4) * 4;
830 
831 	return sz;
832 }
833 EXPORT_SYMBOL(mmc_align_data_size);
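/*
 * Worked example: mmc_align_data_size(card, 13) rounds up to the next
 * 32-bit boundary and returns 16, while an already-aligned size such as
 * 512 is returned unmodified.
 */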
834 
835 /**
836  *	__mmc_claim_host - exclusively claim a host
837  *	@host: mmc host to claim
838  *	@abort: whether or not the operation should be aborted
839  *
840  *	Claim a host for a set of operations.  If @abort is non-NULL and
841  *	dereferences to a non-zero value, then this will return prematurely with
842  *	that non-zero value without acquiring the lock.  Returns zero
843  *	with the lock held otherwise.
844  */
845 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
846 {
847 	DECLARE_WAITQUEUE(wait, current);
848 	unsigned long flags;
849 	int stop;
850 	bool pm = false;
851 
852 	might_sleep();
853 
854 	add_wait_queue(&host->wq, &wait);
855 	spin_lock_irqsave(&host->lock, flags);
856 	while (1) {
857 		set_current_state(TASK_UNINTERRUPTIBLE);
858 		stop = abort ? atomic_read(abort) : 0;
859 		if (stop || !host->claimed || host->claimer == current)
860 			break;
861 		spin_unlock_irqrestore(&host->lock, flags);
862 		schedule();
863 		spin_lock_irqsave(&host->lock, flags);
864 	}
865 	set_current_state(TASK_RUNNING);
866 	if (!stop) {
867 		host->claimed = 1;
868 		host->claimer = current;
869 		host->claim_cnt += 1;
870 		if (host->claim_cnt == 1)
871 			pm = true;
872 	} else
873 		wake_up(&host->wq);
874 	spin_unlock_irqrestore(&host->lock, flags);
875 	remove_wait_queue(&host->wq, &wait);
876 
877 	if (pm)
878 		pm_runtime_get_sync(mmc_dev(host));
879 
880 	return stop;
881 }
882 EXPORT_SYMBOL(__mmc_claim_host);
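/*
 * Illustrative sketch, not part of this file: most callers use the
 * mmc_claim_host() wrapper from core.h, which passes a NULL @abort:
 *
 *	mmc_claim_host(card->host);
 *	...perform operations that need exclusive host access...
 *	mmc_release_host(card->host);
 */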
883 
884 /**
885  *	mmc_release_host - release a host
886  *	@host: mmc host to release
887  *
888  *	Release an MMC host, allowing others to claim the host
889  *	for their operations.
890  */
891 void mmc_release_host(struct mmc_host *host)
892 {
893 	unsigned long flags;
894 
895 	WARN_ON(!host->claimed);
896 
897 	spin_lock_irqsave(&host->lock, flags);
898 	if (--host->claim_cnt) {
899 		/* Release for nested claim */
900 		spin_unlock_irqrestore(&host->lock, flags);
901 	} else {
902 		host->claimed = 0;
903 		host->claimer = NULL;
904 		spin_unlock_irqrestore(&host->lock, flags);
905 		wake_up(&host->wq);
906 		pm_runtime_mark_last_busy(mmc_dev(host));
907 		pm_runtime_put_autosuspend(mmc_dev(host));
908 	}
909 }
910 EXPORT_SYMBOL(mmc_release_host);
911 
912 /*
913  * This is a helper function, which fetches a runtime pm reference for the
914  * card device and also claims the host.
915  */
916 void mmc_get_card(struct mmc_card *card)
917 {
918 	pm_runtime_get_sync(&card->dev);
919 	mmc_claim_host(card->host);
920 }
921 EXPORT_SYMBOL(mmc_get_card);
922 
923 /*
924  * This is a helper function, which releases the host and drops the runtime
925  * pm reference for the card device.
926  */
927 void mmc_put_card(struct mmc_card *card)
928 {
929 	mmc_release_host(card->host);
930 	pm_runtime_mark_last_busy(&card->dev);
931 	pm_runtime_put_autosuspend(&card->dev);
932 }
933 EXPORT_SYMBOL(mmc_put_card);
934 
935 /*
936  * Internal function that does the actual ios call to the host driver,
937  * optionally printing some debug output.
938  */
939 static inline void mmc_set_ios(struct mmc_host *host)
940 {
941 	struct mmc_ios *ios = &host->ios;
942 
943 	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
944 		"width %u timing %u\n",
945 		 mmc_hostname(host), ios->clock, ios->bus_mode,
946 		 ios->power_mode, ios->chip_select, ios->vdd,
947 		 1 << ios->bus_width, ios->timing);
948 
949 	host->ops->set_ios(host, ios);
950 }
951 
952 /*
953  * Control chip select pin on a host.
954  */
955 void mmc_set_chip_select(struct mmc_host *host, int mode)
956 {
957 	host->ios.chip_select = mode;
958 	mmc_set_ios(host);
959 }
960 
961 /*
962  * Sets the host clock to the highest possible frequency that
963  * is below "hz".
964  */
965 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
966 {
967 	WARN_ON(hz && hz < host->f_min);
968 
969 	if (hz > host->f_max)
970 		hz = host->f_max;
971 
972 	host->ios.clock = hz;
973 	mmc_set_ios(host);
974 }
975 
976 int mmc_execute_tuning(struct mmc_card *card)
977 {
978 	struct mmc_host *host = card->host;
979 	u32 opcode;
980 	int err;
981 
982 	if (!host->ops->execute_tuning)
983 		return 0;
984 
985 	if (mmc_card_mmc(card))
986 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
987 	else
988 		opcode = MMC_SEND_TUNING_BLOCK;
989 
990 	err = host->ops->execute_tuning(host, opcode);
991 
992 	if (err)
993 		pr_err("%s: tuning execution failed: %d\n",
994 			mmc_hostname(host), err);
995 	else
996 		mmc_retune_enable(host);
997 
998 	return err;
999 }
1000 
1001 /*
1002  * Change the bus mode (open drain/push-pull) of a host.
1003  */
1004 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1005 {
1006 	host->ios.bus_mode = mode;
1007 	mmc_set_ios(host);
1008 }
1009 
1010 /*
1011  * Change data bus width of a host.
1012  */
1013 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1014 {
1015 	host->ios.bus_width = width;
1016 	mmc_set_ios(host);
1017 }
1018 
1019 /*
1020  * Set initial state after a power cycle or a hw_reset.
1021  */
1022 void mmc_set_initial_state(struct mmc_host *host)
1023 {
1024 	mmc_retune_disable(host);
1025 
1026 	if (mmc_host_is_spi(host))
1027 		host->ios.chip_select = MMC_CS_HIGH;
1028 	else
1029 		host->ios.chip_select = MMC_CS_DONTCARE;
1030 	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1031 	host->ios.bus_width = MMC_BUS_WIDTH_1;
1032 	host->ios.timing = MMC_TIMING_LEGACY;
1033 	host->ios.drv_type = 0;
1034 	host->ios.enhanced_strobe = false;
1035 
1036 	/*
1037 	 * Make sure we are in non-enhanced strobe mode before we
1038 	 * actually enable it in ext_csd.
1039 	 */
1040 	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1041 	     host->ops->hs400_enhanced_strobe)
1042 		host->ops->hs400_enhanced_strobe(host, &host->ios);
1043 
1044 	mmc_set_ios(host);
1045 }
1046 
1047 /**
1048  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1049  * @vdd:	voltage (mV)
1050  * @low_bits:	prefer low bits in boundary cases
1051  *
1052  * This function returns the OCR bit number according to the provided @vdd
1053  * value. If conversion is not possible, a negative errno value is returned.
1054  *
1055  * Depending on the @low_bits flag the function prefers low or high OCR bits
1056  * on boundary voltages. For example,
1057  * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1058  * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1059  *
1060  * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1061  */
1062 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1063 {
1064 	const int max_bit = ilog2(MMC_VDD_35_36);
1065 	int bit;
1066 
1067 	if (vdd < 1650 || vdd > 3600)
1068 		return -EINVAL;
1069 
1070 	if (vdd >= 1650 && vdd <= 1950)
1071 		return ilog2(MMC_VDD_165_195);
1072 
1073 	if (low_bits)
1074 		vdd -= 1;
1075 
1076 	/* Base 2000 mV, step 100 mV, bit's base 8. */
1077 	bit = (vdd - 2000) / 100 + 8;
1078 	if (bit > max_bit)
1079 		return max_bit;
1080 	return bit;
1081 }
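/*
 * Worked example for the conversion above: vdd = 2850 (mV) yields
 * bit = (2850 - 2000) / 100 + 8 = 16, i.e. ilog2(MMC_VDD_28_29).
 */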
1082 
1083 /**
1084  * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1085  * @vdd_min:	minimum voltage value (mV)
1086  * @vdd_max:	maximum voltage value (mV)
1087  *
1088  * This function returns the OCR mask bits according to the provided @vdd_min
1089  * and @vdd_max values. If conversion is not possible the function returns 0.
1090  *
1091  * Notes wrt boundary cases:
1092  * This function sets the OCR bits for all boundary voltages, for example
1093  * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1094  * MMC_VDD_34_35 mask.
1095  */
1096 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1097 {
1098 	u32 mask = 0;
1099 
1100 	if (vdd_max < vdd_min)
1101 		return 0;
1102 
1103 	/* Prefer high bits for the boundary vdd_max values. */
1104 	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1105 	if (vdd_max < 0)
1106 		return 0;
1107 
1108 	/* Prefer low bits for the boundary vdd_min values. */
1109 	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1110 	if (vdd_min < 0)
1111 		return 0;
1112 
1113 	/* Fill the mask, from max bit to min bit. */
1114 	while (vdd_max >= vdd_min)
1115 		mask |= 1 << vdd_max--;
1116 
1117 	return mask;
1118 }
1119 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1120 
1121 #ifdef CONFIG_OF
1122 
1123 /**
1124  * mmc_of_parse_voltage - return mask of supported voltages
1125  * @np: The device node to be parsed.
1126  * @mask: mask of voltages available for MMC/SD/SDIO
1127  *
1128  * Parse the "voltage-ranges" DT property, returning zero if it is not
1129  * found, negative errno if the voltage-range specification is invalid,
1130  * or one if the voltage-range is specified and successfully parsed.
1131  */
1132 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1133 {
1134 	const u32 *voltage_ranges;
1135 	int num_ranges, i;
1136 
1137 	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1138 	if (!voltage_ranges) {
1139 		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1140 		return 0;
1141 	}
1142 	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1143 	if (!num_ranges) {
1144 		pr_err("%s: voltage-ranges empty\n", np->full_name);
1145 		return -EINVAL;
1146 	}
1147 
1148 	for (i = 0; i < num_ranges; i++) {
1149 		const int j = i * 2;
1150 		u32 ocr_mask;
1151 
1152 		ocr_mask = mmc_vddrange_to_ocrmask(
1153 				be32_to_cpu(voltage_ranges[j]),
1154 				be32_to_cpu(voltage_ranges[j + 1]));
1155 		if (!ocr_mask) {
1156 			pr_err("%s: voltage-range #%d is invalid\n",
1157 				np->full_name, i);
1158 			return -EINVAL;
1159 		}
1160 		*mask |= ocr_mask;
1161 	}
1162 
1163 	return 1;
1164 }
1165 EXPORT_SYMBOL(mmc_of_parse_voltage);
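/*
 * Illustrative snippets, not part of this file. The matching device tree
 * property holds <min max> pairs in millivolts:
 *
 *	voltage-ranges = <3300 3400>;
 *
 * and a host driver would typically parse it with:
 *
 *	ret = mmc_of_parse_voltage(np, &mask);
 */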
1166 
1167 #endif /* CONFIG_OF */
1168 
1169 static int mmc_of_get_func_num(struct device_node *node)
1170 {
1171 	u32 reg;
1172 	int ret;
1173 
1174 	ret = of_property_read_u32(node, "reg", &reg);
1175 	if (ret < 0)
1176 		return ret;
1177 
1178 	return reg;
1179 }
1180 
1181 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1182 		unsigned func_num)
1183 {
1184 	struct device_node *node;
1185 
1186 	if (!host->parent || !host->parent->of_node)
1187 		return NULL;
1188 
1189 	for_each_child_of_node(host->parent->of_node, node) {
1190 		if (mmc_of_get_func_num(node) == func_num)
1191 			return node;
1192 	}
1193 
1194 	return NULL;
1195 }
1196 
1197 #ifdef CONFIG_REGULATOR
1198 
1199 /**
1200  * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1201  * @vdd_bit:	OCR bit number
1202  * @min_uV:	minimum voltage value (uV)
1203  * @max_uV:	maximum voltage value (uV)
1204  *
1205  * This function returns the voltage range according to the provided OCR
1206  * bit number. If conversion is not possible, a negative errno is returned.
1207  */
1208 static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1209 {
1210 	int		tmp;
1211 
1212 	if (!vdd_bit)
1213 		return -EINVAL;
1214 
1215 	/*
1216 	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1217 	 * bits this regulator doesn't quite support ... don't
1218 	 * be too picky, most cards and regulators are OK with
1219 	 * a 0.1V range goof (it's a small error percentage).
1220 	 */
1221 	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1222 	if (tmp == 0) {
1223 		*min_uV = 1650 * 1000;
1224 		*max_uV = 1950 * 1000;
1225 	} else {
1226 		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1227 		*max_uV = *min_uV + 100 * 1000;
1228 	}
1229 
1230 	return 0;
1231 }
1232 
1233 /**
1234  * mmc_regulator_get_ocrmask - return mask of supported voltages
1235  * @supply: regulator to use
1236  *
1237  * This returns either a negative errno, or a mask of voltages that
1238  * can be provided to MMC/SD/SDIO devices using the specified voltage
1239  * regulator.  This would normally be called before registering the
1240  * MMC host adapter.
1241  */
1242 int mmc_regulator_get_ocrmask(struct regulator *supply)
1243 {
1244 	int			result = 0;
1245 	int			count;
1246 	int			i;
1247 	int			vdd_uV;
1248 	int			vdd_mV;
1249 
1250 	count = regulator_count_voltages(supply);
1251 	if (count < 0)
1252 		return count;
1253 
1254 	for (i = 0; i < count; i++) {
1255 		vdd_uV = regulator_list_voltage(supply, i);
1256 		if (vdd_uV <= 0)
1257 			continue;
1258 
1259 		vdd_mV = vdd_uV / 1000;
1260 		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1261 	}
1262 
1263 	if (!result) {
1264 		vdd_uV = regulator_get_voltage(supply);
1265 		if (vdd_uV <= 0)
1266 			return vdd_uV;
1267 
1268 		vdd_mV = vdd_uV / 1000;
1269 		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1270 	}
1271 
1272 	return result;
1273 }
1274 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1275 
1276 /**
1277  * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1278  * @mmc: the host to regulate
1279  * @supply: regulator to use
1280  * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1281  *
1282  * Returns zero on success, else negative errno.
1283  *
1284  * MMC host drivers may use this to enable or disable a regulator using
1285  * a particular supply voltage.  This would normally be called from the
1286  * set_ios() method.
1287  */
1288 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1289 			struct regulator *supply,
1290 			unsigned short vdd_bit)
1291 {
1292 	int			result = 0;
1293 	int			min_uV, max_uV;
1294 
1295 	if (vdd_bit) {
1296 		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1297 
1298 		result = regulator_set_voltage(supply, min_uV, max_uV);
1299 		if (result == 0 && !mmc->regulator_enabled) {
1300 			result = regulator_enable(supply);
1301 			if (!result)
1302 				mmc->regulator_enabled = true;
1303 		}
1304 	} else if (mmc->regulator_enabled) {
1305 		result = regulator_disable(supply);
1306 		if (result == 0)
1307 			mmc->regulator_enabled = false;
1308 	}
1309 
1310 	if (result)
1311 		dev_err(mmc_dev(mmc),
1312 			"could not set regulator OCR (%d)\n", result);
1313 	return result;
1314 }
1315 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
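/*
 * Illustrative sketch, not part of this file: a host driver's set_ios()
 * implementation commonly forwards the requested vdd bit to the vmmc
 * regulator:
 *
 *	if (!IS_ERR(mmc->supply.vmmc))
 *		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 */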
1316 
1317 static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1318 						  int min_uV, int target_uV,
1319 						  int max_uV)
1320 {
1321 	/*
1322 	 * Check if supported first to avoid errors since we may try several
1323 	 * signal levels during power up and don't want to show errors.
1324 	 */
1325 	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1326 		return -EINVAL;
1327 
1328 	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1329 					     max_uV);
1330 }
1331 
1332 /**
1333  * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1334  *
1335  * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1336  * That will match the behavior of old boards where VQMMC and VMMC were supplied
1337  * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1338  * SD card spec also define VQMMC in terms of VMMC.
1339  * If this is not possible we'll try the full 2.7-3.6V of the spec.
1340  *
1341  * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1342  * requested voltage.  This is definitely a good idea for UHS where there's a
1343  * separate regulator on the card that's trying to make 1.8V and it's best if
1344  * we match.
1345  *
1346  * This function is expected to be used by a controller's
1347  * start_signal_voltage_switch() function.
1348  */
1349 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1350 {
1351 	struct device *dev = mmc_dev(mmc);
1352 	int ret, volt, min_uV, max_uV;
1353 
1354 	/* If no vqmmc supply then we can't change the voltage */
1355 	if (IS_ERR(mmc->supply.vqmmc))
1356 		return -EINVAL;
1357 
1358 	switch (ios->signal_voltage) {
1359 	case MMC_SIGNAL_VOLTAGE_120:
1360 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1361 						1100000, 1200000, 1300000);
1362 	case MMC_SIGNAL_VOLTAGE_180:
1363 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1364 						1700000, 1800000, 1950000);
1365 	case MMC_SIGNAL_VOLTAGE_330:
1366 		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1367 		if (ret < 0)
1368 			return ret;
1369 
1370 		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1371 			__func__, volt, max_uV);
1372 
1373 		min_uV = max(volt - 300000, 2700000);
1374 		max_uV = min(max_uV + 200000, 3600000);
1375 
1376 		/*
1377 		 * regulator_set_voltage_triplet(), which takes the lowest
1378 		 * possible voltage if below the target, search for a suitable
1379 		 * voltage possible if below the target, search for a suitable
1380 		 * voltage in two steps and try to stay close to vmmc
1381 		 * with a 0.3V tolerance at first.
1382 		 */
1383 		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1384 						min_uV, volt, max_uV))
1385 			return 0;
1386 
1387 		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1388 						2700000, volt, 3600000);
1389 	default:
1390 		return -EINVAL;
1391 	}
1392 }
1393 EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1394 
1395 #endif /* CONFIG_REGULATOR */
1396 
1397 int mmc_regulator_get_supply(struct mmc_host *mmc)
1398 {
1399 	struct device *dev = mmc_dev(mmc);
1400 	int ret;
1401 
1402 	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1403 	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1404 
1405 	if (IS_ERR(mmc->supply.vmmc)) {
1406 		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1407 			return -EPROBE_DEFER;
1408 		dev_dbg(dev, "No vmmc regulator found\n");
1409 	} else {
1410 		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1411 		if (ret > 0)
1412 			mmc->ocr_avail = ret;
1413 		else
1414 			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1415 	}
1416 
1417 	if (IS_ERR(mmc->supply.vqmmc)) {
1418 		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1419 			return -EPROBE_DEFER;
1420 		dev_dbg(dev, "No vqmmc regulator found\n");
1421 	}
1422 
1423 	return 0;
1424 }
1425 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
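/*
 * Illustrative probe-time usage, not part of this file. The ocr_avail
 * fallback values are hypothetical:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;	(propagates -EPROBE_DEFER)
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */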
1426 
1427 /*
1428  * Mask off any voltages we don't support and select
1429  * the lowest voltage
1430  */
1431 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1432 {
1433 	int bit;
1434 
1435 	/*
1436 	 * Sanity check the voltages that the card claims to
1437 	 * support.
1438 	 */
1439 	if (ocr & 0x7F) {
1440 		dev_warn(mmc_dev(host),
1441 		"card claims to support voltages below defined range\n");
1442 		ocr &= ~0x7F;
1443 	}
1444 
1445 	ocr &= host->ocr_avail;
1446 	if (!ocr) {
1447 		dev_warn(mmc_dev(host), "no support for card's volts\n");
1448 		return 0;
1449 	}
1450 
1451 	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1452 		bit = ffs(ocr) - 1;
1453 		ocr &= 3 << bit;
1454 		mmc_power_cycle(host, ocr);
1455 	} else {
1456 		bit = fls(ocr) - 1;
1457 		ocr &= 3 << bit;
1458 		if (bit != host->ios.vdd)
1459 			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1460 	}
1461 
1462 	return ocr;
1463 }
1464 
1465 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1466 {
1467 	int err = 0;
1468 	int old_signal_voltage = host->ios.signal_voltage;
1469 
1470 	host->ios.signal_voltage = signal_voltage;
1471 	if (host->ops->start_signal_voltage_switch)
1472 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1473 
1474 	if (err)
1475 		host->ios.signal_voltage = old_signal_voltage;
1476 
1477 	return err;
1478 
1479 }
1480 
1481 int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1482 {
1483 	struct mmc_command cmd = {};
1484 	int err = 0;
1485 	u32 clock;
1486 
1487 	/*
1488 	 * If we cannot switch voltages, return failure so the caller
1489 	 * can continue without UHS mode
1490 	 */
1491 	if (!host->ops->start_signal_voltage_switch)
1492 		return -EPERM;
1493 	if (!host->ops->card_busy)
1494 		pr_warn("%s: cannot verify signal voltage switch\n",
1495 			mmc_hostname(host));
1496 
1497 	cmd.opcode = SD_SWITCH_VOLTAGE;
1498 	cmd.arg = 0;
1499 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1500 
1501 	err = mmc_wait_for_cmd(host, &cmd, 0);
1502 	if (err)
1503 		return err;
1504 
1505 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1506 		return -EIO;
1507 
1508 	/*
1509 	 * The card should drive cmd and dat[0:3] low immediately
1510 	 * after the response of cmd11, but wait 1 ms to be sure
1511 	 */
1512 	mmc_delay(1);
1513 	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1514 		err = -EAGAIN;
1515 		goto power_cycle;
1516 	}
1517 	/*
1518 	 * During a signal voltage level switch, the clock must be gated
1519 	 * for 5 ms according to the SD spec
1520 	 */
1521 	clock = host->ios.clock;
1522 	host->ios.clock = 0;
1523 	mmc_set_ios(host);
1524 
1525 	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
1526 		/*
1527 		 * Voltages may not have been switched, but we've already
1528 		 * sent CMD11, so a power cycle is required anyway
1529 		 */
1530 		err = -EAGAIN;
1531 		goto power_cycle;
1532 	}
1533 
1534 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1535 	mmc_delay(10);
1536 	host->ios.clock = clock;
1537 	mmc_set_ios(host);
1538 
1539 	/* Wait for at least 1 ms according to spec */
1540 	mmc_delay(1);
1541 
1542 	/*
1543 	 * Failure to switch is indicated by the card holding
1544 	 * dat[0:3] low
1545 	 */
1546 	if (host->ops->card_busy && host->ops->card_busy(host))
1547 		err = -EAGAIN;
1548 
1549 power_cycle:
1550 	if (err) {
1551 		pr_debug("%s: Signal voltage switch failed, "
1552 			"power cycling card\n", mmc_hostname(host));
1553 		mmc_power_cycle(host, ocr);
1554 	}
1555 
1556 	return err;
1557 }
1558 
1559 /*
1560  * Select timing parameters for host.
1561  */
1562 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1563 {
1564 	host->ios.timing = timing;
1565 	mmc_set_ios(host);
1566 }
1567 
1568 /*
1569  * Select appropriate driver type for host.
1570  */
1571 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1572 {
1573 	host->ios.drv_type = drv_type;
1574 	mmc_set_ios(host);
1575 }
1576 
1577 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1578 			      int card_drv_type, int *drv_type)
1579 {
1580 	struct mmc_host *host = card->host;
1581 	int host_drv_type = SD_DRIVER_TYPE_B;
1582 
1583 	*drv_type = 0;
1584 
1585 	if (!host->ops->select_drive_strength)
1586 		return 0;
1587 
1588 	/* Use SD definition of driver strength for hosts */
1589 	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1590 		host_drv_type |= SD_DRIVER_TYPE_A;
1591 
1592 	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1593 		host_drv_type |= SD_DRIVER_TYPE_C;
1594 
1595 	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1596 		host_drv_type |= SD_DRIVER_TYPE_D;
1597 
1598 	/*
1599 	 * The drive strength that the hardware can support
1600 	 * depends on the board design.  Pass the appropriate
1601 	 * information and let the hardware specific code
1602 	 * return what is possible given the options
1603 	 */
1604 	return host->ops->select_drive_strength(card, max_dtr,
1605 						host_drv_type,
1606 						card_drv_type,
1607 						drv_type);
1608 }
1609 
1610 /*
1611  * Apply power to the MMC stack.  This is a two-stage process.
1612  * First, we enable power to the card without the clock running.
1613  * We then wait a bit for the power to stabilise.  Finally,
1614  * enable the bus drivers and clock to the card.
1615  *
1616  * We must _NOT_ enable the clock prior to power stabilising.
1617  *
1618  * If a host does all the power sequencing itself, ignore the
1619  * initial MMC_POWER_UP stage.
1620  */
1621 void mmc_power_up(struct mmc_host *host, u32 ocr)
1622 {
1623 	if (host->ios.power_mode == MMC_POWER_ON)
1624 		return;
1625 
1626 	mmc_pwrseq_pre_power_on(host);
1627 
1628 	host->ios.vdd = fls(ocr) - 1;
1629 	host->ios.power_mode = MMC_POWER_UP;
1630 	/* Set initial state and call mmc_set_ios */
1631 	mmc_set_initial_state(host);
1632 
1633 	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1634 	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1635 		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1636 	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1637 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1638 	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1639 		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1640 
1641 	/*
1642 	 * This delay should be sufficient to allow the power supply
1643 	 * to reach the minimum voltage.
1644 	 */
1645 	mmc_delay(10);
1646 
1647 	mmc_pwrseq_post_power_on(host);
1648 
1649 	host->ios.clock = host->f_init;
1650 
1651 	host->ios.power_mode = MMC_POWER_ON;
1652 	mmc_set_ios(host);
1653 
1654 	/*
1655 	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1656 	 * time required to reach a stable voltage.
1657 	 */
1658 	mmc_delay(10);
1659 }
1660 
1661 void mmc_power_off(struct mmc_host *host)
1662 {
1663 	if (host->ios.power_mode == MMC_POWER_OFF)
1664 		return;
1665 
1666 	mmc_pwrseq_power_off(host);
1667 
1668 	host->ios.clock = 0;
1669 	host->ios.vdd = 0;
1670 
1671 	host->ios.power_mode = MMC_POWER_OFF;
1672 	/* Set initial state and call mmc_set_ios */
1673 	mmc_set_initial_state(host);
1674 
1675 	/*
1676 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1677 	 * XO-1.5, require a short delay after poweroff before the card
1678 	 * can be successfully turned on again.
1679 	 */
1680 	mmc_delay(1);
1681 }
1682 
1683 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1684 {
1685 	mmc_power_off(host);
1686 	/* Wait at least 1 ms according to SD spec */
1687 	mmc_delay(1);
1688 	mmc_power_up(host, ocr);
1689 }
1690 
1691 /*
1692  * Cleanup when the last reference to the bus operator is dropped.
1693  */
1694 static void __mmc_release_bus(struct mmc_host *host)
1695 {
1696 	WARN_ON(!host->bus_dead);
1697 
1698 	host->bus_ops = NULL;
1699 }
1700 
1701 /*
1702  * Increase reference count of bus operator
1703  */
1704 static inline void mmc_bus_get(struct mmc_host *host)
1705 {
1706 	unsigned long flags;
1707 
1708 	spin_lock_irqsave(&host->lock, flags);
1709 	host->bus_refs++;
1710 	spin_unlock_irqrestore(&host->lock, flags);
1711 }
1712 
1713 /*
1714  * Decrease reference count of bus operator and free it if
1715  * it is the last reference.
1716  */
1717 static inline void mmc_bus_put(struct mmc_host *host)
1718 {
1719 	unsigned long flags;
1720 
1721 	spin_lock_irqsave(&host->lock, flags);
1722 	host->bus_refs--;
1723 	if ((host->bus_refs == 0) && host->bus_ops)
1724 		__mmc_release_bus(host);
1725 	spin_unlock_irqrestore(&host->lock, flags);
1726 }
1727 
1728 /*
1729  * Assign an mmc bus handler to a host. Only one bus handler may control a
1730  * host at any given time.
1731  */
1732 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1733 {
1734 	unsigned long flags;
1735 
1736 	WARN_ON(!host->claimed);
1737 
1738 	spin_lock_irqsave(&host->lock, flags);
1739 
1740 	WARN_ON(host->bus_ops);
1741 	WARN_ON(host->bus_refs);
1742 
1743 	host->bus_ops = ops;
1744 	host->bus_refs = 1;
1745 	host->bus_dead = 0;
1746 
1747 	spin_unlock_irqrestore(&host->lock, flags);
1748 }
1749 
1750 /*
1751  * Remove the current bus handler from a host.
1752  */
1753 void mmc_detach_bus(struct mmc_host *host)
1754 {
1755 	unsigned long flags;
1756 
1757 	WARN_ON(!host->claimed);
1758 	WARN_ON(!host->bus_ops);
1759 
1760 	spin_lock_irqsave(&host->lock, flags);
1761 
1762 	host->bus_dead = 1;
1763 
1764 	spin_unlock_irqrestore(&host->lock, flags);
1765 
1766 	mmc_bus_put(host);
1767 }
1768 
1769 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1770 				bool cd_irq)
1771 {
1772 #ifdef CONFIG_MMC_DEBUG
1773 	unsigned long flags;
1774 	spin_lock_irqsave(&host->lock, flags);
1775 	WARN_ON(host->removed);
1776 	spin_unlock_irqrestore(&host->lock, flags);
1777 #endif
1778 
1779 	/*
1780 	 * If the device is configured as a wakeup source, we prevent a new sleep
1781 	 * 5 s to give provision for user space to consume the event.
1782 	 */
1783 	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1784 		device_can_wakeup(mmc_dev(host)))
1785 		pm_wakeup_event(mmc_dev(host), 5000);
1786 
1787 	host->detect_change = 1;
1788 	mmc_schedule_delayed_work(&host->detect, delay);
1789 }
1790 
1791 /**
1792  *	mmc_detect_change - process change of state on a MMC socket
1793  *	@host: host which changed state.
1794  *	@delay: optional delay to wait before detection (jiffies)
1795  *
1796  *	MMC drivers should call this when they detect a card has been
1797  *	inserted or removed. The MMC layer will confirm that any
1798  *	present card is still functional, and initialize any newly
1799  *	inserted card.
1800  */
1801 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1802 {
1803 	_mmc_detect_change(host, delay, true);
1804 }
1805 EXPORT_SYMBOL(mmc_detect_change);
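/*
 * Illustrative sketch, not part of this file: a host driver's card-detect
 * interrupt handler commonly debounces insertion with a small delay:
 *
 *	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 */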
1806 
1807 void mmc_init_erase(struct mmc_card *card)
1808 {
1809 	unsigned int sz;
1810 
1811 	if (is_power_of_2(card->erase_size))
1812 		card->erase_shift = ffs(card->erase_size) - 1;
1813 	else
1814 		card->erase_shift = 0;
1815 
1816 	/*
1817 	 * It is possible to erase an arbitrarily large area of an SD or MMC
1818 	 * card.  That is not desirable because it can take a long time
1819 	 * (minutes) potentially delaying more important I/O, and also the
1820  * timeout calculations become increasingly over-estimated.
1821 	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1822 	 * to that size and alignment.
1823 	 *
1824 	 * For SD cards that define Allocation Unit size, limit erases to one
1825 	 * Allocation Unit at a time.
1826  * For MMC, have a stab at a good value, and for modern cards it will
1827 	 * end up being 4MiB. Note that if the value is too small, it can end
1828 	 * up taking longer to erase. Also note, erase_size is already set to
1829 	 * High Capacity Erase Size if available when this function is called.
1830 	 */
1831 	if (mmc_card_sd(card) && card->ssr.au) {
1832 		card->pref_erase = card->ssr.au;
1833 		card->erase_shift = ffs(card->ssr.au) - 1;
1834 	} else if (card->erase_size) {
1835 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1836 		if (sz < 128)
1837 			card->pref_erase = 512 * 1024 / 512;
1838 		else if (sz < 512)
1839 			card->pref_erase = 1024 * 1024 / 512;
1840 		else if (sz < 1024)
1841 			card->pref_erase = 2 * 1024 * 1024 / 512;
1842 		else
1843 			card->pref_erase = 4 * 1024 * 1024 / 512;
1844 		if (card->pref_erase < card->erase_size)
1845 			card->pref_erase = card->erase_size;
1846 		else {
1847 			sz = card->pref_erase % card->erase_size;
1848 			if (sz)
1849 				card->pref_erase += card->erase_size - sz;
1850 		}
1851 	} else
1852 		card->pref_erase = 0;
1853 }
1854 
1855 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1856 				          unsigned int arg, unsigned int qty)
1857 {
1858 	unsigned int erase_timeout;
1859 
1860 	if (arg == MMC_DISCARD_ARG ||
1861 	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1862 		erase_timeout = card->ext_csd.trim_timeout;
1863 	} else if (card->ext_csd.erase_group_def & 1) {
1864 		/* High Capacity Erase Group Size uses HC timeouts */
1865 		if (arg == MMC_TRIM_ARG)
1866 			erase_timeout = card->ext_csd.trim_timeout;
1867 		else
1868 			erase_timeout = card->ext_csd.hc_erase_timeout;
1869 	} else {
1870 		/* CSD Erase Group Size uses write timeout */
1871 		unsigned int mult = (10 << card->csd.r2w_factor);
1872 		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1873 		unsigned int timeout_us;
1874 
1875 		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1876 		if (card->csd.tacc_ns < 1000000)
1877 			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1878 		else
1879 			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1880 
1881 		/*
1882 		 * ios.clock is only a target.  The real clock rate might be
1883 		 * less but not that much less, so fudge it by multiplying by 2.
1884 		 */
1885 		timeout_clks <<= 1;
1886 		timeout_us += (timeout_clks * 1000) /
1887 			      (card->host->ios.clock / 1000);
1888 
1889 		erase_timeout = timeout_us / 1000;
1890 
1891 		/*
1892 		 * Theoretically, the calculation could underflow so round up
1893 		 * to 1ms in that case.
1894 		 */
1895 		if (!erase_timeout)
1896 			erase_timeout = 1;
1897 	}
1898 
1899 	/* Multiplier for secure operations */
1900 	if (arg & MMC_SECURE_ARGS) {
1901 		if (arg == MMC_SECURE_ERASE_ARG)
1902 			erase_timeout *= card->ext_csd.sec_erase_mult;
1903 		else
1904 			erase_timeout *= card->ext_csd.sec_trim_mult;
1905 	}
1906 
1907 	erase_timeout *= qty;
1908 
1909 	/*
1910 	 * Ensure at least a 1 second timeout for SPI as per
1911 	 * 'mmc_set_data_timeout()'
1912 	 */
1913 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1914 		erase_timeout = 1000;
1915 
1916 	return erase_timeout;
1917 }
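
/*
 * Worked example for the CSD-based branch above, using the values
 * from the overflow comment: tacc_ns = 80000000 with r2w_factor = 7
 * gives mult = 10 << 7 = 1280, so timeout_us = (80000000 / 1000) *
 * 1280 = 102400000, i.e. about 102 seconds per erase group before
 * the clock-based term and the qty multiplier are applied.
 */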
1918 
1919 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1920 					 unsigned int arg,
1921 					 unsigned int qty)
1922 {
1923 	unsigned int erase_timeout;
1924 
1925 	if (card->ssr.erase_timeout) {
1926 		/* Erase timeout specified in SD Status Register (SSR) */
1927 		erase_timeout = card->ssr.erase_timeout * qty +
1928 				card->ssr.erase_offset;
1929 	} else {
1930 		/*
1931 		 * Erase timeout not specified in SD Status Register (SSR) so
1932 		 * use 250ms per write block.
1933 		 */
1934 		erase_timeout = 250 * qty;
1935 	}
1936 
1937 	/* Must not be less than 1 second */
1938 	if (erase_timeout < 1000)
1939 		erase_timeout = 1000;
1940 
1941 	return erase_timeout;
1942 }
1943 
1944 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1945 				      unsigned int arg,
1946 				      unsigned int qty)
1947 {
1948 	if (mmc_card_sd(card))
1949 		return mmc_sd_erase_timeout(card, arg, qty);
1950 	else
1951 		return mmc_mmc_erase_timeout(card, arg, qty);
1952 }
1953 
1954 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1955 			unsigned int to, unsigned int arg)
1956 {
1957 	struct mmc_command cmd = {};
1958 	unsigned int qty = 0, busy_timeout = 0;
1959 	bool use_r1b_resp = false;
1960 	unsigned long timeout;
1961 	int err;
1962 
1963 	mmc_retune_hold(card->host);
1964 
1965 	/*
1966 	 * qty is used to calculate the erase timeout which depends on how many
1967 	 * erase groups (or allocation units in SD terminology) are affected.
1968 	 * We count erasing part of an erase group as one erase group.
1969 	 * For SD, the allocation units are always a power of 2.  For MMC, the
1970 	 * erase group size is almost certainly also a power of 2, but the JEDEC
1971 	 * standard does not seem to insist on that, so we fall back to
1972 	 * division in that case.  SD may not specify an allocation unit size,
1973 	 * in which case the timeout is based on the number of write blocks.
1974 	 *
1975 	 * Note that the timeout for secure trim 2 will only be correct if the
1976 	 * number of erase groups specified is the same as the total of all
1977 	 * preceding secure trim 1 commands.  Since the power may have been
1978 	 * lost since the secure trim 1 commands occurred, it is generally
1979 	 * impossible to calculate the secure trim 2 timeout correctly.
1980 	 */
1981 	if (card->erase_shift)
1982 		qty += ((to >> card->erase_shift) -
1983 			(from >> card->erase_shift)) + 1;
1984 	else if (mmc_card_sd(card))
1985 		qty += to - from + 1;
1986 	else
1987 		qty += ((to / card->erase_size) -
1988 			(from / card->erase_size)) + 1;
1989 
1990 	if (!mmc_card_blockaddr(card)) {
1991 		from <<= 9;
1992 		to <<= 9;
1993 	}
1994 
1995 	if (mmc_card_sd(card))
1996 		cmd.opcode = SD_ERASE_WR_BLK_START;
1997 	else
1998 		cmd.opcode = MMC_ERASE_GROUP_START;
1999 	cmd.arg = from;
2000 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2001 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2002 	if (err) {
2003 		pr_err("mmc_erase: group start error %d, "
2004 		       "status %#x\n", err, cmd.resp[0]);
2005 		err = -EIO;
2006 		goto out;
2007 	}
2008 
2009 	memset(&cmd, 0, sizeof(struct mmc_command));
2010 	if (mmc_card_sd(card))
2011 		cmd.opcode = SD_ERASE_WR_BLK_END;
2012 	else
2013 		cmd.opcode = MMC_ERASE_GROUP_END;
2014 	cmd.arg = to;
2015 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2016 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2017 	if (err) {
2018 		pr_err("mmc_erase: group end error %d, status %#x\n",
2019 		       err, cmd.resp[0]);
2020 		err = -EIO;
2021 		goto out;
2022 	}
2023 
2024 	memset(&cmd, 0, sizeof(struct mmc_command));
2025 	cmd.opcode = MMC_ERASE;
2026 	cmd.arg = arg;
2027 	busy_timeout = mmc_erase_timeout(card, arg, qty);
2028 	/*
2029 	 * If the host controller supports busy signalling and the timeout for
2030 	 * the erase operation does not exceed the max_busy_timeout, we should
2031 	 * use an R1B response. Otherwise we need to prevent the host from doing
2032 	 * hw busy detection, which is done by converting to an R1 response.
2033 	 */
2034 	if (card->host->max_busy_timeout &&
2035 	    busy_timeout > card->host->max_busy_timeout) {
2036 		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2037 	} else {
2038 		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2039 		cmd.busy_timeout = busy_timeout;
2040 		use_r1b_resp = true;
2041 	}
2042 
2043 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2044 	if (err) {
2045 		pr_err("mmc_erase: erase error %d, status %#x\n",
2046 		       err, cmd.resp[0]);
2047 		err = -EIO;
2048 		goto out;
2049 	}
2050 
2051 	if (mmc_host_is_spi(card->host))
2052 		goto out;
2053 
2054 	/*
2055 	 * When an R1B response and MMC_CAP_WAIT_WHILE_BUSY are used, the
2056 	 * polling shall be avoided.
2057 	 */
2058 	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2059 		goto out;
2060 
2061 	timeout = jiffies + msecs_to_jiffies(busy_timeout);
2062 	do {
2063 		memset(&cmd, 0, sizeof(struct mmc_command));
2064 		cmd.opcode = MMC_SEND_STATUS;
2065 		cmd.arg = card->rca << 16;
2066 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2067 		/* Do not retry else we can't see errors */
2068 		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2069 		if (err || (cmd.resp[0] & 0xFDF92000)) {
2070 			pr_err("error %d requesting status %#x\n",
2071 				err, cmd.resp[0]);
2072 			err = -EIO;
2073 			goto out;
2074 		}
2075 
2076 		/* Timeout if the device never becomes ready for data and
2077 		 * never leaves the program state.
2078 		 */
2079 		if (time_after(jiffies, timeout)) {
2080 			pr_err("%s: Card stuck in programming state! %s\n",
2081 				mmc_hostname(card->host), __func__);
2082 			err = -EIO;
2083 			goto out;
2084 		}
2085 
2086 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2087 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2088 out:
2089 	mmc_retune_release(card->host);
2090 	return err;
2091 }
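
/*
 * For reference, the sequence above maps onto the standard opcodes:
 * SD_ERASE_WR_BLK_START/END are CMD32/CMD33 and
 * MMC_ERASE_GROUP_START/END are CMD35/CMD36, each carrying an
 * address (shifted to bytes for non-block-addressed cards), followed
 * by MMC_ERASE (CMD38) whose argument selects erase, trim or
 * discard.
 */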
2092 
2093 static unsigned int mmc_align_erase_size(struct mmc_card *card,
2094 					 unsigned int *from,
2095 					 unsigned int *to,
2096 					 unsigned int nr)
2097 {
2098 	unsigned int from_new = *from, nr_new = nr, rem;
2099 
2100 	/*
2101 	 * When 'card->erase_size' is a power of 2, we can use round_up/down()
2102 	 * to align the erase size efficiently.
2103 	 */
2104 	if (is_power_of_2(card->erase_size)) {
2105 		unsigned int temp = from_new;
2106 
2107 		from_new = round_up(temp, card->erase_size);
2108 		rem = from_new - temp;
2109 
2110 		if (nr_new > rem)
2111 			nr_new -= rem;
2112 		else
2113 			return 0;
2114 
2115 		nr_new = round_down(nr_new, card->erase_size);
2116 	} else {
2117 		rem = from_new % card->erase_size;
2118 		if (rem) {
2119 			rem = card->erase_size - rem;
2120 			from_new += rem;
2121 			if (nr_new > rem)
2122 				nr_new -= rem;
2123 			else
2124 				return 0;
2125 		}
2126 
2127 		rem = nr_new % card->erase_size;
2128 		if (rem)
2129 			nr_new -= rem;
2130 	}
2131 
2132 	if (nr_new == 0)
2133 		return 0;
2134 
2135 	*to = from_new + nr_new;
2136 	*from = from_new;
2137 
2138 	return nr_new;
2139 }
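
/*
 * Worked example: with erase_size = 1024 sectors, *from = 1000 and
 * nr = 5000, the power-of-2 path rounds *from up to 1024 (rem = 24),
 * reduces nr to 4976 and rounds it down to 4096, so the aligned
 * range becomes [1024, 5120) and 4096 is returned.
 */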
2140 
2141 /**
2142  * mmc_erase - erase sectors.
2143  * @card: card to erase
2144  * @from: first sector to erase
2145  * @nr: number of sectors to erase
2146  * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2147  *
2148  * Caller must claim host before calling this function.
2149  */
2150 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2151 	      unsigned int arg)
2152 {
2153 	unsigned int rem, to = from + nr;
2154 	int err;
2155 
2156 	if (!(card->host->caps & MMC_CAP_ERASE) ||
2157 	    !(card->csd.cmdclass & CCC_ERASE))
2158 		return -EOPNOTSUPP;
2159 
2160 	if (!card->erase_size)
2161 		return -EOPNOTSUPP;
2162 
2163 	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2164 		return -EOPNOTSUPP;
2165 
2166 	if ((arg & MMC_SECURE_ARGS) &&
2167 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2168 		return -EOPNOTSUPP;
2169 
2170 	if ((arg & MMC_TRIM_ARGS) &&
2171 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2172 		return -EOPNOTSUPP;
2173 
2174 	if (arg == MMC_SECURE_ERASE_ARG) {
2175 		if (from % card->erase_size || nr % card->erase_size)
2176 			return -EINVAL;
2177 	}
2178 
2179 	if (arg == MMC_ERASE_ARG)
2180 		nr = mmc_align_erase_size(card, &from, &to, nr);
2181 
2182 	if (nr == 0)
2183 		return 0;
2184 
2185 	if (to <= from)
2186 		return -EINVAL;
2187 
2188 	/* 'from' and 'to' are inclusive */
2189 	to -= 1;
2190 
2191 	/*
2192 	 * Special case where only one erase-group fits in the timeout budget:
2193 	 * If the region crosses an erase-group boundary in this particular
2194 	 * case, we will be trimming more than one erase-group, which does not
2195 	 * fit in the timeout budget of the controller, so we need to split it
2196 	 * and call mmc_do_erase() twice if necessary. This special case is
2197 	 * identified by the card->eg_boundary flag.
2198 	 */
2199 	rem = card->erase_size - (from % card->erase_size);
2200 	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2201 		err = mmc_do_erase(card, from, from + rem - 1, arg);
2202 		from += rem;
2203 		if ((err) || (to <= from))
2204 			return err;
2205 	}
2206 
2207 	return mmc_do_erase(card, from, to, arg);
2208 }
2209 EXPORT_SYMBOL(mmc_erase);
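
/*
 * Illustrative usage, a sketch rather than code from this file: a
 * caller such as a block driver, with the host already claimed,
 * would pick the strongest argument the card supports before
 * erasing a range of sectors:
 *
 *	unsigned int arg = MMC_ERASE_ARG;
 *
 *	if (mmc_can_discard(card))
 *		arg = MMC_DISCARD_ARG;
 *	else if (mmc_can_trim(card))
 *		arg = MMC_TRIM_ARG;
 *
 *	err = mmc_erase(card, from, nr, arg);
 */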
2210 
2211 int mmc_can_erase(struct mmc_card *card)
2212 {
2213 	if ((card->host->caps & MMC_CAP_ERASE) &&
2214 	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2215 		return 1;
2216 	return 0;
2217 }
2218 EXPORT_SYMBOL(mmc_can_erase);
2219 
2220 int mmc_can_trim(struct mmc_card *card)
2221 {
2222 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2223 	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2224 		return 1;
2225 	return 0;
2226 }
2227 EXPORT_SYMBOL(mmc_can_trim);
2228 
2229 int mmc_can_discard(struct mmc_card *card)
2230 {
2231 	/*
2232 	 * As there's no way to detect the discard support bit at v4.5,
2233 	 * use the s/w feature support field.
2234 	 */
2235 	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2236 		return 1;
2237 	return 0;
2238 }
2239 EXPORT_SYMBOL(mmc_can_discard);
2240 
2241 int mmc_can_sanitize(struct mmc_card *card)
2242 {
2243 	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2244 		return 0;
2245 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2246 		return 1;
2247 	return 0;
2248 }
2249 EXPORT_SYMBOL(mmc_can_sanitize);
2250 
2251 int mmc_can_secure_erase_trim(struct mmc_card *card)
2252 {
2253 	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2254 	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2255 		return 1;
2256 	return 0;
2257 }
2258 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2259 
2260 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2261 			    unsigned int nr)
2262 {
2263 	if (!card->erase_size)
2264 		return 0;
2265 	if (from % card->erase_size || nr % card->erase_size)
2266 		return 0;
2267 	return 1;
2268 }
2269 EXPORT_SYMBOL(mmc_erase_group_aligned);
2270 
2271 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2272 					    unsigned int arg)
2273 {
2274 	struct mmc_host *host = card->host;
2275 	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2276 	unsigned int last_timeout = 0;
2277 	unsigned int max_busy_timeout = host->max_busy_timeout ?
2278 			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2279 
2280 	if (card->erase_shift) {
2281 		max_qty = UINT_MAX >> card->erase_shift;
2282 		min_qty = card->pref_erase >> card->erase_shift;
2283 	} else if (mmc_card_sd(card)) {
2284 		max_qty = UINT_MAX;
2285 		min_qty = card->pref_erase;
2286 	} else {
2287 		max_qty = UINT_MAX / card->erase_size;
2288 		min_qty = card->pref_erase / card->erase_size;
2289 	}
2290 
2291 	/*
2292 	 * We should not use 'host->max_busy_timeout' as the only limit
2293 	 * when deciding the max discard sectors. We should pick a balanced
2294 	 * value that improves the erase speed while avoiding an overly long
2295 	 * timeout at the same time.
2296 	 *
2297 	 * Here we use 'card->pref_erase' as the minimal discard sectors
2298 	 * regardless of the size of 'host->max_busy_timeout', but if
2299 	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2300 	 * then we can continue to increase the max discard sectors until we
2301 	 * reach a balanced value. In cases where 'host->max_busy_timeout'
2302 	 * isn't specified, use the default max erase timeout.
2303 	 */
2304 	do {
2305 		y = 0;
2306 		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2307 			timeout = mmc_erase_timeout(card, arg, qty + x);
2308 
2309 			if (qty + x > min_qty && timeout > max_busy_timeout)
2310 				break;
2311 
2312 			if (timeout < last_timeout)
2313 				break;
2314 			last_timeout = timeout;
2315 			y = x;
2316 		}
2317 		qty += y;
2318 	} while (y);
2319 
2320 	if (!qty)
2321 		return 0;
2322 
2323 	/*
2324 	 * When specifying a sector range to trim, chances are we might cross
2325 	 * an erase-group boundary even if the amount of sectors is less than
2326 	 * one erase-group.
2327 	 * If we can only fit one erase-group in the controller timeout budget,
2328 	 * we have to care that erase-group boundaries are not crossed by a
2329 	 * single trim operation. We flag that special case with "eg_boundary".
2330 	 * In all other cases we can just decrement qty and pretend that we
2331 	 * always touch (qty + 1) erase-groups as a simple optimization.
2332 	 */
2333 	if (qty == 1)
2334 		card->eg_boundary = 1;
2335 	else
2336 		qty--;
2337 
2338 	/* Convert qty to sectors */
2339 	if (card->erase_shift)
2340 		max_discard = qty << card->erase_shift;
2341 	else if (mmc_card_sd(card))
2342 		max_discard = qty + 1;
2343 	else
2344 		max_discard = qty * card->erase_size;
2345 
2346 	return max_discard;
2347 }
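
/*
 * The loop above is a doubling search: from the current qty it
 * probes qty + 1, qty + 2, qty + 4, ... erase groups, remembering in
 * 'y' the largest step whose timeout still fits, then commits that
 * step and restarts until no step fits. The result is the largest
 * qty whose erase timeout stays within max_busy_timeout, while never
 * going below min_qty (derived from card->pref_erase).
 */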
2348 
2349 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2350 {
2351 	struct mmc_host *host = card->host;
2352 	unsigned int max_discard, max_trim;
2353 
2354 	/*
2355 	 * Without erase_group_def set, MMC erase timeout depends on clock
2356 	 * frequency, which can change.  In that case, the best choice is
2357 	 * just the preferred erase size.
2358 	 */
2359 	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2360 		return card->pref_erase;
2361 
2362 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2363 	if (mmc_can_trim(card)) {
2364 		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2365 		if (max_trim < max_discard)
2366 			max_discard = max_trim;
2367 	} else if (max_discard < card->erase_size) {
2368 		max_discard = 0;
2369 	}
2370 	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2371 		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2372 		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2373 	return max_discard;
2374 }
2375 EXPORT_SYMBOL(mmc_calc_max_discard);
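
/*
 * Illustrative usage, assuming a block layer request queue 'q': the
 * returned value is what a block driver would feed into the queue's
 * discard limit, e.g.:
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */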
2376 
2377 bool mmc_card_is_blockaddr(struct mmc_card *card)
2378 {
2379 	return card ? mmc_card_blockaddr(card) : false;
2380 }
2381 EXPORT_SYMBOL(mmc_card_is_blockaddr);
2382 
2383 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2384 {
2385 	struct mmc_command cmd = {};
2386 
2387 	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2388 	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2389 		return 0;
2390 
2391 	cmd.opcode = MMC_SET_BLOCKLEN;
2392 	cmd.arg = blocklen;
2393 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2394 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2395 }
2396 EXPORT_SYMBOL(mmc_set_blocklen);
2397 
2398 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2399 			bool is_rel_write)
2400 {
2401 	struct mmc_command cmd = {};
2402 
2403 	cmd.opcode = MMC_SET_BLOCK_COUNT;
2404 	cmd.arg = blockcount & 0x0000FFFF;
2405 	if (is_rel_write)
2406 		cmd.arg |= 1 << 31;
2407 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2408 	return mmc_wait_for_cmd(card->host, &cmd, 5);
2409 }
2410 EXPORT_SYMBOL(mmc_set_blockcount);
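
/*
 * mmc_set_blockcount() issues CMD23 (SET_BLOCK_COUNT) ahead of a
 * multi-block transfer; bit 31 of the argument, set via
 * 'is_rel_write', requests an eMMC reliable write. A sketch of
 * predefining an ordinary 8-block transfer:
 *
 *	err = mmc_set_blockcount(card, 8, false);
 */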
2411 
2412 static void mmc_hw_reset_for_init(struct mmc_host *host)
2413 {
2414 	mmc_pwrseq_reset(host);
2415 
2416 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2417 		return;
2418 	host->ops->hw_reset(host);
2419 }
2420 
2421 int mmc_hw_reset(struct mmc_host *host)
2422 {
2423 	int ret;
2424 
2425 	if (!host->card)
2426 		return -EINVAL;
2427 
2428 	mmc_bus_get(host);
2429 	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2430 		mmc_bus_put(host);
2431 		return -EOPNOTSUPP;
2432 	}
2433 
2434 	ret = host->bus_ops->reset(host);
2435 	mmc_bus_put(host);
2436 
2437 	if (ret)
2438 		pr_warn("%s: tried to reset card, got error %d\n",
2439 			mmc_hostname(host), ret);
2440 
2441 	return ret;
2442 }
2443 EXPORT_SYMBOL(mmc_hw_reset);
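
/*
 * Illustrative sketch with a hypothetical caller: a driver that
 * finds the card unresponsive during error recovery can attempt a
 * full power-cycle and re-initialization:
 *
 *	if (mmc_hw_reset(host))
 *		pr_warn("%s: card reset failed\n", mmc_hostname(host));
 */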
2444 
2445 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2446 {
2447 	host->f_init = freq;
2448 
2449 #ifdef CONFIG_MMC_DEBUG
2450 	pr_info("%s: %s: trying to init card at %u Hz\n",
2451 		mmc_hostname(host), __func__, host->f_init);
2452 #endif
2453 	mmc_power_up(host, host->ocr_avail);
2454 
2455 	/*
2456 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2457 	 * do a hardware reset if possible.
2458 	 */
2459 	mmc_hw_reset_for_init(host);
2460 
2461 	/*
2462 	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
2463 	 * if the card is being re-initialized, just send it.  CMD52
2464 	 * should be ignored by SD/eMMC cards.
2465 	 * Skip it if we already know that we do not support SDIO commands
2466 	 */
2467 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2468 		sdio_reset(host);
2469 
2470 	mmc_go_idle(host);
2471 
2472 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2473 		mmc_send_if_cond(host, host->ocr_avail);
2474 
2475 	/* Order's important: probe SDIO, then SD, then MMC */
2476 	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2477 		if (!mmc_attach_sdio(host))
2478 			return 0;
2479 
2480 	if (!(host->caps2 & MMC_CAP2_NO_SD))
2481 		if (!mmc_attach_sd(host))
2482 			return 0;
2483 
2484 	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2485 		if (!mmc_attach_mmc(host))
2486 			return 0;
2487 
2488 	mmc_power_off(host);
2489 	return -EIO;
2490 }
2491 
2492 int _mmc_detect_card_removed(struct mmc_host *host)
2493 {
2494 	int ret;
2495 
2496 	if (!host->card || mmc_card_removed(host->card))
2497 		return 1;
2498 
2499 	ret = host->bus_ops->alive(host);
2500 
2501 	/*
2502 	 * Card detect status and alive check may be out of sync if card is
2503 	 * removed slowly, when card detect switch changes while card/slot
2504 	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2505 	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2506 	 * detect work 200ms later for this case.
2507 	 */
2508 	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2509 		mmc_detect_change(host, msecs_to_jiffies(200));
2510 		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2511 	}
2512 
2513 	if (ret) {
2514 		mmc_card_set_removed(host->card);
2515 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2516 	}
2517 
2518 	return ret;
2519 }
2520 
2521 int mmc_detect_card_removed(struct mmc_host *host)
2522 {
2523 	struct mmc_card *card = host->card;
2524 	int ret;
2525 
2526 	WARN_ON(!host->claimed);
2527 
2528 	if (!card)
2529 		return 1;
2530 
2531 	if (!mmc_card_is_removable(host))
2532 		return 0;
2533 
2534 	ret = mmc_card_removed(card);
2535 	/*
2536 	 * The card will be considered unchanged unless we have been asked to
2537 	 * detect a change or the host requires polling to provide card detection.
2538 	 */
2539 	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2540 		return ret;
2541 
2542 	host->detect_change = 0;
2543 	if (!ret) {
2544 		ret = _mmc_detect_card_removed(host);
2545 		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2546 			/*
2547 			 * Schedule a detect work as soon as possible to let a
2548 			 * rescan handle the card removal.
2549 			 */
2550 			cancel_delayed_work(&host->detect);
2551 			_mmc_detect_change(host, 0, false);
2552 		}
2553 	}
2554 
2555 	return ret;
2556 }
2557 EXPORT_SYMBOL(mmc_detect_card_removed);
2558 
2559 void mmc_rescan(struct work_struct *work)
2560 {
2561 	struct mmc_host *host =
2562 		container_of(work, struct mmc_host, detect.work);
2563 	int i;
2564 
2565 	if (host->rescan_disable)
2566 		return;
2567 
2568 	/* If there is a non-removable card registered, only scan once */
2569 	if (!mmc_card_is_removable(host) && host->rescan_entered)
2570 		return;
2571 	host->rescan_entered = 1;
2572 
2573 	if (host->trigger_card_event && host->ops->card_event) {
2574 		mmc_claim_host(host);
2575 		host->ops->card_event(host);
2576 		mmc_release_host(host);
2577 		host->trigger_card_event = false;
2578 	}
2579 
2580 	mmc_bus_get(host);
2581 
2582 	/*
2583 	 * If there is a _removable_ card registered, check whether it is
2584 	 * still present.
2585 	 */
2586 	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2587 		host->bus_ops->detect(host);
2588 
2589 	host->detect_change = 0;
2590 
2591 	/*
2592 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2593 	 * the card is no longer present.
2594 	 */
2595 	mmc_bus_put(host);
2596 	mmc_bus_get(host);
2597 
2598 	/* if there still is a card present, stop here */
2599 	if (host->bus_ops != NULL) {
2600 		mmc_bus_put(host);
2601 		goto out;
2602 	}
2603 
2604 	/*
2605 	 * Only we can add a new handler, so it's safe to
2606 	 * release the lock here.
2607 	 */
2608 	mmc_bus_put(host);
2609 
2610 	mmc_claim_host(host);
2611 	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2612 			host->ops->get_cd(host) == 0) {
2613 		mmc_power_off(host);
2614 		mmc_release_host(host);
2615 		goto out;
2616 	}
2617 
2618 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2619 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2620 			break;
2621 		if (freqs[i] <= host->f_min)
2622 			break;
2623 	}
2624 	mmc_release_host(host);
2625 
2626  out:
2627 	if (host->caps & MMC_CAP_NEEDS_POLL)
2628 		mmc_schedule_delayed_work(&host->detect, HZ);
2629 }
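
/*
 * Note that the init loop above walks the 'freqs' table (400, 300,
 * 200 and 100 kHz), clamped to host->f_min, retrying card
 * initialization at progressively lower clock rates until one
 * succeeds or the table is exhausted.
 */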
2630 
2631 void mmc_start_host(struct mmc_host *host)
2632 {
2633 	host->f_init = max(freqs[0], host->f_min);
2634 	host->rescan_disable = 0;
2635 	host->ios.power_mode = MMC_POWER_UNDEFINED;
2636 
2637 	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2638 		mmc_claim_host(host);
2639 		mmc_power_up(host, host->ocr_avail);
2640 		mmc_release_host(host);
2641 	}
2642 
2643 	mmc_gpiod_request_cd_irq(host);
2644 	_mmc_detect_change(host, 0, false);
2645 }
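
/*
 * Host drivers do not normally call mmc_start_host() directly; it is
 * invoked via mmc_add_host() during host registration, once the
 * driver has filled in its ops, caps and OCR mask.
 */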
2646 
2647 void mmc_stop_host(struct mmc_host *host)
2648 {
2649 #ifdef CONFIG_MMC_DEBUG
2650 	unsigned long flags;
2651 	spin_lock_irqsave(&host->lock, flags);
2652 	host->removed = 1;
2653 	spin_unlock_irqrestore(&host->lock, flags);
2654 #endif
2655 	if (host->slot.cd_irq >= 0) {
2656 		if (host->slot.cd_wake_enabled)
2657 			disable_irq_wake(host->slot.cd_irq);
2658 		disable_irq(host->slot.cd_irq);
2659 	}
2660 
2661 	host->rescan_disable = 1;
2662 	cancel_delayed_work_sync(&host->detect);
2663 
2664 	/* clear pm flags now and let card drivers set them as needed */
2665 	host->pm_flags = 0;
2666 
2667 	mmc_bus_get(host);
2668 	if (host->bus_ops && !host->bus_dead) {
2669 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2670 		host->bus_ops->remove(host);
2671 		mmc_claim_host(host);
2672 		mmc_detach_bus(host);
2673 		mmc_power_off(host);
2674 		mmc_release_host(host);
2675 		mmc_bus_put(host);
2676 		return;
2677 	}
2678 	mmc_bus_put(host);
2679 
2680 	mmc_claim_host(host);
2681 	mmc_power_off(host);
2682 	mmc_release_host(host);
2683 }
2684 
2685 int mmc_power_save_host(struct mmc_host *host)
2686 {
2687 	int ret = 0;
2688 
2689 #ifdef CONFIG_MMC_DEBUG
2690 	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2691 #endif
2692 
2693 	mmc_bus_get(host);
2694 
2695 	if (!host->bus_ops || host->bus_dead) {
2696 		mmc_bus_put(host);
2697 		return -EINVAL;
2698 	}
2699 
2700 	if (host->bus_ops->power_save)
2701 		ret = host->bus_ops->power_save(host);
2702 
2703 	mmc_bus_put(host);
2704 
2705 	mmc_power_off(host);
2706 
2707 	return ret;
2708 }
2709 EXPORT_SYMBOL(mmc_power_save_host);
2710 
2711 int mmc_power_restore_host(struct mmc_host *host)
2712 {
2713 	int ret;
2714 
2715 #ifdef CONFIG_MMC_DEBUG
2716 	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2717 #endif
2718 
2719 	mmc_bus_get(host);
2720 
2721 	if (!host->bus_ops || host->bus_dead) {
2722 		mmc_bus_put(host);
2723 		return -EINVAL;
2724 	}
2725 
2726 	mmc_power_up(host, host->card->ocr);
2727 	ret = host->bus_ops->power_restore(host);
2728 
2729 	mmc_bus_put(host);
2730 
2731 	return ret;
2732 }
2733 EXPORT_SYMBOL(mmc_power_restore_host);
2734 
2735 #ifdef CONFIG_PM_SLEEP
2736 /* Do the card removal on suspend if the card is assumed removable.
2737  * Do that in a pm notifier while userspace isn't yet frozen, so we will be
2738  * able to sync the card.
2739  */
2740 static int mmc_pm_notify(struct notifier_block *notify_block,
2741 			unsigned long mode, void *unused)
2742 {
2743 	struct mmc_host *host = container_of(
2744 		notify_block, struct mmc_host, pm_notify);
2745 	unsigned long flags;
2746 	int err = 0;
2747 
2748 	switch (mode) {
2749 	case PM_HIBERNATION_PREPARE:
2750 	case PM_SUSPEND_PREPARE:
2751 	case PM_RESTORE_PREPARE:
2752 		spin_lock_irqsave(&host->lock, flags);
2753 		host->rescan_disable = 1;
2754 		spin_unlock_irqrestore(&host->lock, flags);
2755 		cancel_delayed_work_sync(&host->detect);
2756 
2757 		if (!host->bus_ops)
2758 			break;
2759 
2760 		/* Validate prerequisites for suspend */
2761 		if (host->bus_ops->pre_suspend)
2762 			err = host->bus_ops->pre_suspend(host);
2763 		if (!err)
2764 			break;
2765 
2766 		/* Calling bus_ops->remove() with a claimed host can deadlock */
2767 		host->bus_ops->remove(host);
2768 		mmc_claim_host(host);
2769 		mmc_detach_bus(host);
2770 		mmc_power_off(host);
2771 		mmc_release_host(host);
2772 		host->pm_flags = 0;
2773 		break;
2774 
2775 	case PM_POST_SUSPEND:
2776 	case PM_POST_HIBERNATION:
2777 	case PM_POST_RESTORE:
2778 
2779 		spin_lock_irqsave(&host->lock, flags);
2780 		host->rescan_disable = 0;
2781 		spin_unlock_irqrestore(&host->lock, flags);
2782 		_mmc_detect_change(host, 0, false);
2783 
2784 	}
2785 
2786 	return 0;
2787 }
2788 
2789 void mmc_register_pm_notifier(struct mmc_host *host)
2790 {
2791 	host->pm_notify.notifier_call = mmc_pm_notify;
2792 	register_pm_notifier(&host->pm_notify);
2793 }
2794 
2795 void mmc_unregister_pm_notifier(struct mmc_host *host)
2796 {
2797 	unregister_pm_notifier(&host->pm_notify);
2798 }
2799 #endif
2800 
2801 /**
2802  * mmc_init_context_info() - init synchronization context
2803  * @host: mmc host
2804  *
2805  * Init the struct context_info needed to implement the asynchronous
2806  * request mechanism, used by the mmc core, host drivers and mmc
2807  * request suppliers.
2808  */
2809 void mmc_init_context_info(struct mmc_host *host)
2810 {
2811 	host->context_info.is_new_req = false;
2812 	host->context_info.is_done_rcv = false;
2813 	host->context_info.is_waiting_last_req = false;
2814 	init_waitqueue_head(&host->context_info.wait);
2815 }
2816 
2817 static int __init mmc_init(void)
2818 {
2819 	int ret;
2820 
2821 	ret = mmc_register_bus();
2822 	if (ret)
2823 		return ret;
2824 
2825 	ret = mmc_register_host_class();
2826 	if (ret)
2827 		goto unregister_bus;
2828 
2829 	ret = sdio_register_bus();
2830 	if (ret)
2831 		goto unregister_host_class;
2832 
2833 	return 0;
2834 
2835 unregister_host_class:
2836 	mmc_unregister_host_class();
2837 unregister_bus:
2838 	mmc_unregister_bus();
2839 	return ret;
2840 }
2841 
2842 static void __exit mmc_exit(void)
2843 {
2844 	sdio_unregister_bus();
2845 	mmc_unregister_host_class();
2846 	mmc_unregister_bus();
2847 }
2848 
2849 subsys_initcall(mmc_init);
2850 module_exit(mmc_exit);
2851 
2852 MODULE_LICENSE("GPL");
2853