xref: /openbmc/linux/drivers/mmc/host/jz4740_mmc.c (revision 67ad8238)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
4  *  Copyright (C) 2013, Imagination Technologies
5  *
6  *  JZ4740 SD/MMC controller driver
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/slot-gpio.h>
20 #include <linux/module.h>
21 #include <linux/of_device.h>
22 #include <linux/pinctrl/consumer.h>
23 #include <linux/platform_device.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/scatterlist.h>
26 
27 #include <asm/cacheflush.h>
28 
/* MMC controller register map (offsets from the memory resource base) */
#define JZ_REG_MMC_STRPCL	0x00	/* start/stop clock and operation control */
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08	/* clock rate divider */
#define JZ_REG_MMC_CMDAT	0x0C	/* command attributes */
#define JZ_REG_MMC_RESTO	0x10	/* response timeout */
#define JZ_REG_MMC_RDTO		0x14	/* read timeout */
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C	/* number of blocks */
#define JZ_REG_MMC_SNOB		0x20	/* successfully transferred blocks */
#define JZ_REG_MMC_IMASK	0x24	/* interrupt mask */
#define JZ_REG_MMC_IREG		0x28	/* interrupt status/ack */
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34	/* response FIFO, read 16 bits at a time */
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_LPM		0x40	/* low power mode (JZ4725B and newer) */
#define JZ_REG_MMC_DMAC		0x44	/* DMA control (JZ4780 and newer) */

/* STRPCL register bits */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
#define JZ_MMC_STRPCL_RESET BIT(3)
#define JZ_MMC_STRPCL_START_OP BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START BIT(1)


/* STATUS register bits */
#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
#define JZ_MMC_STATUS_PRG_DONE BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
#define JZ_MMC_STATUS_CLK_EN BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)

/* Aggregate error masks: CRC_READ_ERROR|TIMEOUT_READ, TIMEOUT_WRITE|CRC_WRITE_ERROR */
#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))


/* CMDAT register bits */
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
#define JZ_MMC_CMDAT_STREAM BIT(5)
#define JZ_MMC_CMDAT_WRITE BIT(4)
#define JZ_MMC_CMDAT_DATA_EN BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1 1
#define JZ_MMC_CMDAT_RSP_R2 2
#define JZ_MMC_CMDAT_RSP_R3 3

/* IREG/IMASK interrupt bits */
#define JZ_MMC_IRQ_SDIO BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
#define JZ_MMC_IRQ_PRG_DONE BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)

/* DMAC register bits: DMA_SEL selects the external DMA controller */
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)

/* LPM register bits */
#define JZ_MMC_LPM_DRV_RISING BIT(31)
#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
#define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)

#define JZ_MMC_CLK_RATE 24000000	/* default controller clock in Hz */
#define JZ_MMC_REQ_TIMEOUT_MS 5000	/* software request watchdog timeout */
113 
/* Supported SoC generations, in increasing feature order; the driver
 * gates register-width and feature differences with >= comparisons. */
enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};
121 
/* Progress of the current request; the irq worker resumes from the
 * state it had to park in while waiting for an interrupt. */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};
128 
/*
 * The MMC core allows to prepare a mmc_request while another mmc_request
 * is in-flight. This is used via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called.
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};
147 
/* Per-controller driver state, allocated as mmc_host private data. */
struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;	/* SoC generation, gates feature use */

	int irq;

	void __iomem *base;			/* mapped register window */
	struct resource *mem_res;		/* physical resource, used for DMA FIFO addresses */
	struct mmc_request *req;		/* in-flight request, NULL when idle */
	struct mmc_command *cmd;		/* command currently on the bus */

	bool vqmmc_enabled;			/* tracks regulator enable/disable pairing */

	unsigned long waiting;			/* bit 0: irq/timeout handshake flag */

	uint32_t cmdat;				/* sticky CMDAT bits (bus width, INIT) */

	uint32_t irq_mask;			/* software copy of IMASK, guarded by lock */

	spinlock_t lock;			/* protects irq_mask updates */

	struct timer_list timeout_timer;	/* software request watchdog */
	struct sg_mapping_iter miter;		/* PIO scatterlist iterator */
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;		/* NULL when a combined tx-rx channel is used */
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read
 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 * trigger is when data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};
187 
/*
 * Write the interrupt mask register. On JZ4725B and newer the IMASK
 * register is 32 bits wide; the original JZ4740 only implements 16 bits.
 */
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	void __iomem *imask = host->base + JZ_REG_MMC_IMASK;

	if (host->version >= JZ_MMC_JZ4725B)
		writel(val, imask);
	else
		writew(val, imask);
}
196 
/*
 * Acknowledge interrupts in IREG. The register is 32 bits wide on
 * JZ4780 and newer, 16 bits wide on earlier SoCs.
 */
static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	void __iomem *ireg = host->base + JZ_REG_MMC_IREG;

	if (host->version >= JZ_MMC_JZ4780)
		writel(val, ireg);
	else
		writew(val, ireg);
}
205 
jz4740_mmc_read_irq_reg(struct jz4740_mmc_host * host)206 static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
207 {
208 	if (host->version >= JZ_MMC_JZ4780)
209 		return readl(host->base + JZ_REG_MMC_IREG);
210 	else
211 		return readw(host->base + JZ_REG_MMC_IREG);
212 }
213 
214 /*----------------------------------------------------------------------------*/
215 /* DMA infrastructure */
216 
jz4740_mmc_release_dma_channels(struct jz4740_mmc_host * host)217 static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
218 {
219 	if (!host->use_dma)
220 		return;
221 
222 	dma_release_channel(host->dma_tx);
223 	if (host->dma_rx)
224 		dma_release_channel(host->dma_rx);
225 }
226 
jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host * host)227 static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
228 {
229 	struct device *dev = mmc_dev(host->mmc);
230 
231 	host->dma_tx = dma_request_chan(dev, "tx-rx");
232 	if (!IS_ERR(host->dma_tx))
233 		return 0;
234 
235 	if (PTR_ERR(host->dma_tx) != -ENODEV) {
236 		dev_err(dev, "Failed to get dma tx-rx channel\n");
237 		return PTR_ERR(host->dma_tx);
238 	}
239 
240 	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
241 	if (IS_ERR(host->dma_tx)) {
242 		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
243 		return PTR_ERR(host->dma_tx);
244 	}
245 
246 	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
247 	if (IS_ERR(host->dma_rx)) {
248 		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
249 		dma_release_channel(host->dma_tx);
250 		return PTR_ERR(host->dma_rx);
251 	}
252 
253 	/*
254 	 * Limit the maximum segment size in any SG entry according to
255 	 * the parameters of the DMA engine device.
256 	 */
257 	if (host->dma_tx) {
258 		struct device *dev = host->dma_tx->device->dev;
259 		unsigned int max_seg_size = dma_get_max_seg_size(dev);
260 
261 		if (max_seg_size < host->mmc->max_seg_size)
262 			host->mmc->max_seg_size = max_seg_size;
263 	}
264 
265 	if (host->dma_rx) {
266 		struct device *dev = host->dma_rx->device->dev;
267 		unsigned int max_seg_size = dma_get_max_seg_size(dev);
268 
269 		if (max_seg_size < host->mmc->max_seg_size)
270 			host->mmc->max_seg_size = max_seg_size;
271 	}
272 
273 	return 0;
274 }
275 
jz4740_mmc_get_dma_chan(struct jz4740_mmc_host * host,struct mmc_data * data)276 static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
277 						       struct mmc_data *data)
278 {
279 	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
280 		return host->dma_rx;
281 	else
282 		return host->dma_tx;
283 }
284 
jz4740_mmc_dma_unmap(struct jz4740_mmc_host * host,struct mmc_data * data)285 static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
286 				 struct mmc_data *data)
287 {
288 	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
289 	enum dma_data_direction dir = mmc_get_dma_dir(data);
290 
291 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
292 	data->host_cookie = COOKIE_UNMAPPED;
293 }
294 
295 /* Prepares DMA data for current or next transfer.
296  * A request can be in-flight when this is called.
297  */
jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host * host,struct mmc_data * data,int cookie)298 static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
299 				       struct mmc_data *data,
300 				       int cookie)
301 {
302 	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
303 	enum dma_data_direction dir = mmc_get_dma_dir(data);
304 	unsigned int sg_count;
305 
306 	if (data->host_cookie == COOKIE_PREMAPPED)
307 		return data->sg_count;
308 
309 	sg_count = dma_map_sg(chan->device->dev,
310 			data->sg,
311 			data->sg_len,
312 			dir);
313 
314 	if (!sg_count) {
315 		dev_err(mmc_dev(host->mmc),
316 			"Failed to map scatterlist for DMA operation\n");
317 		return -EINVAL;
318 	}
319 
320 	data->sg_count = sg_count;
321 	data->host_cookie = cookie;
322 
323 	return data->sg_count;
324 }
325 
/*
 * Configure the channel and submit a slave-SG DMA transfer for @data.
 * Direction and FIFO address are chosen from data->flags. Returns 0 on
 * success, a negative errno if mapping or descriptor allocation failed.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		/* burst matches the controller's 8-word FIFO trigger level */
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		/* physical FIFO address, derived from the MMIO resource */
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	/* NOTE(review): dmaengine_slave_config() return value is ignored;
	 * a failure here would surface later as a broken transfer. */
	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
			conf.direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* only unmap what this call mapped; premapped SGs belong to post_req */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}
372 
jz4740_mmc_pre_request(struct mmc_host * mmc,struct mmc_request * mrq)373 static void jz4740_mmc_pre_request(struct mmc_host *mmc,
374 				   struct mmc_request *mrq)
375 {
376 	struct jz4740_mmc_host *host = mmc_priv(mmc);
377 	struct mmc_data *data = mrq->data;
378 
379 	if (!host->use_dma)
380 		return;
381 
382 	data->host_cookie = COOKIE_UNMAPPED;
383 	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
384 		data->host_cookie = COOKIE_UNMAPPED;
385 }
386 
jz4740_mmc_post_request(struct mmc_host * mmc,struct mmc_request * mrq,int err)387 static void jz4740_mmc_post_request(struct mmc_host *mmc,
388 				    struct mmc_request *mrq,
389 				    int err)
390 {
391 	struct jz4740_mmc_host *host = mmc_priv(mmc);
392 	struct mmc_data *data = mrq->data;
393 
394 	if (data && data->host_cookie != COOKIE_UNMAPPED)
395 		jz4740_mmc_dma_unmap(host, data);
396 
397 	if (err) {
398 		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
399 
400 		dmaengine_terminate_all(chan);
401 	}
402 }
403 
404 /*----------------------------------------------------------------------------*/
405 
/*
 * Enable or disable the interrupt(s) in @irq by updating the software
 * mask copy and pushing it to IMASK, under the host lock. A clear bit
 * in the mask means the interrupt is enabled.
 */
static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
	unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (enabled)
		host->irq_mask &= ~irq;	/* unmask */
	else
		host->irq_mask |= irq;	/* mask */
	jz4740_mmc_write_irq_mask(host, host->irq_mask);

	spin_unlock_irqrestore(&host->lock, flags);
}
420 
/*
 * Start the card clock; when @start_transfer is set, also kick off the
 * previously programmed command/data operation.
 */
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
	bool start_transfer)
{
	uint16_t strpcl = start_transfer ?
		(JZ_MMC_STRPCL_CLOCK_START | JZ_MMC_STRPCL_START_OP) :
		JZ_MMC_STRPCL_CLOCK_START;

	writew(strpcl, host->base + JZ_REG_MMC_STRPCL);
}
431 
jz4740_mmc_clock_disable(struct jz4740_mmc_host * host)432 static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
433 {
434 	uint32_t status;
435 	unsigned int timeout = 1000;
436 
437 	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
438 	do {
439 		status = readl(host->base + JZ_REG_MMC_STATUS);
440 	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
441 }
442 
jz4740_mmc_reset(struct jz4740_mmc_host * host)443 static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
444 {
445 	uint32_t status;
446 	unsigned int timeout = 1000;
447 
448 	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
449 	udelay(10);
450 	do {
451 		status = readl(host->base + JZ_REG_MMC_STATUS);
452 	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
453 }
454 
jz4740_mmc_request_done(struct jz4740_mmc_host * host)455 static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
456 {
457 	struct mmc_request *req;
458 	struct mmc_data *data;
459 
460 	req = host->req;
461 	data = req->data;
462 	host->req = NULL;
463 
464 	if (data && data->host_cookie == COOKIE_MAPPED)
465 		jz4740_mmc_dma_unmap(host, data);
466 	mmc_request_done(host->mmc, req);
467 }
468 
/*
 * Spin briefly waiting for @irq to assert in IREG.
 *
 * Returns false if the interrupt fired within the polling budget.
 * Returns true on poll timeout — in that case the interrupt is enabled,
 * the software watchdog timer is re-armed, and the caller is expected
 * to park its state machine and resume from the irq worker thread.
 * (Note the return type is unsigned int but the values are bool.)
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
	unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		/* hand over to interrupt-driven completion */
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}
489 
jz4740_mmc_transfer_check_state(struct jz4740_mmc_host * host,struct mmc_data * data)490 static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
491 	struct mmc_data *data)
492 {
493 	int status;
494 
495 	status = readl(host->base + JZ_REG_MMC_STATUS);
496 	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
497 		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
498 			host->req->cmd->error = -ETIMEDOUT;
499 			data->error = -ETIMEDOUT;
500 		} else {
501 			host->req->cmd->error = -EIO;
502 			data->error = -EIO;
503 		}
504 	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
505 		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
506 			host->req->cmd->error = -ETIMEDOUT;
507 			data->error = -ETIMEDOUT;
508 		} else {
509 			host->req->cmd->error = -EIO;
510 			data->error = -EIO;
511 		}
512 	}
513 }
514 
/*
 * PIO write path: feed the TX FIFO from the scatterlist in 8-word
 * bursts (the FIFO trigger level), polling TXFIFO_WR_REQ before each
 * burst. Returns true if polling timed out — the transfer then parks
 * and resumes from the irq worker; bytes consumed so far are recorded.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
	struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;	/* i: leftover words (<8), j: number of 8-word bursts */

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;	/* segment length in 32-bit words */
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			/* unrolled 8-word burst matching the FIFO trigger */
			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	/* record partial progress so the miter can resume where we stopped */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
569 
/*
 * PIO read path: drain the RX FIFO into the scatterlist in 8-word
 * (32-byte) bursts, polling RXFIFO_RD_REQ before each burst; a trailing
 * sub-word remainder is copied byte-wise. Returns true if polling timed
 * out — the transfer then parks and resumes from the irq worker.
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;	/* i: leftover bytes (<32), j: number of 32-byte bursts */
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;	/* segment length in bytes here */
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			/* unrolled 8-word burst matching the FIFO trigger */
			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				/* partial word: copy only the valid bytes */
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	/* record partial progress so the miter can resume where we stopped */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
640 
/*
 * Software watchdog: fires when a request saw no interrupt within
 * JZ_MMC_REQ_TIMEOUT_MS. The waiting bit arbitrates with the hard irq
 * handler — whoever clears it owns completion of the request.
 */
static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	/* the irq handler got there first; nothing to do */
	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}
653 
/*
 * Read the command response out of the 16-bit response FIFO and
 * reassemble it into cmd->resp[]. For 136-bit (R2) responses, each
 * 32-bit resp word is stitched from three successive 16-bit reads with
 * an 8-bit overlap shared with the next word; short responses take the
 * top 24 bits from the first read and the low byte from the third.
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			/* top byte of this read belongs to the next resp word */
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}
676 
/*
 * Program and launch @cmd on the bus: stop the clock, set up CMDAT
 * (response format, data direction, DMA mode), block geometry, opcode
 * and argument, then restart the clock with START_OP. The one-shot
 * INIT flag (74-clock card init sequence) is consumed here.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	/* INIT is only sent with the first command after power-up */
	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The JZ4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			/* make sure a previous DMA transfer's enable is cleared */
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}
739 
jz_mmc_prepare_data_transfer(struct jz4740_mmc_host * host)740 static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
741 {
742 	struct mmc_command *cmd = host->req->cmd;
743 	struct mmc_data *data = cmd->data;
744 	int direction;
745 
746 	if (data->flags & MMC_DATA_READ)
747 		direction = SG_MITER_TO_SG;
748 	else
749 		direction = SG_MITER_FROM_SG;
750 
751 	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
752 }
753 
754 
/*
 * Threaded irq handler: drives the request state machine. Each case
 * falls through to the next stage; when a poll times out the state is
 * saved and the function returns, to be re-entered on the next
 * interrupt. The request is completed only when no stage parked.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	/* a command error set by the hard irq handler short-circuits everything */
	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			/* park; resume in this stage on the next interrupt */
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		/* ack the completion interrupt before moving on */
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		fallthrough;

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}
837 
/*
 * Hard irq handler: acknowledge interrupts that need no thread work,
 * signal SDIO card interrupts, and on command-related interrupts record
 * any response/CRC error before waking the threaded handler. The
 * waiting bit handshake with the watchdog timer decides who completes
 * the request.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	/* only consider interrupts that are currently unmasked */
	irq_reg &= ~host->irq_mask;

	/* these are consumed by the polling/threaded paths; don't ack here */
	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		/* race with the watchdog: whoever clears the bit proceeds */
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}
886 
/*
 * Set the card clock: run the controller clock at mmc->f_max and derive
 * the card rate with the CLKRT power-of-two divider (0..7). For rates
 * above 25 MHz, enable low-power mode with SoC-appropriate drive/sample
 * phase delays. Returns the actual rate achieved.
 */
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	/* halve until at or below the requested rate (divider caps at 7) */
	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);

	if (real_rate > 25000000) {
		if (host->version >= JZ_MMC_JZ4780) {
			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
				   JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4760) {
			writel(JZ_MMC_LPM_DRV_RISING |
				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4725B)
			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
	}

	return real_rate;
}
921 
/*
 * mmc_host_ops.request: start a new request. Clears stale interrupt
 * status, arms the END_CMD_RES interrupt and the software watchdog, and
 * launches the command; completion continues in the irq handlers.
 * The waiting bit must be set before the command is sent so the irq
 * handler can claim the request.
 */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	/* ack everything left over from the previous request */
	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}
937 
/*
 * mmc_host_ops.set_ios: apply clock rate, power state and bus width.
 * POWER_UP resets the controller, applies vmmc and queues the INIT
 * (74-clock) sequence for the next command; POWER_ON enables vqmmc;
 * POWER_OFF reverses both and gates the controller clock.
 */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		/* vqmmc_enabled guards against double enable/disable */
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret)
				dev_err(&host->pdev->dev, "Failed to set vqmmc power!\n");
			else
				host->vqmmc_enabled = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}
		clk_disable_unprepare(host->clk);
		break;
	default:
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}
992 
/* mmc_host_ops.enable_sdio_irq: (un)mask the SDIO card interrupt. */
static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}
998 
jz4740_voltage_switch(struct mmc_host * mmc,struct mmc_ios * ios)999 static int jz4740_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1000 {
1001 	int ret;
1002 
1003 	/* vqmmc regulator is available */
1004 	if (!IS_ERR(mmc->supply.vqmmc)) {
1005 		ret = mmc_regulator_set_vqmmc(mmc, ios);
1006 		return ret < 0 ? ret : 0;
1007 	}
1008 
1009 	/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
1010 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1011 		return 0;
1012 
1013 	return -EINVAL;
1014 }
1015 
/* Host operations; card-detect and write-protect come from slot GPIOs. */
static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
	.start_signal_voltage_switch = jz4740_voltage_switch,
};
1026 
1027 static const struct of_device_id jz4740_mmc_of_match[] = {
1028 	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
1029 	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
1030 	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
1031 	{ .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
1032 	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
1033 	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
1034 	{},
1035 };
1036 MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
1037 
jz4740_mmc_probe(struct platform_device * pdev)1038 static int jz4740_mmc_probe(struct platform_device* pdev)
1039 {
1040 	int ret;
1041 	struct mmc_host *mmc;
1042 	struct jz4740_mmc_host *host;
1043 	const struct of_device_id *match;
1044 
1045 	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
1046 	if (!mmc) {
1047 		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
1048 		return -ENOMEM;
1049 	}
1050 
1051 	host = mmc_priv(mmc);
1052 
1053 	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
1054 	if (match) {
1055 		host->version = (enum jz4740_mmc_version)match->data;
1056 	} else {
1057 		/* JZ4740 should be the only one using legacy probe */
1058 		host->version = JZ_MMC_JZ4740;
1059 	}
1060 
1061 	ret = mmc_of_parse(mmc);
1062 	if (ret) {
1063 		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
1064 		goto err_free_host;
1065 	}
1066 
1067 	mmc_regulator_get_supply(mmc);
1068 
1069 	host->irq = platform_get_irq(pdev, 0);
1070 	if (host->irq < 0) {
1071 		ret = host->irq;
1072 		goto err_free_host;
1073 	}
1074 
1075 	host->clk = devm_clk_get(&pdev->dev, "mmc");
1076 	if (IS_ERR(host->clk)) {
1077 		ret = PTR_ERR(host->clk);
1078 		dev_err(&pdev->dev, "Failed to get mmc clock\n");
1079 		goto err_free_host;
1080 	}
1081 
1082 	host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &host->mem_res);
1083 	if (IS_ERR(host->base)) {
1084 		ret = PTR_ERR(host->base);
1085 		goto err_free_host;
1086 	}
1087 
1088 	mmc->ops = &jz4740_mmc_ops;
1089 	if (!mmc->f_max)
1090 		mmc->f_max = JZ_MMC_CLK_RATE;
1091 
1092 	/*
1093 	 * There seems to be a problem with this driver on the JZ4760 and
1094 	 * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
1095 	 * the communication fails with many SD cards.
1096 	 * Until this bug is sorted out, limit the maximum rate to 24 MHz.
1097 	 */
1098 	if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
1099 		mmc->f_max = JZ_MMC_CLK_RATE;
1100 
1101 	mmc->f_min = mmc->f_max / 128;
1102 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1103 
1104 	/*
1105 	 * We use a fixed timeout of 5s, hence inform the core about it. A
1106 	 * future improvement should instead respect the cmd->busy_timeout.
1107 	 */
1108 	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;
1109 
1110 	mmc->max_blk_size = (1 << 10) - 1;
1111 	mmc->max_blk_count = (1 << 15) - 1;
1112 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1113 
1114 	mmc->max_segs = 128;
1115 	mmc->max_seg_size = mmc->max_req_size;
1116 
1117 	host->mmc = mmc;
1118 	host->pdev = pdev;
1119 	spin_lock_init(&host->lock);
1120 	host->irq_mask = ~0;
1121 
1122 	jz4740_mmc_reset(host);
1123 
1124 	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1125 			dev_name(&pdev->dev), host);
1126 	if (ret) {
1127 		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1128 		goto err_free_host;
1129 	}
1130 
1131 	jz4740_mmc_clock_disable(host);
1132 	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
1133 
1134 	ret = jz4740_mmc_acquire_dma_channels(host);
1135 	if (ret == -EPROBE_DEFER)
1136 		goto err_free_irq;
1137 	host->use_dma = !ret;
1138 
1139 	platform_set_drvdata(pdev, host);
1140 	ret = mmc_add_host(mmc);
1141 
1142 	if (ret) {
1143 		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1144 		goto err_release_dma;
1145 	}
1146 	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
1147 
1148 	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1149 		 host->use_dma ? "DMA" : "PIO",
1150 		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
1151 		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
1152 
1153 	return 0;
1154 
1155 err_release_dma:
1156 	if (host->use_dma)
1157 		jz4740_mmc_release_dma_channels(host);
1158 err_free_irq:
1159 	free_irq(host->irq, host);
1160 err_free_host:
1161 	mmc_free_host(mmc);
1162 
1163 	return ret;
1164 }
1165 
/*
 * Remove: quiesce the hardware (kill the request-timeout timer, mask all
 * controller interrupts, reset the block) before unregistering from the
 * MMC core, then release the IRQ, DMA channels and host structure.
 */
static void jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	/* DMA channels only exist if probe succeeded in acquiring them. */
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);
}
1183 
/* System suspend: park the MMC pins in their pinctrl "sleep" state. */
static int jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}
1188 
/* System resume: restore the MMC pins to their pinctrl "default" state. */
static int jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}
1193 
/* Sleep-only PM ops; compiled out when CONFIG_PM_SLEEP is disabled. */
static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
				jz4740_mmc_resume);

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove_new = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
	},
};
1207 
/* Standard module registration boilerplate. */
module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
1213