xref: /openbmc/u-boot/drivers/mmc/tmio-common.c (revision 66c433ed4342e5761ee9b048c85fe47d31130b2e)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <common.h>
#include <clk.h>
#include <fdtdec.h>
#include <mmc.h>
#include <dm.h>
#include <dm/pinctrl.h>
#include <linux/compat.h>
#include <linux/dma-direction.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <power/regulator.h>
#include <asm/unaligned.h>

#include "tmio-common.h"

DECLARE_GLOBAL_DATA_PTR;

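/*
 * Register access helpers. The same IP is wired to buses of different
 * widths depending on the SoC, and the register stride changes with the
 * bus width: with TMIO_SD_CAP_64BIT the byte offsets are doubled
 * (reg << 1), with TMIO_SD_CAP_16BIT they are halved (reg >> 1), and on
 * the plain 32-bit bus they are used as-is. tmio_sd_readl() and
 * tmio_sd_writel() hide this difference from the rest of the driver.
 */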
static u64 tmio_sd_readq(struct tmio_sd_priv *priv, unsigned int reg)
{
	return readq(priv->regbase + (reg << 1));
}

static void tmio_sd_writeq(struct tmio_sd_priv *priv,
			       u64 val, unsigned int reg)
{
	writeq(val, priv->regbase + (reg << 1));
}

static u16 tmio_sd_readw(struct tmio_sd_priv *priv, unsigned int reg)
{
	return readw(priv->regbase + (reg >> 1));
}

static void tmio_sd_writew(struct tmio_sd_priv *priv,
			       u16 val, unsigned int reg)
{
	writew(val, priv->regbase + (reg >> 1));
}

u32 tmio_sd_readl(struct tmio_sd_priv *priv, unsigned int reg)
{
	u32 val;

	if (priv->caps & TMIO_SD_CAP_64BIT)
		return readl(priv->regbase + (reg << 1));
	else if (priv->caps & TMIO_SD_CAP_16BIT) {
		val = readw(priv->regbase + (reg >> 1)) & 0xffff;
		if ((reg == TMIO_SD_RSP10) || (reg == TMIO_SD_RSP32) ||
		    (reg == TMIO_SD_RSP54) || (reg == TMIO_SD_RSP76)) {
			val |= readw(priv->regbase + (reg >> 1) + 2) << 16;
		}
		return val;
	} else
		return readl(priv->regbase + reg);
}

void tmio_sd_writel(struct tmio_sd_priv *priv,
			       u32 val, unsigned int reg)
{
	if (priv->caps & TMIO_SD_CAP_64BIT)
		writel(val, priv->regbase + (reg << 1));
	else if (priv->caps & TMIO_SD_CAP_16BIT) {
		writew(val & 0xffff, priv->regbase + (reg >> 1));
		if (reg == TMIO_SD_INFO1 || reg == TMIO_SD_INFO1_MASK ||
		    reg == TMIO_SD_INFO2 || reg == TMIO_SD_INFO2_MASK ||
		    reg == TMIO_SD_ARG)
			writew(val >> 16, priv->regbase + (reg >> 1) + 2);
	} else
		writel(val, priv->regbase + reg);
}

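/*
 * Minimal cache-maintenance helpers standing in for a DMA mapping API:
 * the buffer is flushed (or invalidated for DMA_FROM_DEVICE) before the
 * transfer so the device sees coherent data, and invalidated again on
 * unmap for any direction other than DMA_TO_DEVICE so the CPU does not
 * read stale cache lines after the device has written the buffer.
 */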
static dma_addr_t __dma_map_single(void *ptr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long)ptr;

	if (dir == DMA_FROM_DEVICE)
		invalidate_dcache_range(addr, addr + size);
	else
		flush_dcache_range(addr, addr + size);

	return addr;
}

static void __dma_unmap_single(dma_addr_t addr, size_t size,
			       enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		invalidate_dcache_range(addr, addr + size);
}

static int tmio_sd_check_error(struct udevice *dev, struct mmc_cmd *cmd)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	u32 info2 = tmio_sd_readl(priv, TMIO_SD_INFO2);

	if (info2 & TMIO_SD_INFO2_ERR_RTO) {
		/*
		 * TIMEOUT must be returned for an unsupported command.  Do
		 * not display an error log since this might be part of a
		 * sequence to distinguish between SD and MMC.
		 */
		return -ETIMEDOUT;
	}

	if (info2 & TMIO_SD_INFO2_ERR_TO) {
		dev_err(dev, "timeout error\n");
		return -ETIMEDOUT;
	}

	if (info2 & (TMIO_SD_INFO2_ERR_END | TMIO_SD_INFO2_ERR_CRC |
		     TMIO_SD_INFO2_ERR_IDX)) {
		if ((cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK) &&
		    (cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK_HS200))
			dev_err(dev, "communication out of sync\n");
		return -EILSEQ;
	}

	if (info2 & (TMIO_SD_INFO2_ERR_ILA | TMIO_SD_INFO2_ERR_ILR |
		     TMIO_SD_INFO2_ERR_ILW)) {
		dev_err(dev, "illegal access\n");
		return -EIO;
	}

	return 0;
}

static int tmio_sd_wait_for_irq(struct udevice *dev, struct mmc_cmd *cmd,
				unsigned int reg, u32 flag)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	long wait = 1000000;
	int ret;

	while (!(tmio_sd_readl(priv, reg) & flag)) {
		if (wait-- < 0) {
			dev_err(dev, "timeout\n");
			return -ETIMEDOUT;
		}

		ret = tmio_sd_check_error(dev, cmd);
		if (ret)
			return ret;

		udelay(1);
	}

	return 0;
}

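/*
 * The tmio_pio_{read,write}_fifo() macros below expand into width-specific
 * FIFO accessors (64, 32 and 16 bit). Each generated function takes a fast
 * path when the buffer is naturally aligned for its access width and
 * otherwise falls back to get_unaligned()/put_unaligned(), so unaligned
 * buffers still work, just more slowly.
 */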
#define tmio_pio_read_fifo(__width, __suffix)				\
static void tmio_pio_read_fifo_##__width(struct tmio_sd_priv *priv,	\
					  char *pbuf, uint blksz)	\
{									\
	u##__width *buf = (u##__width *)pbuf;				\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			*buf++ = tmio_sd_read##__suffix(priv,		\
							 TMIO_SD_BUF);	\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data;				\
			data = tmio_sd_read##__suffix(priv,		\
						       TMIO_SD_BUF);	\
			put_unaligned(data, buf++);			\
		}							\
	}								\
}

tmio_pio_read_fifo(64, q)
tmio_pio_read_fifo(32, l)
tmio_pio_read_fifo(16, w)

static int tmio_sd_pio_read_one_block(struct udevice *dev, struct mmc_cmd *cmd,
				      char *pbuf, uint blocksize)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;

	/* wait until the buffer is filled with data */
	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				   TMIO_SD_INFO2_BRE);
	if (ret)
		return ret;

	/*
	 * Clear the status flag _before_ reading the buffer out because
	 * TMIO_SD_INFO2_BRE is edge-triggered, not level-triggered.
	 */
	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	if (priv->caps & TMIO_SD_CAP_64BIT)
		tmio_pio_read_fifo_64(priv, pbuf, blocksize);
	else if (priv->caps & TMIO_SD_CAP_16BIT)
		tmio_pio_read_fifo_16(priv, pbuf, blocksize);
	else
		tmio_pio_read_fifo_32(priv, pbuf, blocksize);

	return 0;
}

#define tmio_pio_write_fifo(__width, __suffix)				\
static void tmio_pio_write_fifo_##__width(struct tmio_sd_priv *priv,	\
					   const char *pbuf, uint blksz)\
{									\
	const u##__width *buf = (const u##__width *)pbuf;		\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			tmio_sd_write##__suffix(priv, *buf++,		\
						 TMIO_SD_BUF);		\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data = get_unaligned(buf++);		\
			tmio_sd_write##__suffix(priv, data,		\
						 TMIO_SD_BUF);		\
		}							\
	}								\
}

tmio_pio_write_fifo(64, q)
tmio_pio_write_fifo(32, l)
tmio_pio_write_fifo(16, w)

static int tmio_sd_pio_write_one_block(struct udevice *dev, struct mmc_cmd *cmd,
					   const char *pbuf, uint blocksize)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;

	/* wait until the buffer becomes empty */
	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				   TMIO_SD_INFO2_BWE);
	if (ret)
		return ret;

	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	if (priv->caps & TMIO_SD_CAP_64BIT)
		tmio_pio_write_fifo_64(priv, pbuf, blocksize);
	else if (priv->caps & TMIO_SD_CAP_16BIT)
		tmio_pio_write_fifo_16(priv, pbuf, blocksize);
	else
		tmio_pio_write_fifo_32(priv, pbuf, blocksize);

	return 0;
}

static int tmio_sd_pio_xfer(struct udevice *dev, struct mmc_cmd *cmd,
			    struct mmc_data *data)
{
	const char *src = data->src;
	char *dest = data->dest;
	int i, ret;

	for (i = 0; i < data->blocks; i++) {
		if (data->flags & MMC_DATA_READ)
			ret = tmio_sd_pio_read_one_block(dev, cmd, dest,
							     data->blocksize);
		else
			ret = tmio_sd_pio_write_one_block(dev, cmd, src,
							      data->blocksize);
		if (ret)
			return ret;

		if (data->flags & MMC_DATA_READ)
			dest += data->blocksize;
		else
			src += data->blocksize;
	}

	return 0;
}

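/*
 * Start the internal DMA engine: clear the DMA status registers, set the
 * DMA enable bit in EXTMODE, program the buffer address as two 32-bit
 * halves (the shift below is written so it also compiles when dma_addr_t
 * is only 32 bits wide) and finally kick the transfer.
 */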
static void tmio_sd_dma_start(struct tmio_sd_priv *priv,
				  dma_addr_t dma_addr)
{
	u32 tmp;

	tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO1);
	tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO2);

	/* enable DMA */
	tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE);
	tmp |= TMIO_SD_EXTMODE_DMA_EN;
	tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE);

	tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_L);

	/* suppress the warning "right shift count >= width of type" */
	dma_addr >>= min_t(int, 32, 8 * sizeof(dma_addr));

	tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_H);

	tmio_sd_writel(priv, TMIO_SD_DMA_CTL_START, TMIO_SD_DMA_CTL);
}

static int tmio_sd_dma_wait_for_irq(struct udevice *dev, u32 flag,
					unsigned int blocks)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	long wait = 1000000 + 10 * blocks;

	while (!(tmio_sd_readl(priv, TMIO_SD_DMA_INFO1) & flag)) {
		if (wait-- < 0) {
			dev_err(dev, "timeout during DMA\n");
			return -ETIMEDOUT;
		}

		udelay(10);
	}

	if (tmio_sd_readl(priv, TMIO_SD_DMA_INFO2)) {
		dev_err(dev, "error during DMA\n");
		return -EIO;
	}

	return 0;
}

static int tmio_sd_dma_xfer(struct udevice *dev, struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	size_t len = data->blocks * data->blocksize;
	void *buf;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	u32 poll_flag, tmp;
	int ret;

	tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);

	if (data->flags & MMC_DATA_READ) {
		buf = data->dest;
		dir = DMA_FROM_DEVICE;
		/*
		 * The position of the DMA READ completion flag differs on
		 * Socionext and Renesas SoCs. On Socionext SoCs it is bit 20,
		 * and using bit 17 there is a hardware bug and is forbidden.
		 * On Renesas SoCs it is either bit 17 or bit 20, depending on
		 * the SoC.
		 */
		poll_flag = priv->read_poll_flag;
		tmp |= TMIO_SD_DMA_MODE_DIR_RD;
	} else {
		buf = (void *)data->src;
		dir = DMA_TO_DEVICE;
		poll_flag = TMIO_SD_DMA_INFO1_END_WR;
		tmp &= ~TMIO_SD_DMA_MODE_DIR_RD;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);

	dma_addr = __dma_map_single(buf, len, dir);

	tmio_sd_dma_start(priv, dma_addr);

	ret = tmio_sd_dma_wait_for_irq(dev, poll_flag, data->blocks);

	if (poll_flag == TMIO_SD_DMA_INFO1_END_RD)
		udelay(1);

	__dma_unmap_single(dma_addr, len, dir);

	return ret;
}

/* check if the address is DMA'able */
static bool tmio_sd_addr_is_dmaable(const char *src)
{
	uintptr_t addr = (uintptr_t)src;

	if (!IS_ALIGNED(addr, TMIO_SD_DMA_MINALIGN))
		return false;

#if defined(CONFIG_RCAR_GEN3)
	/* Gen3 DMA has a 32-bit limit */
	if (addr >> 32)
		return false;
#endif

#if defined(CONFIG_ARCH_UNIPHIER) && !defined(CONFIG_ARM64) && \
	defined(CONFIG_SPL_BUILD)
	/*
	 * For UniPhier ARMv7 SoCs, the stack is allocated in the locked ways
	 * of L2, which is unreachable from the DMA engine.
	 */
	if (addr < CONFIG_SPL_STACK)
		return false;
#endif

	return true;
}

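/*
 * Send one command and run the associated data transfer, if any: bail out
 * if the controller is still busy, clear stale status, disable DMA,
 * program the argument and block geometry, compose the CMD word with an
 * explicit response type, then wait for the response, run the data phase
 * (DMA when possible, PIO otherwise) and finally wait for the
 * transfer-complete and SCLKDIVEN flags.
 */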
int tmio_sd_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		      struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;
	u32 tmp;

	if (tmio_sd_readl(priv, TMIO_SD_INFO2) & TMIO_SD_INFO2_CBSY) {
		dev_err(dev, "command busy\n");
		return -EBUSY;
	}

	/* clear all status flags */
	tmio_sd_writel(priv, 0, TMIO_SD_INFO1);
	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	/* disable DMA once */
	tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE);
	tmp &= ~TMIO_SD_EXTMODE_DMA_EN;
	tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE);

	tmio_sd_writel(priv, cmd->cmdarg, TMIO_SD_ARG);

	tmp = cmd->cmdidx;

	if (data) {
		tmio_sd_writel(priv, data->blocksize, TMIO_SD_SIZE);
		tmio_sd_writel(priv, data->blocks, TMIO_SD_SECCNT);

		/* Do not send CMD12 automatically */
		tmp |= TMIO_SD_CMD_NOSTOP | TMIO_SD_CMD_DATA;

		if (data->blocks > 1)
			tmp |= TMIO_SD_CMD_MULTI;

		if (data->flags & MMC_DATA_READ)
			tmp |= TMIO_SD_CMD_RD;
	}

	/*
	 * Do not use the response type auto-detection on this hardware.
	 * CMD8, for example, has different response types on SD and eMMC,
	 * while this controller always assumes the SD response type.
	 * Set the response type manually.
	 */
	switch (cmd->resp_type) {
	case MMC_RSP_NONE:
		tmp |= TMIO_SD_CMD_RSP_NONE;
		break;
	case MMC_RSP_R1:
		tmp |= TMIO_SD_CMD_RSP_R1;
		break;
	case MMC_RSP_R1b:
		tmp |= TMIO_SD_CMD_RSP_R1B;
		break;
	case MMC_RSP_R2:
		tmp |= TMIO_SD_CMD_RSP_R2;
		break;
	case MMC_RSP_R3:
		tmp |= TMIO_SD_CMD_RSP_R3;
		break;
	default:
		dev_err(dev, "unknown response type\n");
		return -EINVAL;
	}

	dev_dbg(dev, "sending CMD%d (SD_CMD=%08x, SD_ARG=%08x)\n",
		cmd->cmdidx, tmp, cmd->cmdarg);
	tmio_sd_writel(priv, tmp, TMIO_SD_CMD);

	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO1,
				   TMIO_SD_INFO1_RSP);
	if (ret)
		return ret;

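	/*
	 * The response registers hold bits 127:8 of a 136-bit (R2) response
	 * spread across RSP76..RSP10, so each 32-bit word is shifted left by
	 * 8 and combined with the top byte of the next lower register to
	 * rebuild cmd->response[] in the layout the MMC core expects.
	 */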
	if (cmd->resp_type & MMC_RSP_136) {
		u32 rsp_127_104 = tmio_sd_readl(priv, TMIO_SD_RSP76);
		u32 rsp_103_72 = tmio_sd_readl(priv, TMIO_SD_RSP54);
		u32 rsp_71_40 = tmio_sd_readl(priv, TMIO_SD_RSP32);
		u32 rsp_39_8 = tmio_sd_readl(priv, TMIO_SD_RSP10);

		cmd->response[0] = ((rsp_127_104 & 0x00ffffff) << 8) |
				   ((rsp_103_72  & 0xff000000) >> 24);
		cmd->response[1] = ((rsp_103_72  & 0x00ffffff) << 8) |
				   ((rsp_71_40   & 0xff000000) >> 24);
		cmd->response[2] = ((rsp_71_40   & 0x00ffffff) << 8) |
				   ((rsp_39_8    & 0xff000000) >> 24);
		cmd->response[3] = (rsp_39_8     & 0xffffff)   << 8;
	} else {
		/* bits 39-8 */
		cmd->response[0] = tmio_sd_readl(priv, TMIO_SD_RSP10);
	}

	if (data) {
		/* use DMA if the HW supports it and the buffer is aligned */
		if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL &&
		    tmio_sd_addr_is_dmaable(data->src))
			ret = tmio_sd_dma_xfer(dev, data);
		else
			ret = tmio_sd_pio_xfer(dev, cmd, data);
		if (ret)
			return ret;

		ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO1,
					   TMIO_SD_INFO1_CMP);
		if (ret)
			return ret;
	}

	return tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				    TMIO_SD_INFO2_SCLKDIVEN);
}

static int tmio_sd_set_bus_width(struct tmio_sd_priv *priv,
				     struct mmc *mmc)
{
	u32 val, tmp;

	switch (mmc->bus_width) {
	case 0:
	case 1:
		val = TMIO_SD_OPTION_WIDTH_1;
		break;
	case 4:
		val = TMIO_SD_OPTION_WIDTH_4;
		break;
	case 8:
		val = TMIO_SD_OPTION_WIDTH_8;
		break;
	default:
		return -EINVAL;
	}

	tmp = tmio_sd_readl(priv, TMIO_SD_OPTION);
	tmp &= ~TMIO_SD_OPTION_WIDTH_MASK;
	tmp |= val;
	tmio_sd_writel(priv, tmp, TMIO_SD_OPTION);

	return 0;
}

static void tmio_sd_set_ddr_mode(struct tmio_sd_priv *priv,
				     struct mmc *mmc)
{
	u32 tmp;

	tmp = tmio_sd_readl(priv, TMIO_SD_IF_MODE);
	if (mmc->ddr_mode)
		tmp |= TMIO_SD_IF_MODE_DDR;
	else
		tmp &= ~TMIO_SD_IF_MODE_DDR;
	tmio_sd_writel(priv, tmp, TMIO_SD_IF_MODE);
}

static ulong tmio_sd_clk_get_rate(struct tmio_sd_priv *priv)
{
	return priv->clk_get_rate(priv);
}

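/*
 * Select the smallest divider that keeps the card clock at or below the
 * requested rate. As a purely illustrative example (actual rates depend on
 * the SoC clock tree): with mclk = 200 MHz and mmc->clock = 52 MHz the
 * divisor rounds up to 4, so TMIO_SD_CLKCTL_DIV4 is chosen and the card
 * clock becomes 50 MHz.
 */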
static void tmio_sd_set_clk_rate(struct tmio_sd_priv *priv, struct mmc *mmc)
{
	unsigned int divisor;
	u32 tmp, val = 0;
	ulong mclk;

	if (mmc->clock) {
		mclk = tmio_sd_clk_get_rate(priv);

		divisor = DIV_ROUND_UP(mclk, mmc->clock);

		/* Do not set divider to 0xff in DDR mode */
		if (mmc->ddr_mode && (divisor == 1))
			divisor = 2;

		if (divisor <= 1)
			val = (priv->caps & TMIO_SD_CAP_RCAR) ?
			      TMIO_SD_CLKCTL_RCAR_DIV1 : TMIO_SD_CLKCTL_DIV1;
		else if (divisor <= 2)
			val = TMIO_SD_CLKCTL_DIV2;
		else if (divisor <= 4)
			val = TMIO_SD_CLKCTL_DIV4;
		else if (divisor <= 8)
			val = TMIO_SD_CLKCTL_DIV8;
		else if (divisor <= 16)
			val = TMIO_SD_CLKCTL_DIV16;
		else if (divisor <= 32)
			val = TMIO_SD_CLKCTL_DIV32;
		else if (divisor <= 64)
			val = TMIO_SD_CLKCTL_DIV64;
		else if (divisor <= 128)
			val = TMIO_SD_CLKCTL_DIV128;
		else if (divisor <= 256)
			val = TMIO_SD_CLKCTL_DIV256;
		else if (divisor <= 512 || !(priv->caps & TMIO_SD_CAP_DIV1024))
			val = TMIO_SD_CLKCTL_DIV512;
		else
			val = TMIO_SD_CLKCTL_DIV1024;
	}

	tmp = tmio_sd_readl(priv, TMIO_SD_CLKCTL);
	if (mmc->clock &&
	    !((tmp & TMIO_SD_CLKCTL_SCLKEN) &&
	      ((tmp & TMIO_SD_CLKCTL_DIV_MASK) == val))) {
		/*
		 * Stop the clock before changing its rate
		 * to avoid glitches on the clock signal
		 */
		tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
		tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

		/* Change the clock rate. */
		tmp &= ~TMIO_SD_CLKCTL_DIV_MASK;
		tmp |= val;
	}

	/* Enable or disable the clock */
	if (mmc->clk_disable) {
		tmp |= TMIO_SD_CLKCTL_OFFEN;
		tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
	} else {
		tmp &= ~TMIO_SD_CLKCTL_OFFEN;
		tmp |= TMIO_SD_CLKCTL_SCLKEN;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

	udelay(1000);
}

static void tmio_sd_set_pins(struct udevice *dev)
{
	__maybe_unused struct mmc *mmc = mmc_get_mmc_dev(dev);

#ifdef CONFIG_DM_REGULATOR
	struct tmio_sd_priv *priv = dev_get_priv(dev);

	if (priv->vqmmc_dev) {
		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(priv->vqmmc_dev, 1800000);
		else
			regulator_set_value(priv->vqmmc_dev, 3300000);
		regulator_set_enable(priv->vqmmc_dev, true);
	}
#endif

#ifdef CONFIG_PINCTRL
	if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
		pinctrl_select_state(dev, "state_uhs");
	else
		pinctrl_select_state(dev, "default");
#endif
}

int tmio_sd_set_ios(struct udevice *dev)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	int ret;

	dev_dbg(dev, "clock %uHz, DDRmode %d, width %u\n",
		mmc->clock, mmc->ddr_mode, mmc->bus_width);

	tmio_sd_set_clk_rate(priv, mmc);
	ret = tmio_sd_set_bus_width(priv, mmc);
	if (ret)
		return ret;
	tmio_sd_set_ddr_mode(priv, mmc);
	tmio_sd_set_pins(dev);

	return 0;
}

int tmio_sd_get_cd(struct udevice *dev)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);

	if (priv->caps & TMIO_SD_CAP_NONREMOVABLE)
		return 1;

	return !!(tmio_sd_readl(priv, TMIO_SD_INFO1) &
		  TMIO_SD_INFO1_CD);
}

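/*
 * One-time controller setup: pulse the soft reset bit, set the SEC bit in
 * the STOP register, program TMIO_SD_HOST_MODE according to the IP version
 * (the register layout dropped backward compatibility at version 0x10) and
 * enable address increment for the internal DMA engine when it is present.
 */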
static void tmio_sd_host_init(struct tmio_sd_priv *priv)
{
	u32 tmp;

	/* soft reset of the host */
	tmp = tmio_sd_readl(priv, TMIO_SD_SOFT_RST);
	tmp &= ~TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);
	tmp |= TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);

	/* FIXME: implement eMMC hw_reset */

	tmio_sd_writel(priv, TMIO_SD_STOP_SEC, TMIO_SD_STOP);

	/*
	 * Connected to a 32-bit AXI bus.
	 * This register dropped backward compatibility at version 0x10.
	 * Write an appropriate value depending on the IP version.
	 */
	if (priv->version >= 0x10) {
		if (priv->caps & TMIO_SD_CAP_64BIT)
			tmio_sd_writel(priv, 0x000, TMIO_SD_HOST_MODE);
		else
			tmio_sd_writel(priv, 0x101, TMIO_SD_HOST_MODE);
	} else {
		tmio_sd_writel(priv, 0x0, TMIO_SD_HOST_MODE);
	}

	if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL) {
		tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);
		tmp |= TMIO_SD_DMA_MODE_ADDR_INC;
		tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);
	}
}

int tmio_sd_bind(struct udevice *dev)
{
	struct tmio_sd_plat *plat = dev_get_platdata(dev);

	return mmc_bind(dev, &plat->mmc, &plat->cfg);
}

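/*
 * Common probe path shared by the SoC front-end drivers: map the register
 * block, optionally pick up the vqmmc regulator, parse the host
 * capabilities from the device tree, derive extra capabilities from the IP
 * version register, reset the controller and fill in the mmc_config limits
 * from the module clock rate.
 */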
int tmio_sd_probe(struct udevice *dev, u32 quirks)
{
	struct tmio_sd_plat *plat = dev_get_platdata(dev);
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	fdt_addr_t base;
	ulong mclk;
	int ret;

	base = devfdt_get_addr(dev);
	if (base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->regbase = devm_ioremap(dev, base, SZ_2K);
	if (!priv->regbase)
		return -ENOMEM;

#ifdef CONFIG_DM_REGULATOR
	device_get_supply_regulator(dev, "vqmmc-supply", &priv->vqmmc_dev);
	if (priv->vqmmc_dev)
		regulator_set_value(priv->vqmmc_dev, 3300000);
#endif

	ret = mmc_of_parse(dev, &plat->cfg);
	if (ret < 0) {
		dev_err(dev, "failed to parse host caps\n");
		return ret;
	}

	plat->cfg.name = dev->name;
	plat->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	if (quirks)
		priv->caps = quirks;

	priv->version = tmio_sd_readl(priv, TMIO_SD_VERSION) &
						TMIO_SD_VERSION_IP;
	dev_dbg(dev, "version %x\n", priv->version);
	if (priv->version >= 0x10) {
		priv->caps |= TMIO_SD_CAP_DMA_INTERNAL;
		priv->caps |= TMIO_SD_CAP_DIV1024;
	}

	if (fdt_get_property(gd->fdt_blob, dev_of_offset(dev), "non-removable",
			     NULL))
		priv->caps |= TMIO_SD_CAP_NONREMOVABLE;

	tmio_sd_host_init(priv);

	mclk = tmio_sd_clk_get_rate(priv);

	plat->cfg.voltages = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34;
	plat->cfg.f_min = mclk /
			(priv->caps & TMIO_SD_CAP_DIV1024 ? 1024 : 512);
	plat->cfg.f_max = mclk;
	if (quirks & TMIO_SD_CAP_16BIT)
		plat->cfg.b_max = U16_MAX; /* max value of TMIO_SD_SECCNT */
	else
		plat->cfg.b_max = U32_MAX; /* max value of TMIO_SD_SECCNT */

	upriv->mmc = &plat->mmc;

	return 0;
}