// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 */

#include <bouncebuf.h>
#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>

#define PAGE_SIZE 4096

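/*
 * Write a reset request to the CTRL register and poll until the
 * controller clears the reset bits. Returns 1 on success, 0 if the
 * reset did not complete within the polling budget.
 */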
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

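/*
 * Fill one internal DMA (IDMAC) descriptor: flags, byte count, buffer
 * address, and a link to the physically following descriptor.
 */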
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

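/*
 * Build the IDMAC descriptor chain for a transfer (one descriptor per
 * chunk of up to eight blocks, the last one marked with the LD flag),
 * flush it to memory and program the controller for internal DMA.
 */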
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else {
			cnt = data->blocksize * 8;
		}

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

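/*
 * Poll the STATUS register until the given FIFO status bit clears,
 * returning the last STATUS value through *len. Returns 0 on success
 * or -ETIMEDOUT if the bit never clears.
 */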
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

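/*
 * Wait for a data transfer to complete. In FIFO (PIO) mode the data is
 * moved to or from the data register here; in DMA mode the function
 * only watches the interrupt status for completion, error or timeout.
 */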
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	u32 timeout = 240000;
	u32 mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks / 4;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY,
							&len);
					if (ret < 0)
						break;

					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL,
							&len);
					if (ret < 0)
						break;

					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

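/* Translate the data direction into the command-register data-phase flags. */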
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

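/*
 * Send one MMC command, optionally with a data phase. DMA transfers go
 * through a bounce buffer and the IDMAC descriptor chain; FIFO mode
 * programs the block registers directly and moves the data by PIO.
 */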
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate,
						    (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate,
						    (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this. */
		if (!host->fifo_mode) {
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;
			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* clear interrupts */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

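/*
 * Program the card clock divider for the requested frequency and issue
 * the "update clock only" commands that make the new setting take
 * effect, first with the clock disabled and then re-enabled.
 */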
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined, then assume that
	 * host->bus_hz is the source clock value. host->bus_hz
	 * should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

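/*
 * Apply the requested bus settings: card clock, bus width, DDR mode,
 * and the board-specific clksel hook if one is provided.
 */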
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}

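/*
 * Reset the controller and apply the initial configuration: card power,
 * interrupt masks, timeouts, FIFO thresholds, and the internal DMA
 * interrupts unless FIFO (PIO) mode is used.
 */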
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400 kHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif

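/*
 * Fill in the generic mmc_config from the dwmci_host: clock limits,
 * supported voltages, bus width and high-speed capabilities.
 */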
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

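/*
 * Registration: with CONFIG_BLK the caller binds the block device via
 * dwmci_bind(); otherwise add_dwmci() creates and registers the mmc
 * device directly from the host structure.
 */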
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif