xref: /openbmc/linux/drivers/mmc/host/alcor.c (revision 367e5927)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
4  *
5  * Driver for Alcor Micro AU6601 and AU6621 controllers
6  */
7 
8 /* Note: this driver was created without any documentation. Based
9  * on sniffing, testing and in some cases mimic of original driver.
10  * As soon as some one with documentation or more experience in SD/MMC, or
11  * reverse engineering then me, please review this driver and question every
12  * thing what I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
13  */
14 
15 #include <linux/delay.h>
16 #include <linux/pci.h>
17 #include <linux/module.h>
18 #include <linux/io.h>
19 #include <linux/pm.h>
20 #include <linux/irq.h>
21 #include <linux/interrupt.h>
22 #include <linux/platform_device.h>
23 
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/mmc.h>
26 
27 #include <linux/alcor_pci.h>
28 
/* DMA mapping state tracked in mmc_data->host_cookie. */
enum alcor_cookie {
	COOKIE_UNMAPPED,	/* not DMA-mapped; PIO path is used */
	COOKIE_PRE_MAPPED,	/* NOTE(review): declared but unused in this file */
	COOKIE_MAPPED,		/* mapped via dma_map_sg() in alcor_pre_req() */
};
34 
/* One selectable clock source together with its usable divider range. */
struct alcor_pll_conf {
	unsigned int clk_src_freq;	/* source frequency in Hz */
	unsigned int clk_src_reg;	/* source bits for AU6601_CLK_SELECT */
	unsigned int min_div;		/* smallest usable divider */
	unsigned int max_div;		/* largest usable divider */
};
41 
/* Per-host driver state, lives in the mmc_host private area. */
struct alcor_sdmmc_host {
	struct  device *dev;
	struct alcor_pci_priv *alcor_pci;	/* shared PCI-level register accessors */

	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* in-flight request, NULL when idle */
	struct mmc_command *cmd;	/* command currently on the bus */
	struct mmc_data *data;		/* data stage of the current request */
	unsigned int dma_on:1;		/* DMA transfer configured/active */
	unsigned int early_data:1;	/* data xfer triggered before CMD end */

	struct mutex cmd_mutex;		/* serializes request/ios vs. irq thread */

	struct delayed_work timeout_work;	/* software request watchdog */

	struct sg_mapping_iter sg_miter;	/* SG state for PIO */
	struct scatterlist *sg;		/* next DMA segment to program */
	unsigned int blocks;		/* remaining PIO blocks */
	int sg_count;			/* remaining DMA segments */

	u32			irq_status_sd;	/* status latched for the irq thread */
	unsigned char		cur_power_mode;	/* last applied ios->power_mode */
};
65 
/* Selectable clock sources, ascending frequency; frequencies are in Hz. */
static const struct alcor_pll_conf alcor_pll_cfg[] = {
	/* Hz,		CLK src,		min div, max div */
	{ 31250000,	AU6601_CLK_31_25_MHZ,	1,	511},
	{ 48000000,	AU6601_CLK_48_MHZ,	1,	511},
	{125000000,	AU6601_CLK_125_MHZ,	1,	511},
	{384000000,	AU6601_CLK_384_MHZ,	1,	511},
};
73 
74 static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
75 			       u8 clear, u8 set)
76 {
77 	struct alcor_pci_priv *priv = host->alcor_pci;
78 	u32 var;
79 
80 	var = alcor_read8(priv, addr);
81 	var &= ~clear;
82 	var |= set;
83 	alcor_write8(priv, var, addr);
84 }
85 
86 /* As soon as irqs are masked, some status updates may be missed.
87  * Use this with care.
88  */
89 static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
90 {
91 	struct alcor_pci_priv *priv = host->alcor_pci;
92 
93 	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
94 }
95 
96 static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
97 {
98 	struct alcor_pci_priv *priv = host->alcor_pci;
99 
100 	alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
101 		  AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
102 		  AU6601_INT_OVER_CURRENT_ERR,
103 		  AU6601_REG_INT_ENABLE);
104 }
105 
106 static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
107 {
108 	struct alcor_pci_priv *priv = host->alcor_pci;
109 	int i;
110 
111 	alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
112 		      AU6601_REG_SW_RESET);
113 	for (i = 0; i < 100; i++) {
114 		if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
115 			return;
116 		udelay(50);
117 	}
118 	dev_err(host->dev, "%s: timeout\n", __func__);
119 }
120 
121 static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
122 {
123 	struct alcor_pci_priv *priv = host->alcor_pci;
124 	u32 addr;
125 
126 	if (!host->sg_count)
127 		return;
128 
129 	if (!host->sg) {
130 		dev_err(host->dev, "have blocks, but no SG\n");
131 		return;
132 	}
133 
134 	if (!sg_dma_len(host->sg)) {
135 		dev_err(host->dev, "DMA SG len == 0\n");
136 		return;
137 	}
138 
139 
140 	addr = (u32)sg_dma_address(host->sg);
141 
142 	alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
143 	host->sg = sg_next(host->sg);
144 	host->sg_count--;
145 }
146 
/*
 * Kick the controller's data engine for the current data stage.
 *
 * DMA path (COOKIE_MAPPED): programs the next segment address and a
 * block size covering all mapped segments. When @early is set, the
 * transfer is started before the command-end interrupt; the early_data
 * flag then makes the later (post-command) trigger a no-op.
 *
 * PIO path: only the block size is programmed; data moves through
 * alcor_trf_block_pio() on buffer-ready interrupts.
 */
static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
					bool early)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = host->data;
	u8 ctrl = 0;

	if (data->flags & MMC_DATA_WRITE)
		ctrl |= AU6601_DATA_WRITE;

	if (data->host_cookie == COOKIE_MAPPED) {
		/* second trigger of a transfer already started early */
		if (host->early_data) {
			host->early_data = false;
			return;
		}

		host->early_data = early;

		alcor_data_set_dma(host);
		ctrl |= AU6601_DATA_DMA_MODE;
		host->dma_on = 1;
		/* 0x1000 == 4096 bytes, the fixed per-segment DMA size */
		alcor_write32(priv, data->sg_count * 0x1000,
			       AU6601_REG_BLOCK_SIZE);
	} else {
		alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
	}

	alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
		      AU6601_DATA_XFER_CTRL);
}
177 
/*
 * Move one block between the controller buffer and the scatterlist
 * using 32-bit PIO accesses, on READ/WRITE_BUF_RDY interrupts.
 * @read: true for the card-to-host direction.
 */
static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	size_t blksize, len;
	u8 *buf;

	if (!host->blocks)
		return;

	/* a PIO buffer-ready IRQ must not fire while DMA is configured */
	if (host->dma_on) {
		dev_err(host->dev, "configured DMA but got PIO request.\n");
		return;
	}

	if (!!(host->data->flags & MMC_DATA_READ) != read) {
		dev_err(host->dev, "got unexpected direction %i != %i\n",
			!!(host->data->flags & MMC_DATA_READ), read);
	}

	if (!sg_miter_next(&host->sg_miter))
		return;

	/* at most one block per interrupt, bounded by the sg chunk */
	blksize = host->data->blksz;
	len = min(host->sg_miter.length, blksize);

	dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
		read ? "read" : "write", blksize);

	host->sg_miter.consumed = len;
	host->blocks--;

	buf = host->sg_miter.addr;

	/*
	 * NOTE(review): len >> 2 silently drops a non-word-aligned tail;
	 * assumes blksz is a multiple of 4 — confirm for the PIO path.
	 */
	if (read)
		ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
	else
		iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);

	sg_miter_stop(&host->sg_miter);
}
218 
219 static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
220 {
221 	unsigned int flags = SG_MITER_ATOMIC;
222 	struct mmc_data *data = host->data;
223 
224 	if (data->flags & MMC_DATA_READ)
225 		flags |= SG_MITER_TO_SG;
226 	else
227 		flags |= SG_MITER_FROM_SG;
228 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
229 }
230 
231 static void alcor_prepare_data(struct alcor_sdmmc_host *host,
232 			       struct mmc_command *cmd)
233 {
234 	struct mmc_data *data = cmd->data;
235 
236 	if (!data)
237 		return;
238 
239 
240 	host->data = data;
241 	host->data->bytes_xfered = 0;
242 	host->blocks = data->blocks;
243 	host->sg = data->sg;
244 	host->sg_count = data->sg_count;
245 	dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
246 			host->sg_count, host->blocks);
247 
248 	if (data->host_cookie != COOKIE_MAPPED)
249 		alcor_prepare_sg_miter(host);
250 
251 	alcor_trigger_data_transfer(host, true);
252 }
253 
/*
 * Program and start @cmd on the bus; also prepares and (early-)triggers
 * its data stage. With @set_timeout a software watchdog is armed using
 * the command's busy_timeout for data-less commands, 10s otherwise.
 * Called with cmd_mutex held (alcor_request()) or from the irq thread.
 */
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
			   struct mmc_command *cmd, bool set_timeout)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	unsigned long timeout = 0;
	u8 ctrl = 0;

	host->cmd = cmd;
	alcor_prepare_data(host, cmd);

	dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg; 0x%08x\n",
		cmd->opcode, cmd->arg);
	/* 0x40: start + transmission bits of the SD command token */
	alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
	alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);

	/* map the MMC response type onto the controller's CMD mode bits */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		ctrl = AU6601_CMD_NO_RESP;
		break;
	case MMC_RSP_R1:
		ctrl = AU6601_CMD_6_BYTE_CRC;
		break;
	case MMC_RSP_R1B:
		ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
		break;
	case MMC_RSP_R2:
		ctrl = AU6601_CMD_17_BYTE_CRC;
		break;
	case MMC_RSP_R3:
		ctrl = AU6601_CMD_6_BYTE_WO_CRC;
		break;
	default:
		dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
			mmc_hostname(host->mmc), mmc_resp_type(cmd));
		break;
	}

	if (set_timeout) {
		/* data-less commands may carry their own busy timeout */
		if (!cmd->data && cmd->busy_timeout)
			timeout = cmd->busy_timeout;
		else
			timeout = 10000;

		schedule_delayed_work(&host->timeout_work,
				      msecs_to_jiffies(timeout));
	}

	dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
	alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
				 AU6601_CMD_XFER_CTRL);
}
305 
306 static void alcor_request_complete(struct alcor_sdmmc_host *host,
307 				   bool cancel_timeout)
308 {
309 	struct mmc_request *mrq;
310 
311 	/*
312 	 * If this work gets rescheduled while running, it will
313 	 * be run again afterwards but without any active request.
314 	 */
315 	if (!host->mrq)
316 		return;
317 
318 	if (cancel_timeout)
319 		cancel_delayed_work(&host->timeout_work);
320 
321 	mrq = host->mrq;
322 
323 	host->mrq = NULL;
324 	host->cmd = NULL;
325 	host->data = NULL;
326 	host->dma_on = 0;
327 
328 	mmc_request_done(host->mmc, mrq);
329 }
330 
/*
 * Complete the data stage of the current request: account transferred
 * bytes, send CMD12 where required, otherwise complete the request.
 */
static void alcor_finish_data(struct alcor_sdmmc_host *host)
{
	struct mmc_data *data;

	data = host->data;
	host->data = NULL;
	host->dma_on = 0;

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error)
			alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

		alcor_unmask_sd_irqs(host);
		alcor_send_cmd(host, data->stop, false);
		return;
	}

	alcor_request_complete(host, 1);
}
374 
375 static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
376 {
377 	dev_dbg(host->dev, "ERR IRQ %x\n", intmask);
378 
379 	if (host->cmd) {
380 		if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
381 			host->cmd->error = -ETIMEDOUT;
382 		else
383 			host->cmd->error = -EILSEQ;
384 	}
385 
386 	if (host->data) {
387 		if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
388 			host->data->error = -ETIMEDOUT;
389 		else
390 			host->data->error = -EILSEQ;
391 
392 		host->data->bytes_xfered = 0;
393 	}
394 
395 	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
396 	alcor_request_complete(host, 1);
397 }
398 
/*
 * Hard-IRQ part of command-end handling. Returns true when the event
 * was fully handled here; false when the threaded handler must run
 * (CMD_END without an active command, or a data-less command whose
 * request still needs completing).
 */
static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	intmask &= AU6601_INT_CMD_END;

	if (!intmask)
		return true;

	/* got CMD_END but no CMD is in progress, wake thread an process the
	 * error
	 */
	if (!host->cmd)
		return false;

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		struct mmc_command *cmd = host->cmd;

		cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
		dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
		if (host->cmd->flags & MMC_RSP_136) {
			cmd->resp[1] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP1);
			cmd->resp[2] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP2);
			cmd->resp[3] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP3);
			dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
				cmd->resp[1], cmd->resp[2], cmd->resp[3]);
		}

	}

	host->cmd->error = 0;

	/* Processed actual command. */
	if (!host->data)
		return false;

	/* command phase done; data interrupts drive the rest */
	alcor_trigger_data_transfer(host, false);
	host->cmd = NULL;
	return true;
}
442 
443 static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
444 {
445 	intmask &= AU6601_INT_CMD_END;
446 
447 	if (!intmask)
448 		return;
449 
450 	if (!host->cmd && intmask & AU6601_INT_CMD_END) {
451 		dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
452 			intmask);
453 	}
454 
455 	/* Processed actual command. */
456 	if (!host->data)
457 		alcor_request_complete(host, 1);
458 	else
459 		alcor_trigger_data_transfer(host, false);
460 	host->cmd = NULL;
461 }
462 
/*
 * Hard-IRQ part of data interrupt handling. Returns nonzero when the
 * event was fully handled here; 0 defers to the threaded handler
 * (DATA_END pending, or an error-looking condition).
 */
static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	u32 tmp;

	intmask &= AU6601_INT_DATA_MASK;

	/* nothing here to do */
	if (!intmask)
		return 1;

	/* we was too fast and got DATA_END after it was processed?
	 * lets ignore it for now.
	 */
	if (!host->data && intmask == AU6601_INT_DATA_END)
		return 1;

	/* looks like an error, so lets handle it. */
	if (!host->data)
		return 0;

	tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			 | AU6601_INT_DMA_END);
	switch (tmp) {
	case 0:
		break;
	case AU6601_INT_READ_BUF_RDY:
		alcor_trf_block_pio(host, true);
		if (!host->blocks)
			break;
		alcor_trigger_data_transfer(host, false);
		return 1;
	case AU6601_INT_WRITE_BUF_RDY:
		alcor_trf_block_pio(host, false);
		if (!host->blocks)
			break;
		alcor_trigger_data_transfer(host, false);
		return 1;
	case AU6601_INT_DMA_END:
		if (!host->sg_count)
			break;

		/* program the next DMA segment */
		alcor_data_set_dma(host);
		break;
	default:
		dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
		break;
	}

	/* DATA_END set: let the thread complete the data stage */
	if (intmask & AU6601_INT_DATA_END)
		return 0;

	return 1;
}
516 
/*
 * Threaded part of data interrupt handling: resets the data engine on
 * spurious interrupts and completes the data stage once the transfer
 * is done (DATA_END, no PIO blocks left, or all DMA segments drained).
 */
static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	intmask &= AU6601_INT_DATA_MASK;

	if (!intmask)
		return;

	if (!host->data) {
		dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
			intmask);
		alcor_reset(host, AU6601_RESET_DATA);
		return;
	}

	if (alcor_data_irq_done(host, intmask))
		return;

	if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
	    (host->dma_on && !host->sg_count))
		alcor_finish_data(host);
}
538 
539 static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
540 {
541 	dev_dbg(host->dev, "card %s\n",
542 		intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");
543 
544 	if (host->mrq) {
545 		dev_dbg(host->dev, "cancel all pending tasks.\n");
546 
547 		if (host->data)
548 			host->data->error = -ENOMEDIUM;
549 
550 		if (host->cmd)
551 			host->cmd->error = -ENOMEDIUM;
552 		else
553 			host->mrq->cmd->error = -ENOMEDIUM;
554 
555 		alcor_request_complete(host, 1);
556 	}
557 
558 	mmc_detect_change(host->mmc, msecs_to_jiffies(1));
559 }
560 
/*
 * Threaded IRQ handler: processes the status word latched by
 * alcor_irq() while holding cmd_mutex, then re-enables the SD
 * interrupt sources that the hard handler masked.
 */
static irqreturn_t alcor_irq_thread(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	irqreturn_t ret = IRQ_HANDLED;
	u32 intmask, tmp;

	mutex_lock(&host->cmd_mutex);

	intmask = host->irq_status_sd;

	/* some thing bad */
	if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
		dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
		ret = IRQ_NONE;
		goto exit;
	}

	tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	if (tmp) {
		/* errors take precedence over normal CMD/DATA handling */
		if (tmp & AU6601_INT_ERROR_MASK)
			alcor_err_irq(host, tmp);
		else {
			alcor_cmd_irq_thread(host, tmp);
			alcor_data_irq_thread(host, tmp);
		}
		intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	}

	if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
		alcor_cd_irq(host, intmask);
		intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
	}

	if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
		dev_warn(host->dev,
			 "warning: over current detected!\n");
		intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
	}

	if (intmask)
		dev_dbg(host->dev, "got not handled IRQ: 0x%04x\n", intmask);

exit:
	mutex_unlock(&host->cmd_mutex);
	alcor_unmask_sd_irqs(host);
	return ret;
}
608 
609 
/*
 * Hard IRQ handler: acknowledge the interrupt status, try to handle
 * simple CMD/DATA events entirely here (fast path), otherwise latch
 * the status, mask the SD irqs and defer to alcor_irq_thread().
 */
static irqreturn_t alcor_irq(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 status, tmp;
	irqreturn_t ret;
	int cmd_done, data_done;

	status = alcor_read32(priv, AU6601_REG_INT_STATUS);
	if (!status)
		return IRQ_NONE;

	/* write the bits back to acknowledge them */
	alcor_write32(priv, status, AU6601_REG_INT_STATUS);

	tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			| AU6601_INT_DATA_END | AU6601_INT_DMA_END
			| AU6601_INT_CMD_END);
	if (tmp == status) {
		cmd_done = alcor_cmd_irq_done(host, tmp);
		data_done = alcor_data_irq_done(host, tmp);
		/* use fast path for simple tasks */
		if (cmd_done && data_done) {
			ret = IRQ_HANDLED;
			goto alcor_irq_done;
		}
	}

	host->irq_status_sd = status;
	ret = IRQ_WAKE_THREAD;
	alcor_mask_sd_irqs(host);
alcor_irq_done:
	return ret;
}
643 
644 static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
645 {
646 	struct alcor_pci_priv *priv = host->alcor_pci;
647 	int i, diff = 0x7fffffff, tmp_clock = 0;
648 	u16 clk_src = 0;
649 	u8 clk_div = 0;
650 
651 	if (clock == 0) {
652 		alcor_write16(priv, 0, AU6601_CLK_SELECT);
653 		return;
654 	}
655 
656 	for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
657 		unsigned int tmp_div, tmp_diff;
658 		const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];
659 
660 		tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
661 		if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
662 			continue;
663 
664 		tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
665 		tmp_diff = abs(clock - tmp_clock);
666 
667 		if (tmp_diff >= 0 && tmp_diff < diff) {
668 			diff = tmp_diff;
669 			clk_src = cfg->clk_src_reg;
670 			clk_div = tmp_div;
671 		}
672 	}
673 
674 	clk_src |= ((clk_div - 1) << 8);
675 	clk_src |= AU6601_CLK_ENABLE;
676 
677 	dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
678 			clock, tmp_clock, clk_div, clk_src);
679 
680 	alcor_write16(priv, clk_src, AU6601_CLK_SELECT);
681 
682 }
683 
684 static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
685 {
686 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
687 
688 	if (ios->timing == MMC_TIMING_LEGACY) {
689 		alcor_rmw8(host, AU6601_CLK_DELAY,
690 			    AU6601_CLK_POSITIVE_EDGE_ALL, 0);
691 	} else {
692 		alcor_rmw8(host, AU6601_CLK_DELAY,
693 			    0, AU6601_CLK_POSITIVE_EDGE_ALL);
694 	}
695 }
696 
697 static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
698 {
699 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
700 	struct alcor_pci_priv *priv = host->alcor_pci;
701 
702 	if (ios->bus_width == MMC_BUS_WIDTH_1) {
703 		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
704 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
705 		alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
706 			      AU6601_REG_BUS_CTRL);
707 	} else
708 		dev_err(host->dev, "Unknown BUS mode\n");
709 
710 }
711 
712 static int alcor_card_busy(struct mmc_host *mmc)
713 {
714 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
715 	struct alcor_pci_priv *priv = host->alcor_pci;
716 	u8 status;
717 
718 	/* Check whether dat[0:3] low */
719 	status = alcor_read8(priv, AU6601_DATA_PIN_STATE);
720 
721 	return !(status & AU6601_BUS_STAT_DAT_MASK);
722 }
723 
724 static int alcor_get_cd(struct mmc_host *mmc)
725 {
726 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
727 	struct alcor_pci_priv *priv = host->alcor_pci;
728 	u8 detect;
729 
730 	detect = alcor_read8(priv, AU6601_DETECT_STATUS)
731 		& AU6601_DETECT_STATUS_M;
732 	/* check if card is present then send command and data */
733 	return (detect == AU6601_SD_DETECTED);
734 }
735 
736 static int alcor_get_ro(struct mmc_host *mmc)
737 {
738 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
739 	struct alcor_pci_priv *priv = host->alcor_pci;
740 	u8 status;
741 
742 	/* get write protect pin status */
743 	status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);
744 
745 	return !!(status & AU6601_SD_CARD_WP);
746 }
747 
748 static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
749 {
750 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
751 
752 	mutex_lock(&host->cmd_mutex);
753 
754 	host->mrq = mrq;
755 
756 	/* check if card is present then send command and data */
757 	if (alcor_get_cd(mmc))
758 		alcor_send_cmd(host, mrq->cmd, true);
759 	else {
760 		mrq->cmd->error = -ENOMEDIUM;
761 		alcor_request_complete(host, 1);
762 	}
763 
764 	mutex_unlock(&host->cmd_mutex);
765 }
766 
767 static void alcor_pre_req(struct mmc_host *mmc,
768 			   struct mmc_request *mrq)
769 {
770 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
771 	struct mmc_data *data = mrq->data;
772 	struct mmc_command *cmd = mrq->cmd;
773 	struct scatterlist *sg;
774 	unsigned int i, sg_len;
775 
776 	if (!data || !cmd)
777 		return;
778 
779 	data->host_cookie = COOKIE_UNMAPPED;
780 
781 	/* FIXME: looks like the DMA engine works only with CMD18 */
782 	if (cmd->opcode != 18)
783 		return;
784 	/*
785 	 * We don't do DMA on "complex" transfers, i.e. with
786 	 * non-word-aligned buffers or lengths. Also, we don't bother
787 	 * with all the DMA setup overhead for short transfers.
788 	 */
789 	if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
790 		return;
791 
792 	if (data->blksz & 3)
793 		return;
794 
795 	for_each_sg(data->sg, sg, data->sg_len, i) {
796 		if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
797 			return;
798 	}
799 
800 	/* This data might be unmapped at this time */
801 
802 	sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
803 			    mmc_get_dma_dir(data));
804 	if (sg_len)
805 		data->host_cookie = COOKIE_MAPPED;
806 
807 	data->sg_count = sg_len;
808 }
809 
810 static void alcor_post_req(struct mmc_host *mmc,
811 			    struct mmc_request *mrq,
812 			    int err)
813 {
814 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
815 	struct mmc_data *data = mrq->data;
816 
817 	if (!data)
818 		return;
819 
820 	if (data->host_cookie == COOKIE_MAPPED) {
821 		dma_unmap_sg(host->dev,
822 			     data->sg,
823 			     data->sg_len,
824 			     mmc_get_dma_dir(data));
825 	}
826 
827 	data->host_cookie = COOKIE_UNMAPPED;
828 }
829 
/*
 * Apply ios->power_mode. The power-on sequence mimics the vendor
 * driver; the order of register writes and the delays matter.
 */
static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		alcor_set_clock(host, ios->clock);
		/* set all pins to input */
		alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
		/* turn off VDD */
		alcor_write8(priv, 0, AU6601_POWER_CONTROL);
		break;
	case MMC_POWER_UP:
		break;
	case MMC_POWER_ON:
		/* This is most trickiest part. The order and timings of
		 * instructions seems to play important role. Any changes may
		 * confuse internal state engine of this HW.
		 * FIXME: If we will ever get access to documentation, then this
		 * part should be reviewed again.
		 */

		/* enable SD card mode */
		alcor_write8(priv, AU6601_SD_CARD,
			      AU6601_ACTIVE_CTRL);
		/* set signal voltage to 3.3V */
		alcor_write8(priv, 0, AU6601_OPT);
		/* no documentation about clk delay, for now just try to mimic
		 * original driver.
		 */
		alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
		/* set BUS width to 1 bit */
		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
		/* set CLK first time */
		alcor_set_clock(host, ios->clock);
		/* power on VDD */
		alcor_write8(priv, AU6601_SD_CARD,
			      AU6601_POWER_CONTROL);
		/* wait until the CLK will get stable */
		mdelay(20);
		/* set CLK again, mimic original driver. */
		alcor_set_clock(host, ios->clock);

		/* enable output */
		alcor_write8(priv, AU6601_SD_CARD,
			      AU6601_OUTPUT_ENABLE);
		/* The clk will not work on au6621. We need to trigger data
		 * transfer.
		 */
		alcor_write8(priv, AU6601_DATA_WRITE,
			      AU6601_DATA_XFER_CTRL);
		/* configure timeout. Not clear what exactly it means. */
		alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
		mdelay(100);
		break;
	default:
		dev_err(host->dev, "Unknown power parameter\n");
	}
}
890 
891 static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
892 {
893 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
894 
895 	mutex_lock(&host->cmd_mutex);
896 
897 	dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
898 		ios->bus_width, ios->power_mode);
899 
900 	if (ios->power_mode != host->cur_power_mode) {
901 		alcor_set_power_mode(mmc, ios);
902 		host->cur_power_mode = ios->power_mode;
903 	} else {
904 		alcor_set_timing(mmc, ios);
905 		alcor_set_bus_width(mmc, ios);
906 		alcor_set_clock(host, ios->clock);
907 	}
908 
909 	mutex_unlock(&host->cmd_mutex);
910 }
911 
912 static int alcor_signal_voltage_switch(struct mmc_host *mmc,
913 				       struct mmc_ios *ios)
914 {
915 	struct alcor_sdmmc_host *host = mmc_priv(mmc);
916 
917 	mutex_lock(&host->cmd_mutex);
918 
919 	switch (ios->signal_voltage) {
920 	case MMC_SIGNAL_VOLTAGE_330:
921 		alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
922 		break;
923 	case MMC_SIGNAL_VOLTAGE_180:
924 		alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
925 		break;
926 	default:
927 		/* No signal voltage switch required */
928 		break;
929 	}
930 
931 	mutex_unlock(&host->cmd_mutex);
932 	return 0;
933 }
934 
/* Host controller operations handed to the MMC core. */
static const struct mmc_host_ops alcor_sdc_ops = {
	.card_busy	= alcor_card_busy,
	.get_cd		= alcor_get_cd,
	.get_ro		= alcor_get_ro,
	.post_req	= alcor_post_req,
	.pre_req	= alcor_pre_req,
	.request	= alcor_request,
	.set_ios	= alcor_set_ios,
	.start_signal_voltage_switch = alcor_signal_voltage_switch,
};
945 
/*
 * Software watchdog armed by alcor_send_cmd(): if no hardware interrupt
 * completed the request in time, fail it with -ETIMEDOUT and reset the
 * controller's state machines.
 */
static void alcor_timeout_timer(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
						timeout_work);
	mutex_lock(&host->cmd_mutex);

	dev_dbg(host->dev, "triggered timeout\n");
	if (host->mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");

		/* attribute the timeout to whichever stage was pending */
		if (host->data) {
			host->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;
		}

		alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
		/* don't cancel the work item we are currently running in */
		alcor_request_complete(host, 0);
	}

	/* order the MMIO writes above before releasing the mutex */
	mmiowb();
	mutex_unlock(&host->cmd_mutex);
}
973 
/* Bring the controller to a known powered-down idle state with IRQs on. */
static void alcor_hw_init(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct alcor_dev_cfg *cfg = priv->cfg;

	/* FIXME: This part is a mimics HW init of original driver.
	 * If we will ever get access to documentation, then this part
	 * should be reviewed again.
	 */

	/* reset command state engine */
	alcor_reset(host, AU6601_RESET_CMD);

	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
	/* enable sd card mode */
	alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);

	/* set BUS width to 1 bit */
	alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);

	/* reset data state engine */
	alcor_reset(host, AU6601_RESET_DATA);
	/* Not sure if a voodoo with AU6601_DMA_BOUNDARY is really needed */
	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);

	alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
	/* not clear what we are doing here. */
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
	alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);

	/* for 6601 - dma_boundary; for 6621 - dma_page_cnt
	 * exact meaning of this register is not clear.
	 */
	alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);

	/* make sure all pins are set to input and VDD is off */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
	/* now we should be safe to enable IRQs */
	alcor_unmask_sd_irqs(host);
}
1018 
/* Quiesce the controller: mask IRQs, reset engines, cut outputs/power. */
static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_mask_sd_irqs(host);
	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

	/* disable card detection */
	alcor_write8(priv, 0, AU6601_DETECT_STATUS);

	/* all pins to input, VDD off */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	alcor_write8(priv, 0, AU6601_OPT);
}
1033 
1034 static void alcor_init_mmc(struct alcor_sdmmc_host *host)
1035 {
1036 	struct mmc_host *mmc = host->mmc;
1037 
1038 	mmc->f_min = AU6601_MIN_CLOCK;
1039 	mmc->f_max = AU6601_MAX_CLOCK;
1040 	mmc->ocr_avail = MMC_VDD_33_34;
1041 	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
1042 		| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
1043 		| MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
1044 	mmc->caps2 = MMC_CAP2_NO_SDIO;
1045 	mmc->ops = &alcor_sdc_ops;
1046 
1047 	/* The hardware does DMA data transfer of 4096 bytes to/from a single
1048 	 * buffer address. Scatterlists are not supported, but upon DMA
1049 	 * completion (signalled via IRQ), the original vendor driver does
1050 	 * then immediately set up another DMA transfer of the next 4096
1051 	 * bytes.
1052 	 *
1053 	 * This means that we need to handle the I/O in 4096 byte chunks.
1054 	 * Lacking a way to limit the sglist entries to 4096 bytes, we instead
1055 	 * impose that only one segment is provided, with maximum size 4096,
1056 	 * which also happens to be the minimum size. This means that the
1057 	 * single-entry sglist handled by this driver can be handed directly
1058 	 * to the hardware, nice and simple.
1059 	 *
1060 	 * Unfortunately though, that means we only do 4096 bytes I/O per
1061 	 * MMC command. A future improvement would be to make the driver
1062 	 * accept sg lists and entries of any size, and simply iterate
1063 	 * through them 4096 bytes at a time.
1064 	 */
1065 	mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
1066 	mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
1067 	mmc->max_req_size = mmc->max_seg_size;
1068 }
1069 
1070 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1071 {
1072 	struct alcor_pci_priv *priv = pdev->dev.platform_data;
1073 	struct mmc_host *mmc;
1074 	struct alcor_sdmmc_host *host;
1075 	int ret;
1076 
1077 	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1078 	if (!mmc) {
1079 		dev_err(&pdev->dev, "Can't allocate MMC\n");
1080 		return -ENOMEM;
1081 	}
1082 
1083 	host = mmc_priv(mmc);
1084 	host->mmc = mmc;
1085 	host->dev = &pdev->dev;
1086 	host->cur_power_mode = MMC_POWER_UNDEFINED;
1087 	host->alcor_pci = priv;
1088 
1089 	/* make sure irqs are disabled */
1090 	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
1091 	alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);
1092 
1093 	ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
1094 			alcor_irq, alcor_irq_thread, IRQF_SHARED,
1095 			DRV_NAME_ALCOR_PCI_SDMMC, host);
1096 
1097 	if (ret) {
1098 		dev_err(&pdev->dev, "Failed to get irq for data line\n");
1099 		return ret;
1100 	}
1101 
1102 	mutex_init(&host->cmd_mutex);
1103 	INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);
1104 
1105 	alcor_init_mmc(host);
1106 	alcor_hw_init(host);
1107 
1108 	dev_set_drvdata(&pdev->dev, host);
1109 	mmc_add_host(mmc);
1110 	return 0;
1111 }
1112 
/*
 * Platform remove: finish any request still owned by the timeout
 * worker, quiesce the hardware, then unregister and free the host.
 */
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);

	/* a pending timeout work still owns the request - complete it */
	if (cancel_delayed_work_sync(&host->timeout_work))
		alcor_request_complete(host, 0);

	alcor_hw_uninit(host);
	mmc_remove_host(host->mmc);
	mmc_free_host(host->mmc);

	return 0;
}
1126 
1127 #ifdef CONFIG_PM_SLEEP
1128 static int alcor_pci_sdmmc_suspend(struct device *dev)
1129 {
1130 	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1131 
1132 	if (cancel_delayed_work_sync(&host->timeout_work))
1133 		alcor_request_complete(host, 0);
1134 
1135 	alcor_hw_uninit(host);
1136 
1137 	return 0;
1138 }
1139 
/* System resume: re-run the full hardware init sequence. */
static int alcor_pci_sdmmc_resume(struct device *dev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

	alcor_hw_init(host);
	return 0;
}
1148 #endif /* CONFIG_PM_SLEEP */
1149 
/* Suspend/resume hooks; resolves to empty ops without CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
			 alcor_pci_sdmmc_resume);
1152 
/* Platform device IDs this driver binds to. */
static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
	{
		.name = DRV_NAME_ALCOR_PCI_SDMMC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);
1161 
/* Platform driver glue and module boilerplate. */
static struct platform_driver alcor_pci_sdmmc_driver = {
	.probe		= alcor_pci_sdmmc_drv_probe,
	.remove		= alcor_pci_sdmmc_drv_remove,
	.id_table	= alcor_pci_sdmmc_ids,
	.driver		= {
		.name	= DRV_NAME_ALCOR_PCI_SDMMC,
		.pm	= &alcor_mmc_pm_ops
	},
};
module_platform_driver(alcor_pci_sdmmc_driver);

MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
MODULE_LICENSE("GPL");
1176