Lines matching refs: host (identifier references in drivers/mmc/host/sh_mmcif.c)

257 #define sh_mmcif_host_to_dev(host) (&host->pd->dev)  argument
259 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, in sh_mmcif_bitset() argument
262 writel(val | readl(host->addr + reg), host->addr + reg); in sh_mmcif_bitset()
265 static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, in sh_mmcif_bitclr() argument
268 writel(~val & readl(host->addr + reg), host->addr + reg); in sh_mmcif_bitclr()
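
The two helpers above are plain read-modify-write accessors for the memory-mapped MMCIF registers; host->addr is the ioremapped register base kept in the driver-private struct sh_mmcif_host. A minimal sketch reconstructed from the fragments at lines 262 and 268:

#include <linux/io.h>

/* Set bits in an MMCIF register: read, OR in val, write back. */
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

/* Clear bits in an MMCIF register: read, mask out val, write back. */
static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
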
273 struct sh_mmcif_host *host = arg; in sh_mmcif_dma_complete() local
274 struct mmc_request *mrq = host->mrq; in sh_mmcif_dma_complete()
275 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_dma_complete()
283 complete(&host->dma_complete); in sh_mmcif_dma_complete()
286 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) in sh_mmcif_start_dma_rx() argument
288 struct mmc_data *data = host->mrq->data; in sh_mmcif_start_dma_rx()
291 struct dma_chan *chan = host->chan_rx; in sh_mmcif_start_dma_rx()
292 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_start_dma_rx()
299 host->dma_active = true; in sh_mmcif_start_dma_rx()
306 desc->callback_param = host; in sh_mmcif_start_dma_rx()
308 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); in sh_mmcif_start_dma_rx()
318 host->chan_rx = NULL; in sh_mmcif_start_dma_rx()
319 host->dma_active = false; in sh_mmcif_start_dma_rx()
322 chan = host->chan_tx; in sh_mmcif_start_dma_rx()
324 host->chan_tx = NULL; in sh_mmcif_start_dma_rx()
329 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); in sh_mmcif_start_dma_rx()
336 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) in sh_mmcif_start_dma_tx() argument
338 struct mmc_data *data = host->mrq->data; in sh_mmcif_start_dma_tx()
341 struct dma_chan *chan = host->chan_tx; in sh_mmcif_start_dma_tx()
342 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_start_dma_tx()
349 host->dma_active = true; in sh_mmcif_start_dma_tx()
356 desc->callback_param = host; in sh_mmcif_start_dma_tx()
358 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); in sh_mmcif_start_dma_tx()
368 host->chan_tx = NULL; in sh_mmcif_start_dma_tx()
369 host->dma_active = false; in sh_mmcif_start_dma_tx()
372 chan = host->chan_rx; in sh_mmcif_start_dma_tx()
374 host->chan_rx = NULL; in sh_mmcif_start_dma_tx()
379 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); in sh_mmcif_start_dma_tx()
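
Both DMA start paths follow the standard dmaengine slave flow: map the request's scatterlist, build a slave descriptor, attach sh_mmcif_dma_complete as the callback, submit, and kick the channel; on any failure they drop back to PIO by releasing both channels and clearing the BUF_ACC DMA-enable bits (lines 308-329 and 358-379). A sketch of the RX side under those assumptions, with the error unwinding condensed; the TX side mirrors it with DMA_TO_DEVICE/DMA_MEM_TO_DEV and BUF_ACC_DMAWEN:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct dma_chan *chan = host->chan_rx;
	struct dma_async_tx_descriptor *desc = NULL;
	int nents;

	/* Map the scatterlist for the DMA controller's device. */
	nents = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
			   DMA_FROM_DEVICE);
	if (nents > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, data->sg, nents,
					       DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		/* host->dma_complete is signalled from this callback. */
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);
		/* Route the controller's data FIFO to DMA and start. */
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
		return;
	}

	/* Fall back to PIO: drop both channels, disable DMA access. */
	host->chan_rx = NULL;
	host->dma_active = false;
	dma_release_channel(chan);
	chan = host->chan_tx;
	if (chan) {
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
}
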
387 sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id) in sh_mmcif_request_dma_pdata() argument
399 static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host, in sh_mmcif_dma_slave_config() argument
406 res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); in sh_mmcif_dma_slave_config()
423 static void sh_mmcif_request_dma(struct sh_mmcif_host *host) in sh_mmcif_request_dma() argument
425 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_request_dma()
426 host->dma_active = false; in sh_mmcif_request_dma()
432 host->chan_tx = sh_mmcif_request_dma_pdata(host, in sh_mmcif_request_dma()
434 host->chan_rx = sh_mmcif_request_dma_pdata(host, in sh_mmcif_request_dma()
437 host->chan_tx = dma_request_chan(dev, "tx"); in sh_mmcif_request_dma()
438 if (IS_ERR(host->chan_tx)) in sh_mmcif_request_dma()
439 host->chan_tx = NULL; in sh_mmcif_request_dma()
440 host->chan_rx = dma_request_chan(dev, "rx"); in sh_mmcif_request_dma()
441 if (IS_ERR(host->chan_rx)) in sh_mmcif_request_dma()
442 host->chan_rx = NULL; in sh_mmcif_request_dma()
444 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, in sh_mmcif_request_dma()
445 host->chan_rx); in sh_mmcif_request_dma()
447 if (!host->chan_tx || !host->chan_rx || in sh_mmcif_request_dma()
448 sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) || in sh_mmcif_request_dma()
449 sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM)) in sh_mmcif_request_dma()
455 if (host->chan_tx) in sh_mmcif_request_dma()
456 dma_release_channel(host->chan_tx); in sh_mmcif_request_dma()
457 if (host->chan_rx) in sh_mmcif_request_dma()
458 dma_release_channel(host->chan_rx); in sh_mmcif_request_dma()
459 host->chan_tx = host->chan_rx = NULL; in sh_mmcif_request_dma()
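
Channel acquisition (lines 423-459) first tries legacy platform data, then the named "tx"/"rx" channels via dma_request_chan(); each channel is then pointed at the physical address of the MMCIF data FIFO with dmaengine_slave_config(). A sketch of that slave-config step; the bus-width value below is illustrative, not taken from the listing:

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	/* The slave address is the physical address of MMCIF_CE_DATA. */
	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* illustrative */
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* illustrative */
	}

	return dmaengine_slave_config(chan, &cfg);
}

If either channel is missing or either config call fails (lines 447-449), both channels are released and the driver stays in PIO mode.
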
462 static void sh_mmcif_release_dma(struct sh_mmcif_host *host) in sh_mmcif_release_dma() argument
464 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); in sh_mmcif_release_dma()
466 if (host->chan_tx) { in sh_mmcif_release_dma()
467 struct dma_chan *chan = host->chan_tx; in sh_mmcif_release_dma()
468 host->chan_tx = NULL; in sh_mmcif_release_dma()
471 if (host->chan_rx) { in sh_mmcif_release_dma()
472 struct dma_chan *chan = host->chan_rx; in sh_mmcif_release_dma()
473 host->chan_rx = NULL; in sh_mmcif_release_dma()
477 host->dma_active = false; in sh_mmcif_release_dma()
480 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) in sh_mmcif_clock_control() argument
482 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_clock_control()
485 unsigned int current_clk = clk_get_rate(host->clk); in sh_mmcif_clock_control()
488 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); in sh_mmcif_clock_control()
489 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); in sh_mmcif_clock_control()
494 if (host->clkdiv_map) { in sh_mmcif_clock_control()
502 if (!((1 << i) & host->clkdiv_map)) in sh_mmcif_clock_control()
511 freq = clk_round_rate(host->clk, clk * div); in sh_mmcif_clock_control()
525 clk_set_rate(host->clk, best_freq); in sh_mmcif_clock_control()
533 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv); in sh_mmcif_clock_control()
534 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); in sh_mmcif_clock_control()
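
sh_mmcif_clock_control() first gates the clock and clears the divider field (lines 488-489). With a clkdiv_map it walks the permitted power-of-two dividers and uses clk_round_rate() to find the parent-rate/divider pair closest to the requested frequency (lines 494-525); without one it derives the divider directly from the ratio of the bus clock to the target before writing it into CLK_CLEAR and re-enabling the clock (lines 533-534). A small illustrative helper for that ratio-to-divider step (hypothetical name, not from the driver):

#include <linux/bitops.h>	/* fls() */
#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/*
 * Return log2 of the smallest power-of-two divider that brings
 * parent_rate at or below target; the driver folds a value like
 * this into the CLK_CLEAR field of MMCIF_CE_CLK_CTRL.
 */
static unsigned int mmcif_pick_divider(unsigned int parent_rate,
				       unsigned int target)
{
	if (!target)
		return 0;	/* caller gates the clock instead */
	return fls(DIV_ROUND_UP(parent_rate, target) - 1);
}

For example, a 100 MHz parent and a 25 MHz target give DIV_ROUND_UP = 4 and fls(3) = 2, i.e. divide by 1 << 2 = 4.
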
537 static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) in sh_mmcif_sync_reset() argument
541 tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); in sh_mmcif_sync_reset()
543 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); in sh_mmcif_sync_reset()
544 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); in sh_mmcif_sync_reset()
545 if (host->ccs_enable) in sh_mmcif_sync_reset()
547 if (host->clk_ctrl2_enable) in sh_mmcif_sync_reset()
548 sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000); in sh_mmcif_sync_reset()
549 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | in sh_mmcif_sync_reset()
552 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); in sh_mmcif_sync_reset()
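
The soft-reset sequence (lines 537-552) pulses SOFT_RST through the VERSION register while preserving the current clock-divider bits, then restores the clock control word and the buffer-access mode. A sketch consistent with the fragments; the timeout constants OR'ed in at line 549 and the ccs_enable adjustment at 545 are elided in the listing and only marked here:

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	/* Preserve the current divider bits across the reset. */
	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	/* Pulse the soft-reset bit via the VERSION register. */
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);

	/* host->ccs_enable adjusts tmp here (body elided in the listing). */

	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);

	/* Restore divider bits plus the driver's timeout defaults. */
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp /* | timeout defaults */);

	/* Buffer access mode for the data FIFO. */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
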
555 static int sh_mmcif_error_manage(struct sh_mmcif_host *host) in sh_mmcif_error_manage() argument
557 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_error_manage()
561 host->sd_error = false; in sh_mmcif_error_manage()
563 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); in sh_mmcif_error_manage()
564 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); in sh_mmcif_error_manage()
569 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); in sh_mmcif_error_manage()
570 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); in sh_mmcif_error_manage()
572 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) in sh_mmcif_error_manage()
582 sh_mmcif_sync_reset(host); in sh_mmcif_error_manage()
589 host->state, host->wait_for); in sh_mmcif_error_manage()
593 host->state, host->wait_for); in sh_mmcif_error_manage()
597 host->state, host->wait_for); in sh_mmcif_error_manage()
603 static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) in sh_mmcif_next_block() argument
605 struct mmc_data *data = host->mrq->data; in sh_mmcif_next_block()
607 host->sg_blkidx += host->blocksize; in sh_mmcif_next_block()
610 BUG_ON(host->sg_blkidx > data->sg->length); in sh_mmcif_next_block()
612 if (host->sg_blkidx == data->sg->length) { in sh_mmcif_next_block()
613 host->sg_blkidx = 0; in sh_mmcif_next_block()
614 if (++host->sg_idx < data->sg_len) in sh_mmcif_next_block()
615 host->pio_ptr = sg_virt(++data->sg); in sh_mmcif_next_block()
617 host->pio_ptr = p; in sh_mmcif_next_block()
620 return host->sg_idx != data->sg_len; in sh_mmcif_next_block()
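
sh_mmcif_next_block() advances the PIO position one block at a time across the request's scatterlist: it adds the block size to the offset inside the current segment, steps to the next sg entry when the segment is exhausted, and returns false once every segment has been consumed. Reconstructed from the fragments at lines 607-620:

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* Each segment holds a whole number of blocks. */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		/* Segment finished: move to the next scatterlist entry. */
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		/* Continue inside the current segment. */
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
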
623 static void sh_mmcif_single_read(struct sh_mmcif_host *host, in sh_mmcif_single_read() argument
626 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_single_read()
629 host->wait_for = MMCIF_WAIT_FOR_READ; in sh_mmcif_single_read()
632 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); in sh_mmcif_single_read()
635 static bool sh_mmcif_read_block(struct sh_mmcif_host *host) in sh_mmcif_read_block() argument
637 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_read_block()
638 struct mmc_data *data = host->mrq->data; in sh_mmcif_read_block()
642 if (host->sd_error) { in sh_mmcif_read_block()
643 data->error = sh_mmcif_error_manage(host); in sh_mmcif_read_block()
648 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_read_block()
649 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); in sh_mmcif_read_block()
652 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); in sh_mmcif_read_block()
653 host->wait_for = MMCIF_WAIT_FOR_READ_END; in sh_mmcif_read_block()
658 static void sh_mmcif_multi_read(struct sh_mmcif_host *host, in sh_mmcif_multi_read() argument
666 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_multi_read()
669 host->wait_for = MMCIF_WAIT_FOR_MREAD; in sh_mmcif_multi_read()
670 host->sg_idx = 0; in sh_mmcif_multi_read()
671 host->sg_blkidx = 0; in sh_mmcif_multi_read()
672 host->pio_ptr = sg_virt(data->sg); in sh_mmcif_multi_read()
674 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); in sh_mmcif_multi_read()
677 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) in sh_mmcif_mread_block() argument
679 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_mread_block()
680 struct mmc_data *data = host->mrq->data; in sh_mmcif_mread_block()
681 u32 *p = host->pio_ptr; in sh_mmcif_mread_block()
684 if (host->sd_error) { in sh_mmcif_mread_block()
685 data->error = sh_mmcif_error_manage(host); in sh_mmcif_mread_block()
692 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_mread_block()
693 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); in sh_mmcif_mread_block()
695 if (!sh_mmcif_next_block(host, p)) in sh_mmcif_mread_block()
698 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); in sh_mmcif_mread_block()
703 static void sh_mmcif_single_write(struct sh_mmcif_host *host, in sh_mmcif_single_write() argument
706 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_single_write()
709 host->wait_for = MMCIF_WAIT_FOR_WRITE; in sh_mmcif_single_write()
712 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); in sh_mmcif_single_write()
715 static bool sh_mmcif_write_block(struct sh_mmcif_host *host) in sh_mmcif_write_block() argument
717 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_write_block()
718 struct mmc_data *data = host->mrq->data; in sh_mmcif_write_block()
722 if (host->sd_error) { in sh_mmcif_write_block()
723 data->error = sh_mmcif_error_manage(host); in sh_mmcif_write_block()
728 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_write_block()
729 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); in sh_mmcif_write_block()
732 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); in sh_mmcif_write_block()
733 host->wait_for = MMCIF_WAIT_FOR_WRITE_END; in sh_mmcif_write_block()
738 static void sh_mmcif_multi_write(struct sh_mmcif_host *host, in sh_mmcif_multi_write() argument
746 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_multi_write()
749 host->wait_for = MMCIF_WAIT_FOR_MWRITE; in sh_mmcif_multi_write()
750 host->sg_idx = 0; in sh_mmcif_multi_write()
751 host->sg_blkidx = 0; in sh_mmcif_multi_write()
752 host->pio_ptr = sg_virt(data->sg); in sh_mmcif_multi_write()
754 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); in sh_mmcif_multi_write()
757 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) in sh_mmcif_mwrite_block() argument
759 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_mwrite_block()
760 struct mmc_data *data = host->mrq->data; in sh_mmcif_mwrite_block()
761 u32 *p = host->pio_ptr; in sh_mmcif_mwrite_block()
764 if (host->sd_error) { in sh_mmcif_mwrite_block()
765 data->error = sh_mmcif_error_manage(host); in sh_mmcif_mwrite_block()
772 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_mwrite_block()
773 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); in sh_mmcif_mwrite_block()
775 if (!sh_mmcif_next_block(host, p)) in sh_mmcif_mwrite_block()
778 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); in sh_mmcif_mwrite_block()
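
All four PIO block handlers share the same inner loop: on each MBUFREN/MBUFWEN interrupt they move one block, a 32-bit word at a time, through the MMCIF_CE_DATA window, then either re-arm the buffer interrupt for the next block or fall through to the transfer-end wait. The multi-block read handler, reconstructed from lines 677-698 (the write side at 757-778 is symmetric, using sh_mmcif_writel() and MASK_MBUFWEN); the return statements are filled in where the listing elides them:

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	/* Drain one block from the data FIFO, one 32-bit word per read. */
	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* Last block consumed: the transfer-end interrupt finishes it. */
	if (!sh_mmcif_next_block(host, p))
		return false;

	/* Re-arm the buffer-ready interrupt for the next block. */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}
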
783 static void sh_mmcif_get_response(struct sh_mmcif_host *host, in sh_mmcif_get_response() argument
787 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); in sh_mmcif_get_response()
788 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); in sh_mmcif_get_response()
789 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); in sh_mmcif_get_response()
790 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); in sh_mmcif_get_response()
792 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); in sh_mmcif_get_response()
795 static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, in sh_mmcif_get_cmd12response() argument
798 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); in sh_mmcif_get_cmd12response()
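
Responses are latched in RESP0..RESP3: a long 136-bit (R2) response occupies all four registers, most significant word in RESP3, while every other response type fits in RESP0; CMD12's automatic response has its own register. Reconstructed from lines 783-798; the listing elides the branch condition, and the MMC_RSP_136 test below is the usual core flag assumed here:

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response: four words, most significant first. */
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else {
		/* Short response: a single word. */
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	}
}
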
801 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, in sh_mmcif_set_cmd() argument
804 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_set_cmd()
833 switch (host->bus_width) { in sh_mmcif_set_cmd()
847 switch (host->timing) { in sh_mmcif_set_cmd()
866 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, in sh_mmcif_set_cmd()
884 static int sh_mmcif_data_trans(struct sh_mmcif_host *host, in sh_mmcif_data_trans() argument
887 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_data_trans()
891 sh_mmcif_multi_read(host, mrq); in sh_mmcif_data_trans()
894 sh_mmcif_multi_write(host, mrq); in sh_mmcif_data_trans()
897 sh_mmcif_single_write(host, mrq); in sh_mmcif_data_trans()
901 sh_mmcif_single_read(host, mrq); in sh_mmcif_data_trans()
909 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, in sh_mmcif_start_cmd() argument
922 if (host->ccs_enable) in sh_mmcif_start_cmd()
926 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); in sh_mmcif_start_cmd()
927 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, in sh_mmcif_start_cmd()
930 opc = sh_mmcif_set_cmd(host, mrq); in sh_mmcif_start_cmd()
932 if (host->ccs_enable) in sh_mmcif_start_cmd()
933 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); in sh_mmcif_start_cmd()
935 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS); in sh_mmcif_start_cmd()
936 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); in sh_mmcif_start_cmd()
938 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); in sh_mmcif_start_cmd()
940 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_start_cmd()
941 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); in sh_mmcif_start_cmd()
943 host->wait_for = MMCIF_WAIT_FOR_CMD; in sh_mmcif_start_cmd()
944 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_start_cmd()
945 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_start_cmd()
948 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, in sh_mmcif_stop_cmd() argument
951 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_stop_cmd()
955 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); in sh_mmcif_stop_cmd()
958 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); in sh_mmcif_stop_cmd()
962 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_stop_cmd()
966 host->wait_for = MMCIF_WAIT_FOR_STOP; in sh_mmcif_stop_cmd()
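
sh_mmcif_start_cmd() programs the block count for data transfers, builds the CMD_SET opcode word via sh_mmcif_set_cmd(), clears stale interrupt status, unmasks the interrupts it needs, writes the argument, and finally writes CMD_SET to launch the command while arming the software timeout; sh_mmcif_stop_cmd() then unmasks the CMD12 completion interrupt for the stop phase (lines 955-966). A sketch of the tail of the start sequence as shown at lines 932-944; opc, mask, cmd and flags come from earlier lines elided in the listing:

	/* Clear stale interrupt status; CCS handling depends on the host. */
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);

	/* Argument first; writing CMD_SET actually starts the command. */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);

	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
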
971 struct sh_mmcif_host *host = mmc_priv(mmc); in sh_mmcif_request() local
972 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_request()
975 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_request()
976 if (host->state != STATE_IDLE) { in sh_mmcif_request()
978 __func__, host->state); in sh_mmcif_request()
979 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_request()
985 host->state = STATE_REQUEST; in sh_mmcif_request()
986 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_request()
988 host->mrq = mrq; in sh_mmcif_request()
990 sh_mmcif_start_cmd(host, mrq); in sh_mmcif_request()
993 static void sh_mmcif_clk_setup(struct sh_mmcif_host *host) in sh_mmcif_clk_setup() argument
995 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_clk_setup()
997 if (host->mmc->f_max) { in sh_mmcif_clk_setup()
1000 f_max = host->mmc->f_max; in sh_mmcif_clk_setup()
1002 f_min = clk_round_rate(host->clk, f_min_old / 2); in sh_mmcif_clk_setup()
1011 host->clkdiv_map = 0x3ff; in sh_mmcif_clk_setup()
1013 host->mmc->f_max = f_max >> ffs(host->clkdiv_map); in sh_mmcif_clk_setup()
1014 host->mmc->f_min = f_min >> fls(host->clkdiv_map); in sh_mmcif_clk_setup()
1016 unsigned int clk = clk_get_rate(host->clk); in sh_mmcif_clk_setup()
1018 host->mmc->f_max = clk / 2; in sh_mmcif_clk_setup()
1019 host->mmc->f_min = clk / 512; in sh_mmcif_clk_setup()
1023 host->mmc->f_max, host->mmc->f_min); in sh_mmcif_clk_setup()
1028 struct sh_mmcif_host *host = mmc_priv(mmc); in sh_mmcif_set_ios() local
1029 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_set_ios()
1032 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_set_ios()
1033 if (host->state != STATE_IDLE) { in sh_mmcif_set_ios()
1035 __func__, host->state); in sh_mmcif_set_ios()
1036 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_set_ios()
1040 host->state = STATE_IOS; in sh_mmcif_set_ios()
1041 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_set_ios()
1047 if (!host->power) { in sh_mmcif_set_ios()
1048 clk_prepare_enable(host->clk); in sh_mmcif_set_ios()
1050 sh_mmcif_sync_reset(host); in sh_mmcif_set_ios()
1051 sh_mmcif_request_dma(host); in sh_mmcif_set_ios()
1052 host->power = true; in sh_mmcif_set_ios()
1058 if (host->power) { in sh_mmcif_set_ios()
1059 sh_mmcif_clock_control(host, 0); in sh_mmcif_set_ios()
1060 sh_mmcif_release_dma(host); in sh_mmcif_set_ios()
1062 clk_disable_unprepare(host->clk); in sh_mmcif_set_ios()
1063 host->power = false; in sh_mmcif_set_ios()
1067 sh_mmcif_clock_control(host, ios->clock); in sh_mmcif_set_ios()
1071 host->timing = ios->timing; in sh_mmcif_set_ios()
1072 host->bus_width = ios->bus_width; in sh_mmcif_set_ios()
1073 host->state = STATE_IDLE; in sh_mmcif_set_ios()
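
set_ios() serializes against the request state machine with host->lock (lines 1032-1041), then handles power transitions: the first power-up enables the interface clock, resets the controller and acquires DMA channels, while power-down gates the MMC clock, releases DMA and disables the clock again (lines 1047-1063), before bus timing and width are latched (lines 1071-1073). A condensed sketch; the switch structure and the regulator/runtime-PM handling that normally surrounds these steps are assumptions, since those branches are elided in the listing:

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!host->power) {
			clk_prepare_enable(host->clk);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
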
1082 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) in sh_mmcif_end_cmd() argument
1084 struct mmc_command *cmd = host->mrq->cmd; in sh_mmcif_end_cmd()
1085 struct mmc_data *data = host->mrq->data; in sh_mmcif_end_cmd()
1086 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_end_cmd()
1089 if (host->sd_error) { in sh_mmcif_end_cmd()
1097 cmd->error = sh_mmcif_error_manage(host); in sh_mmcif_end_cmd()
1102 host->sd_error = false; in sh_mmcif_end_cmd()
1110 sh_mmcif_get_response(host, cmd); in sh_mmcif_end_cmd()
1119 init_completion(&host->dma_complete); in sh_mmcif_end_cmd()
1122 if (host->chan_rx) in sh_mmcif_end_cmd()
1123 sh_mmcif_start_dma_rx(host); in sh_mmcif_end_cmd()
1125 if (host->chan_tx) in sh_mmcif_end_cmd()
1126 sh_mmcif_start_dma_tx(host); in sh_mmcif_end_cmd()
1129 if (!host->dma_active) { in sh_mmcif_end_cmd()
1130 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); in sh_mmcif_end_cmd()
1135 time = wait_for_completion_interruptible_timeout(&host->dma_complete, in sh_mmcif_end_cmd()
1136 host->timeout); in sh_mmcif_end_cmd()
1139 dma_unmap_sg(host->chan_rx->device->dev, in sh_mmcif_end_cmd()
1143 dma_unmap_sg(host->chan_tx->device->dev, in sh_mmcif_end_cmd()
1147 if (host->sd_error) { in sh_mmcif_end_cmd()
1148 dev_err(host->mmc->parent, in sh_mmcif_end_cmd()
1151 data->error = sh_mmcif_error_manage(host); in sh_mmcif_end_cmd()
1153 dev_err(host->mmc->parent, "DMA timeout!\n"); in sh_mmcif_end_cmd()
1156 dev_err(host->mmc->parent, in sh_mmcif_end_cmd()
1160 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, in sh_mmcif_end_cmd()
1162 host->dma_active = false; in sh_mmcif_end_cmd()
1168 dmaengine_terminate_sync(host->chan_rx); in sh_mmcif_end_cmd()
1170 dmaengine_terminate_sync(host->chan_tx); in sh_mmcif_end_cmd()
1178 struct sh_mmcif_host *host = dev_id; in sh_mmcif_irqt() local
1180 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_irqt()
1185 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_irqt()
1186 wait_work = host->wait_for; in sh_mmcif_irqt()
1187 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_irqt()
1189 cancel_delayed_work_sync(&host->timeout_work); in sh_mmcif_irqt()
1191 mutex_lock(&host->thread_lock); in sh_mmcif_irqt()
1193 mrq = host->mrq; in sh_mmcif_irqt()
1196 host->state, host->wait_for); in sh_mmcif_irqt()
1197 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1208 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1212 wait = sh_mmcif_end_cmd(host); in sh_mmcif_irqt()
1216 wait = sh_mmcif_mread_block(host); in sh_mmcif_irqt()
1220 wait = sh_mmcif_read_block(host); in sh_mmcif_irqt()
1224 wait = sh_mmcif_mwrite_block(host); in sh_mmcif_irqt()
1228 wait = sh_mmcif_write_block(host); in sh_mmcif_irqt()
1231 if (host->sd_error) { in sh_mmcif_irqt()
1232 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_irqt()
1236 sh_mmcif_get_cmd12response(host, mrq->stop); in sh_mmcif_irqt()
1241 if (host->sd_error) { in sh_mmcif_irqt()
1242 mrq->data->error = sh_mmcif_error_manage(host); in sh_mmcif_irqt()
1251 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_irqt()
1253 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1257 if (host->wait_for != MMCIF_WAIT_FOR_STOP) { in sh_mmcif_irqt()
1264 sh_mmcif_stop_cmd(host, mrq); in sh_mmcif_irqt()
1266 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_irqt()
1267 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1273 host->wait_for = MMCIF_WAIT_FOR_REQUEST; in sh_mmcif_irqt()
1274 host->state = STATE_IDLE; in sh_mmcif_irqt()
1275 host->mrq = NULL; in sh_mmcif_irqt()
1276 mmc_request_done(host->mmc, mrq); in sh_mmcif_irqt()
1278 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1285 struct sh_mmcif_host *host = dev_id; in sh_mmcif_intr() local
1286 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_intr()
1289 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); in sh_mmcif_intr()
1290 mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); in sh_mmcif_intr()
1291 if (host->ccs_enable) in sh_mmcif_intr()
1292 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); in sh_mmcif_intr()
1294 sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); in sh_mmcif_intr()
1295 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); in sh_mmcif_intr()
1302 host->sd_error = true; in sh_mmcif_intr()
1306 if (!host->mrq) in sh_mmcif_intr()
1308 if (!host->dma_active) in sh_mmcif_intr()
1310 else if (host->sd_error) in sh_mmcif_intr()
1311 sh_mmcif_dma_complete(host); in sh_mmcif_intr()
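
Interrupt handling is split across a hard and a threaded handler: sh_mmcif_intr() reads and acknowledges MMCIF_CE_INT, records error bits in host->sd_error, and wakes the thread for the PIO path, while sh_mmcif_irqt() runs the state machine under thread_lock, issuing the next block, the stop command, or completing the request. A sketch of the hard-handler pattern consistent with lines 1285-1311; exactly which status bits mark an error is decided by mask macros elided in the listing, so the error test below is illustrative:

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);

	/* Acknowledge what is about to be handled. */
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				INT_CCS | ~(state & mask));
	/* Mask the sources the thread will service. */
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & MASK_ALL_ERRORS)		/* illustrative error test */
		host->sd_error = true;

	if (!host->mrq)
		return IRQ_HANDLED;		/* nothing in flight */

	if (!host->dma_active)
		return IRQ_WAKE_THREAD;		/* PIO: run sh_mmcif_irqt() */
	else if (host->sd_error)
		sh_mmcif_dma_complete(host);	/* unblock the DMA waiter */

	return IRQ_HANDLED;
}

Both handlers are registered together with devm_request_threaded_irq(), as the probe fragments at lines 1471 and 1479 show.
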
1322 struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); in sh_mmcif_timeout_work() local
1323 struct mmc_request *mrq = host->mrq; in sh_mmcif_timeout_work()
1324 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_timeout_work()
1327 if (host->dying) in sh_mmcif_timeout_work()
1331 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_timeout_work()
1332 if (host->state == STATE_IDLE) { in sh_mmcif_timeout_work()
1333 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_timeout_work()
1338 host->wait_for, mrq->cmd->opcode); in sh_mmcif_timeout_work()
1340 host->state = STATE_TIMEOUT; in sh_mmcif_timeout_work()
1341 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_timeout_work()
1347 switch (host->wait_for) { in sh_mmcif_timeout_work()
1349 mrq->cmd->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1352 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1360 mrq->data->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1366 host->state = STATE_IDLE; in sh_mmcif_timeout_work()
1367 host->wait_for = MMCIF_WAIT_FOR_REQUEST; in sh_mmcif_timeout_work()
1368 host->mrq = NULL; in sh_mmcif_timeout_work()
1369 mmc_request_done(host->mmc, mrq); in sh_mmcif_timeout_work()
1372 static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) in sh_mmcif_init_ocr() argument
1374 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_init_ocr()
1376 struct mmc_host *mmc = host->mmc; in sh_mmcif_init_ocr()
1393 struct sh_mmcif_host *host; in sh_mmcif_probe() local
1416 host = mmc_priv(mmc); in sh_mmcif_probe()
1417 host->mmc = mmc; in sh_mmcif_probe()
1418 host->addr = reg; in sh_mmcif_probe()
1419 host->timeout = msecs_to_jiffies(10000); in sh_mmcif_probe()
1420 host->ccs_enable = true; in sh_mmcif_probe()
1421 host->clk_ctrl2_enable = false; in sh_mmcif_probe()
1423 host->pd = pdev; in sh_mmcif_probe()
1425 spin_lock_init(&host->lock); in sh_mmcif_probe()
1428 sh_mmcif_init_ocr(host); in sh_mmcif_probe()
1442 platform_set_drvdata(pdev, host); in sh_mmcif_probe()
1444 host->clk = devm_clk_get(dev, NULL); in sh_mmcif_probe()
1445 if (IS_ERR(host->clk)) { in sh_mmcif_probe()
1446 ret = PTR_ERR(host->clk); in sh_mmcif_probe()
1451 ret = clk_prepare_enable(host->clk); in sh_mmcif_probe()
1455 sh_mmcif_clk_setup(host); in sh_mmcif_probe()
1458 host->power = false; in sh_mmcif_probe()
1464 INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work); in sh_mmcif_probe()
1466 sh_mmcif_sync_reset(host); in sh_mmcif_probe()
1467 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_probe()
1471 sh_mmcif_irqt, 0, name, host); in sh_mmcif_probe()
1479 0, "sh_mmc:int", host); in sh_mmcif_probe()
1486 mutex_init(&host->thread_lock); in sh_mmcif_probe()
1495 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, in sh_mmcif_probe()
1496 clk_get_rate(host->clk) / 1000000UL); in sh_mmcif_probe()
1499 clk_disable_unprepare(host->clk); in sh_mmcif_probe()
1503 clk_disable_unprepare(host->clk); in sh_mmcif_probe()
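
probe (lines 1393-1503) follows the usual mmc_alloc_host() pattern: allocate the mmc_host with sh_mmcif_host as private data, fill in the register base, timeout and feature flags, take the interface clock, reset the controller, mask all interrupts, and register the IRQ handlers before adding the host. A skeleton under those assumptions; error handling is compressed, the listing's two IRQ registrations (error and int lines) are collapsed into one, and details such as OCR setup and runtime PM are elided here as they are in the listing:

static int sh_mmcif_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	void __iomem *reg;
	int irq, ret;

	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mmc = mmc_alloc_host(sizeof(*host), dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(10000);	/* request watchdog */
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;
	host->pd = pdev;

	spin_lock_init(&host->lock);
	mutex_init(&host->thread_lock);
	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);
	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto err_host;
	}
	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto err_host;
	sh_mmcif_clk_setup(host);

	/* Quiesce the controller before any interrupt can fire. */
	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = devm_request_threaded_irq(dev, irq, sh_mmcif_intr,
					sh_mmcif_irqt, 0, dev_name(dev), host);
	if (ret)
		goto err_clk;

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(host->clk);
err_host:
	mmc_free_host(mmc);
	return ret;
}
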
1513 struct sh_mmcif_host *host = platform_get_drvdata(pdev); in sh_mmcif_remove() local
1515 host->dying = true; in sh_mmcif_remove()
1516 clk_prepare_enable(host->clk); in sh_mmcif_remove()
1521 mmc_remove_host(host->mmc); in sh_mmcif_remove()
1522 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_remove()
1529 cancel_delayed_work_sync(&host->timeout_work); in sh_mmcif_remove()
1531 clk_disable_unprepare(host->clk); in sh_mmcif_remove()
1532 mmc_free_host(host->mmc); in sh_mmcif_remove()
1540 struct sh_mmcif_host *host = dev_get_drvdata(dev); in sh_mmcif_suspend() local
1543 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_suspend()