xref: /openbmc/linux/drivers/mmc/host/cqhci-core.c (revision c7cf5f0b)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32
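
/*
 * When MMC_CAP2_CQE_DCMD is enabled, slot 31 is reserved for direct commands
 * (DCMD) and data tasks use the remaining slots; see cqhci_tag() and
 * setup_trans_desc() below.
 */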

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	size_t offset = get_trans_desc_offset(cq_host, tag);

	return cq_host->trans_desc_dma_base + offset;
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	size_t offset = get_trans_desc_offset(cq_host, tag);

	return cq_host->trans_desc_base + offset;
}
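
/*
 * Addressing recap: each slot's task + link descriptor pair lives at
 * desc_base + tag * slot_sz, and its private chain of transfer descriptors
 * lives at trans_desc_base + tag * max_segs * trans_desc_len, with the DMA
 * address mirrored off trans_desc_dma_base.
 */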

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}
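
/*
 * Each slot's link descriptor (ACT = 0x6, a link per the CQE descriptor
 * format) points the controller at that slot's transfer descriptor chain.
 * The DCMD slot carries no data, so its link descriptor is left invalid
 * with the END bit set.
 */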

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			       CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * Transfer descriptors may be 96 bits long instead of 128 bits,
	 * which determines whether ADMA expects the next valid descriptor
	 * at the 96th bit or the 128th bit.
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * Allocate one dma-mapped chunk of memory for the task + link
	 * descriptor table and another for the transfer descriptors, then
	 * point each slot's link descriptor at that slot's transfer
	 * descriptor chain.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
					      cq_host->data_size,
					      &cq_host->trans_desc_dma_base,
					      GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		(unsigned long long)cq_host->desc_dma_base,
		(unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}
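
/*
 * Size example (illustrative): with 128-bit task descriptors and 64-bit DMA,
 * slot_sz = 16 + 16 = 32 bytes, so the 32-slot task descriptor list occupies
 * 1 KiB, while the transfer descriptor pool is
 * cqe_qdepth * max_segs * 16 bytes.
 */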

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	if (mmc->caps2 & MMC_CAP2_CRYPTO)
		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		cqhci_writel(cq_host, 0, CQHCI_CTL);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}
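
/*
 * Enable sequence in brief: program CQHCI_CFG (DCMD, task descriptor size,
 * crypto) with the engine disabled, point CQHCI_TDLBA/TDLBAU at the task
 * descriptor list, write the card's RCA to CQHCI_SSC2, then set
 * CQHCI_ENABLE, clear any stale HALT, and finally unmask interrupts.
 */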

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 struct cqhci_host *cq_host, int tag)
{
	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
	u32 req_flags = mrq->data->flags;
	u64 desc0;

	desc0 = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(1) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	task_desc[0] = cpu_to_le64(desc0);

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

		task_desc[1] = cpu_to_le64(desc1);

		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
	} else {
		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc0);
	}
}
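
/*
 * Roughly speaking, the first 64-bit word of a task descriptor packs the
 * attribute bits (VALID/END/INT, ACT = 0x5 for a task), the request flags,
 * the block count and the block address; with 128-bit task descriptors the
 * second word carries the crypto parameters built by
 * cqhci_crypto_prep_task_desc().
 */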

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
			       struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
				mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}
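
/*
 * The transfer descriptor chain is one data descriptor (ACT = 0x4) per
 * DMA-mapped segment, each giving a buffer address and length, with the END
 * bit set only on the last segment so the controller knows where the chain
 * stops.
 */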

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);

}
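
/*
 * A direct command (DCMD) reuses the task descriptor format: QBAR(1) makes
 * it act as a queue barrier, the response type and timing come from the
 * command flags, and the command argument is written at offset 4, where a
 * data task would carry its descriptor payload.
 */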

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		cqhci_prep_task_desc(mrq, cq_host, tag);

		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}
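
/*
 * Issuing a task amounts to filling in the slot's task (and transfer)
 * descriptors and then ringing the doorbell by writing the tag's bit to
 * CQHCI_TDBR under the host lock, with a wmb() in between so the controller
 * never sees the doorbell bit before the descriptors are visible in memory.
 */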

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	u32 tdpe;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	/*
	 * Handle ICCE ("Invalid Crypto Configuration Error").  This should
	 * never happen, since the block layer ensures that all crypto-enabled
	 * I/O requests have a valid keyslot before they reach the driver.
	 *
	 * Note that GCE ("General Crypto Error") is different; it already got
	 * handled above by checking TERRI.
	 */
	if (status & CQHCI_IS_ICCE) {
		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
		WARN_ONCE(1,
			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
			  mmc_hostname(mmc), status, tdpe);
		while (tdpe != 0) {
			tag = __ffs(tdpe);
			tdpe &= ~(1 << tag);
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
	    cmd_error || data_error) {
		if (status & CQHCI_IS_RED)
			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
		if (status & CQHCI_IS_GCE)
			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
		if (status & CQHCI_IS_ICCE)
			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
		cqhci_error_irq(mmc, status, cmd_error, data_error);
	}

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
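
/*
 * Illustrative only: host controller glue normally forwards its CQE
 * interrupt here, translating its own status bits into cmd_error and
 * data_error, roughly (hypothetical handler):
 *
 *	if (host->mmc->cqe_on)
 *		return cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 */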

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
		       mmc_hostname(mmc), tag, cq_host->qcnt);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_warn("%s: cqhci: Failed to clear tasks\n",
			mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}
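
/*
 * Both cqhci_halt() and cqhci_clear_all_tasks() follow the same pattern:
 * unmask only the relevant interrupt (HAC or TCL), set the control bit,
 * sleep on wait_queue until cqhci_irq() wakes us or the timeout expires,
 * then re-read the register to report success.
 */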

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), however failing to halt complicates
 * the recovery, so set a timeout that would reasonably allow I/O to complete.
 */
#define CQHCI_START_HALT_TIMEOUT	500

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared if CQHCI does not halt, that CQHCI should be disabled and
	 * re-enabled if it does not halt, but also that it should not be
	 * disabled before tasks are cleared.  Have a go anyway.
	 */
1084*c7cf5f0bSAdrian Hunter 	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1085*c7cf5f0bSAdrian Hunter 		ok = false;
1086*c7cf5f0bSAdrian Hunter 
1087*c7cf5f0bSAdrian Hunter 	/* Disable to make sure tasks really are cleared */
108806533002SEric Biggers 	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
108906533002SEric Biggers 	cqcfg &= ~CQHCI_ENABLE;
109006533002SEric Biggers 	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1091*c7cf5f0bSAdrian Hunter 
1092*c7cf5f0bSAdrian Hunter 	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
109306533002SEric Biggers 	cqcfg |= CQHCI_ENABLE;
109406533002SEric Biggers 	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1095*c7cf5f0bSAdrian Hunter 
1096*c7cf5f0bSAdrian Hunter 	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1097*c7cf5f0bSAdrian Hunter 
1098*c7cf5f0bSAdrian Hunter 	if (!ok)
1099*c7cf5f0bSAdrian Hunter 		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
110006533002SEric Biggers 
110106533002SEric Biggers 	cqhci_recover_mrqs(cq_host);
110206533002SEric Biggers 
110306533002SEric Biggers 	WARN_ON(cq_host->qcnt);
110406533002SEric Biggers 
110506533002SEric Biggers 	spin_lock_irqsave(&cq_host->lock, flags);
110606533002SEric Biggers 	cq_host->qcnt = 0;
110706533002SEric Biggers 	cq_host->recovery_halt = false;
110806533002SEric Biggers 	mmc->cqe_on = false;
110906533002SEric Biggers 	spin_unlock_irqrestore(&cq_host->lock, flags);
111006533002SEric Biggers 
111106533002SEric Biggers 	/* Ensure all writes are done before interrupts are re-enabled */
111206533002SEric Biggers 	wmb();
111306533002SEric Biggers 
111406533002SEric Biggers 	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
111506533002SEric Biggers 
111606533002SEric Biggers 	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
111706533002SEric Biggers 
111806533002SEric Biggers 	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
111906533002SEric Biggers }
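
/*
 * A sketch of how the recovery callbacks fit together. The MMC-core step and
 * the behaviour of cqhci_recovery_start() (defined earlier in this file) are
 * assumptions here, not taken from the code above:
 *
 *   cqe_recovery_start()  - halts CQHCI so that nothing more completes while
 *                           recovery is in progress.
 *   MMC core              - carries out its own bus-level recovery.
 *   cqe_recovery_finish() - halts again, clears all tasks (disabling and
 *                           re-enabling CQHCI to make sure they really are
 *                           cleared), completes every outstanding mrq with an
 *                           error derived from its slot flags, and finally
 *                           re-enables the interrupt mask.
 */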
112006533002SEric Biggers 
112106533002SEric Biggers static const struct mmc_cqe_ops cqhci_cqe_ops = {
112206533002SEric Biggers 	.cqe_enable = cqhci_enable,
112306533002SEric Biggers 	.cqe_disable = cqhci_disable,
112406533002SEric Biggers 	.cqe_request = cqhci_request,
112506533002SEric Biggers 	.cqe_post_req = cqhci_post_req,
112606533002SEric Biggers 	.cqe_off = cqhci_off,
112706533002SEric Biggers 	.cqe_wait_for_idle = cqhci_wait_for_idle,
112806533002SEric Biggers 	.cqe_timeout = cqhci_timeout,
112906533002SEric Biggers 	.cqe_recovery_start = cqhci_recovery_start,
113006533002SEric Biggers 	.cqe_recovery_finish = cqhci_recovery_finish,
113106533002SEric Biggers };
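
/*
 * These callbacks are how the MMC core drives the CQE: cqhci_init() below
 * publishes them through mmc->cqe_ops.
 */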
113206533002SEric Biggers 
113306533002SEric Biggers struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
113406533002SEric Biggers {
113506533002SEric Biggers 	struct cqhci_host *cq_host;
113606533002SEric Biggers 	struct resource *cqhci_memres = NULL;
113706533002SEric Biggers 
113806533002SEric Biggers 	/* check for and set up the CMDQ interface */
113906533002SEric Biggers 	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
114006533002SEric Biggers 						   "cqhci");
114106533002SEric Biggers 	if (!cqhci_memres) {
114206533002SEric Biggers 		dev_dbg(&pdev->dev, "CMDQ not supported\n");
114306533002SEric Biggers 		return ERR_PTR(-EINVAL);
114406533002SEric Biggers 	}
114506533002SEric Biggers 
114606533002SEric Biggers 	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
114706533002SEric Biggers 	if (!cq_host)
114806533002SEric Biggers 		return ERR_PTR(-ENOMEM);
114906533002SEric Biggers 	cq_host->mmio = devm_ioremap(&pdev->dev,
115006533002SEric Biggers 				     cqhci_memres->start,
115106533002SEric Biggers 				     resource_size(cqhci_memres));
115206533002SEric Biggers 	if (!cq_host->mmio) {
115306533002SEric Biggers 		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
115406533002SEric Biggers 		return ERR_PTR(-EBUSY);
115506533002SEric Biggers 	}
115606533002SEric Biggers 	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
115706533002SEric Biggers 
115806533002SEric Biggers 	return cq_host;
115906533002SEric Biggers }
116006533002SEric Biggers EXPORT_SYMBOL(cqhci_pltfm_init);
116106533002SEric Biggers 
116206533002SEric Biggers static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
116306533002SEric Biggers {
116406533002SEric Biggers 	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
116506533002SEric Biggers }
116606533002SEric Biggers 
116706533002SEric Biggers static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
116806533002SEric Biggers {
116906533002SEric Biggers 	u32 ver = cqhci_readl(cq_host, CQHCI_VER);
117006533002SEric Biggers 
117106533002SEric Biggers 	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
117206533002SEric Biggers }
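
/*
 * Worked example of the version decode above (the field extraction is done by
 * the CQHCI_VER_* macros, presumably in cqhci.h): a CQHCI_VER value whose
 * major field is 5, minor1 field is 1 and minor2 field is 0 is reported by
 * cqhci_init() below as "CQHCI version 5.10", since the minor number is built
 * as minor1 * 10 + minor2.
 */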
117306533002SEric Biggers 
117406533002SEric Biggers int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
117506533002SEric Biggers 	      bool dma64)
117606533002SEric Biggers {
117706533002SEric Biggers 	int err;
117806533002SEric Biggers 
117906533002SEric Biggers 	cq_host->dma64 = dma64;
118006533002SEric Biggers 	cq_host->mmc = mmc;
118106533002SEric Biggers 	cq_host->mmc->cqe_private = cq_host;
118206533002SEric Biggers 
118306533002SEric Biggers 	cq_host->num_slots = NUM_SLOTS;
118406533002SEric Biggers 	cq_host->dcmd_slot = DCMD_SLOT;
118506533002SEric Biggers 
118606533002SEric Biggers 	mmc->cqe_ops = &cqhci_cqe_ops;
118706533002SEric Biggers 
118806533002SEric Biggers 	mmc->cqe_qdepth = NUM_SLOTS;
118906533002SEric Biggers 	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
119006533002SEric Biggers 		mmc->cqe_qdepth -= 1;
119106533002SEric Biggers 
119206533002SEric Biggers 	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
119306533002SEric Biggers 				     sizeof(*cq_host->slot), GFP_KERNEL);
119406533002SEric Biggers 	if (!cq_host->slot) {
119506533002SEric Biggers 		err = -ENOMEM;
119606533002SEric Biggers 		goto out_err;
119706533002SEric Biggers 	}
119806533002SEric Biggers 
11991e80709bSEric Biggers 	err = cqhci_crypto_init(cq_host);
12001e80709bSEric Biggers 	if (err) {
12011e80709bSEric Biggers 		pr_err("%s: CQHCI crypto initialization failed\n",
12021e80709bSEric Biggers 		       mmc_hostname(mmc));
12031e80709bSEric Biggers 		goto out_err;
12041e80709bSEric Biggers 	}
12051e80709bSEric Biggers 
120606533002SEric Biggers 	spin_lock_init(&cq_host->lock);
120706533002SEric Biggers 
120806533002SEric Biggers 	init_completion(&cq_host->halt_comp);
120906533002SEric Biggers 	init_waitqueue_head(&cq_host->wait_queue);
121006533002SEric Biggers 
121106533002SEric Biggers 	pr_info("%s: CQHCI version %u.%02u\n",
121206533002SEric Biggers 		mmc_hostname(mmc), cqhci_ver_major(cq_host),
121306533002SEric Biggers 		cqhci_ver_minor(cq_host));
121406533002SEric Biggers 
121506533002SEric Biggers 	return 0;
121606533002SEric Biggers 
121706533002SEric Biggers out_err:
121806533002SEric Biggers 	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
121906533002SEric Biggers 	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
122006533002SEric Biggers 	       cqhci_ver_minor(cq_host), err);
122106533002SEric Biggers 	return err;
122206533002SEric Biggers }
122306533002SEric Biggers EXPORT_SYMBOL(cqhci_init);
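
/*
 * A minimal sketch of how a platform host driver might wire up the two
 * exports above during probe. foo_add_cqe() is a hypothetical helper; the
 * assumption is that the parent driver supplies the mmc_host, has already set
 * MMC_CAP2_CQE (and optionally MMC_CAP2_CQE_DCMD) in mmc->caps2, and decides
 * whether 64-bit DMA descriptors are used. In a real driver this would live
 * in that driver's own file, with the includes at the top.
 */
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>

#include "cqhci.h"

static int foo_add_cqe(struct platform_device *pdev, struct mmc_host *mmc,
		       bool dma64)
{
	struct cqhci_host *cq_host;

	/* Requires an IORESOURCE_MEM resource named "cqhci" (see above). */
	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host))
		return PTR_ERR(cq_host);

	/* Publish the CQE ops to the MMC core and allocate the slot array. */
	return cqhci_init(cq_host, mmc, dma64);
}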
122406533002SEric Biggers 
122506533002SEric Biggers MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
122606533002SEric Biggers MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
122706533002SEric Biggers MODULE_LICENSE("GPL v2");
1228