xref: /openbmc/linux/drivers/ufs/core/ufs-mcq.c (revision 09138ba68c1487a42c400485e999386a74911dbc)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16

#define MCQ_CFG_MAC_MASK	GENMASK(16, 8)
#define MCQ_QCFG_SIZE		0x40
#define MCQ_ENTRY_SIZE_IN_DWORD	8
#define CQE_UCD_BA GENMASK_ULL(63, 7)

/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000

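/*
 * The rw_queues, read_queues and poll_queues module parameters below select
 * how many hardware queues are assigned to the HCTX_TYPE_DEFAULT,
 * HCTX_TYPE_READ and HCTX_TYPE_POLL blk-mq queue types respectively; the
 * split is applied in ufshcd_mcq_config_nr_queues().
 */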
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");

/**
 * ufshcd_mcq_config_mac - Set the Max Active Commands (MAC).
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than max_active_cmds commands to the device
 * at any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request will be or has
 * been queued. %NULL if the request has already been freed.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					 struct request *req)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}

/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue-depth on success, a negative error code on failure
 *
 * MAC - Max. Active Command of the Host Controller (HC)
 * HC wouldn't send more than this many commands to the device.
 * It is mandatory to implement get_hba_mac() to enable MCQ mode.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and ufs device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	/* Mandatory to implement get_hba_mac() */
	mac = ufshcd_mcq_vops_get_hba_mac(hba);
	if (mac < 0) {
		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
		return mac;
	}

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
	 * max. value of bqueuedepth = 256, mac is host dependent.
	 * It is mandatory for UFS device to define bQueueDepth if
	 * shared queuing architecture is enabled.
	 */
	return min_t(int, mac, hba->dev_info.bqueuedepth);
}

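/*
 * Distribute the available hardware queues among the default (rw), read and
 * poll queue types based on the module parameters, after checking that the
 * requested total does not exceed the controller's MAXQ capability.
 */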
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* maxq is 0 based value */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

	tot_queues = read_queues + poll_queues + rw_queues;

	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	rem = hba_maxq;

	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues;
	return 0;
}

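/*
 * Allocate DMA-coherent memory for the Submission Queue Entries (SQEs) and
 * Completion Queue Entries (CQEs) of every hardware queue.
 *
 * Return: 0 on success; -ENOMEM if any allocation fails.
 */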
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->sqe_dma_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_dma_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i)	((r) + MCQ_QCFG_SIZE * (i))

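/*
 * Return the base address of operation and runtime register group @n for
 * hardware queue @i (group base plus @i times the per-queue stride).
 */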
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
					 enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;
}

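/*
 * Accessors for the per-queue Completion Queue Interrupt Status (CQIS)
 * register of hardware queue @i.
 */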
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);

void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);

/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
				     struct ufs_hw_queue *hwq,
				     struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

	return div_u64(addr, ufshcd_get_ucd_size(hba));
}

static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);

	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processing the CQE, mark it as an empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}

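/*
 * Walk every CQ slot under cq_lock and complete any valid entry, then
 * resynchronize the CQ head with the current tail. Intended for paths that
 * must drain all outstanding completions, e.g. error handling.
 */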
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}

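/*
 * Process the CQ entries between the driver's head slot and the hardware
 * tail pointer under cq_lock.
 *
 * Return: the number of requests completed in this call.
 */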
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs = 0;
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	ufshcd_mcq_update_cq_tail_slot(hwq);
	while (!ufshcd_mcq_is_cq_empty(hwq)) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		completed_reqs++;
	}

	if (completed_reqs)
		ufshcd_mcq_update_cq_head(hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

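/*
 * Program the per-queue SQ/CQ base addresses, doorbell and interrupt status
 * offsets, enable the queues, and cache the doorbell register addresses.
 * The Tail Entry Push Status interrupt is enabled only for non-poll queues.
 */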
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	u16 qsize;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->id = i;
		qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

		/* Submission Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
			      MCQ_CFG_n(REG_SQLBA, i));
		/* Submission Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
			      MCQ_CFG_n(REG_SQUBA, i));
		/* Submission Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
			      MCQ_CFG_n(REG_SQDAO, i));
		/* Submission Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
			      MCQ_CFG_n(REG_SQISAO, i));

		/* Completion Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
			      MCQ_CFG_n(REG_CQLBA, i));
		/* Completion Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
			      MCQ_CFG_n(REG_CQUBA, i));
		/* Completion Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
			      MCQ_CFG_n(REG_CQDAO, i));
		/* Completion Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
			      MCQ_CFG_n(REG_CQISAO, i));

		/* Save the base addresses for quicker access */
		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
		hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
		hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
		hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

		/* Reinitializing is needed upon HC reset */
		hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

		/* Enable Tail Entry Push Status interrupt only for non-poll queues */
		if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
			writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

		/* Completion Queue Enable|Size to Completion Queue Attribute */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
			      MCQ_CFG_n(REG_CQATTR, i));

		/*
		 * Submission Queue Enable|Size|Completion Queue ID to
		 * Submission Queue Attribute
		 */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
			      (i << QUEUE_ID_OFFSET),
			      MCQ_CFG_n(REG_SQATTR, i));
	}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);

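/*
 * Event Specific Interrupt (ESI) support: ufshcd_mcq_enable_esi() sets bit 1
 * of REG_UFS_MEM_CFG to enable ESI generation, and ufshcd_mcq_config_esi()
 * programs the ESI target address from the MSI message.
 */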
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
		      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);

void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);

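/*
 * One-time MCQ initialization: configure the number of hardware queues,
 * set up vendor-specific MCQ resources, allocate the ufs_hw_queue array,
 * initialize the per-queue locks, and reserve queue 0 for device commands.
 */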
int ufshcd_mcq_init(struct ufs_hba *hba)
{
	struct Scsi_Host *host = hba->host;
	struct ufs_hw_queue *hwq;
	int ret, i;

	ret = ufshcd_mcq_config_nr_queues(hba);
	if (ret)
		return ret;

	ret = ufshcd_vops_mcq_config_resource(hba);
	if (ret)
		return ret;

	ret = ufshcd_mcq_vops_op_runtime_config(hba);
	if (ret) {
		dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
			ret);
		return ret;
	}
	hba->uhq = devm_kzalloc(hba->dev,
				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
				GFP_KERNEL);
	if (!hba->uhq) {
		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->max_entries = hba->nutrs + 1;
		spin_lock_init(&hwq->sq_lock);
		spin_lock_init(&hwq->cq_lock);
		mutex_init(&hwq->sq_mutex);
	}

	/* The very first HW queue serves device commands */
	hba->dev_cmd_queue = &hba->uhq[0];

	host->host_tagset = 1;
	return 0;
}

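/*
 * ufshcd_mcq_sq_stop() and ufshcd_mcq_sq_start() toggle SQ fetching by
 * writing SQ_STOP/SQ_START to the SQ run-time control register and polling
 * the run-time status register for up to MCQ_POLL_US. Controllers with
 * UFSHCD_QUIRK_MCQ_BROKEN_RTC do not support this and return -ETIMEDOUT.
 */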
static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct ufs_hw_queue *hwq;
	void __iomem *reg, *opr_sqd_base;
	u32 nexus, id, val, rtc;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
		if (!cmd)
			return -EINVAL;
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
		if (!hwq)
			return 0;
	} else {
		hwq = hba->dev_cmd_queue;
	}

	id = hwq->id;

	mutex_lock(&hwq->sq_mutex);

	/* stop the SQ fetching before working on it */
	err = ufshcd_mcq_sq_stop(hba, hwq);
	if (err)
		goto unlock;

	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
	nexus = lrbp->lun << 8 | task_tag;
	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
	writel(nexus, opr_sqd_base + REG_SQCTI);

	/* Initiate Cleanup */
	writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
		opr_sqd_base + REG_SQRTC);

	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
	reg = opr_sqd_base + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
				MCQ_POLL_US, false, reg);
	rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg));
	if (err || rtc)
		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%d\n",
			__func__, id, task_tag, err, rtc);

	if (ufshcd_mcq_sq_start(hba, hwq))
		err = -ETIMEDOUT;

unlock:
	mutex_unlock(&hwq->sq_mutex);
	return err;
}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct utp_transfer_req_desc *utrd;
	__le64  cmd_desc_base_addr;
	bool ret = false;
	u64 addr, match;
	u32 sq_head_slot;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return true;

	mutex_lock(&hwq->sq_mutex);

	ufshcd_mcq_sq_stop(hba, hwq);
	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
	if (sq_head_slot == hwq->sq_tail_slot)
		goto out;

	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

	while (sq_head_slot != hwq->sq_tail_slot) {
		utrd = hwq->sqe_base_addr + sq_head_slot;
		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
		if (addr == match) {
			ufshcd_mcq_nullify_sqe(utrd);
			ret = true;
			goto out;
		}

		sq_head_slot++;
		if (sq_head_slot == hwq->max_entries)
			sq_head_slot = 0;
	}

out:
	ufshcd_mcq_sq_start(hba, hwq);
	mutex_unlock(&hwq->sq_mutex);
	return ret;
}

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED.
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct ufs_hw_queue *hwq;
	unsigned long flags;
	int err;

	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
		dev_err(hba->dev,
			"%s: skip abort. cmd at tag %d already completed.\n",
			__func__, tag);
		return FAILED;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
			__func__, tag);
		return FAILED;
	}

	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
		/*
		 * Failure. The command should not have been "stuck" in the SQ
		 * for so long that it ended up being aborted.
		 */
		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
			__func__, hwq->id, tag);
		return FAILED;
	}

	/*
	 * The command is not in the submission queue, and it is not
	 * in the completion queue either. Query the device to see if
	 * the command is being processed in the device.
	 */
	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
		lrbp->req_abort_skip = true;
		return FAILED;
	}

	spin_lock_irqsave(&hwq->cq_lock, flags);
	if (ufshcd_cmd_inflight(lrbp->cmd))
		ufshcd_release_scsi_cmd(hba, lrbp);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return SUCCESS;
}