xref: /openbmc/linux/drivers/ufs/core/ufs-mcq.c (revision feeeeb4c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16

#define MCQ_CFG_MAC_MASK	GENMASK(16, 8)
#define MCQ_QCFG_SIZE		0x40
#define MCQ_ENTRY_SIZE_IN_DWORD	8
#define CQE_UCD_BA GENMASK_ULL(63, 7)

/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000

static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");

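/*
 * Illustrative usage of the queue-count parameters above (assumes this file
 * is built into the ufshcd_core module, which is not stated here):
 *
 *   modprobe ufshcd_core rw_queues=8 read_queues=2 poll_queues=1
 *
 * or, for a built-in driver, on the kernel command line:
 *
 *   ufshcd_core.rw_queues=8
 */
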
/**
 * ufshcd_mcq_config_mac - Set the Maximum Active Commands (MAC).
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than max_active_cmds commands to the device
 * at any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request will be or has
 * been queued. %NULL if the request has already been freed.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					 struct request *req)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}

/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue depth on success, a negative error code on failure.
 *
 * MAC - Maximum Active Commands of the Host Controller (HC).
 * The HC won't send more than this many commands to the device.
 * It is mandatory to implement get_hba_mac() to enable MCQ mode.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and the UFS device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	/* Mandatory to implement get_hba_mac() */
	mac = ufshcd_mcq_vops_get_hba_mac(hba);
	if (mac < 0) {
		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
		return mac;
	}

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
	 * max. value of bqueuedepth = 256, mac is host dependent.
	 * It is mandatory for UFS device to define bQueueDepth if
	 * shared queuing architecture is enabled.
	 */
	return min_t(int, mac, hba->dev_info.bqueuedepth);
}

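/*
 * Distribute the controller-supported hardware queues among the default
 * (read/write), read and poll hctx types, honouring the rw_queues,
 * read_queues and poll_queues module parameters, and record the total in
 * host->nr_hw_queues.
 */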
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* maxq is 0 based value */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

	tot_queues = read_queues + poll_queues + rw_queues;

	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	rem = hba_maxq;

	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues;
	return 0;
}

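/**
 * ufshcd_mcq_memory_alloc - allocate the SQE and CQE rings for each hw queue
 * @hba: per adapter instance
 *
 * Allocate DMA-coherent memory for the Submission Queue Entries and
 * Completion Queue Entries of every hardware queue. The allocations are
 * device managed and released automatically on driver detach.
 *
 * Return: 0 on success; -ENOMEM if an allocation fails.
 */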
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->sqe_dma_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_dma_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}


/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i)	((r) + MCQ_QCFG_SIZE * (i))
#define MCQ_OPR_OFFSET_n(p, i) \
	(hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))

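/* Return the mapped base address of operation/runtime register group @n for queue @i */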
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
					 enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;
}

u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);

void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);

/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
				     struct ufs_hw_queue *hwq,
				     struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

	return div_u64(addr, ufshcd_get_ucd_size(hba));
}

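/*
 * Complete the command referenced by the current CQ entry. The UCD base
 * address in the entry is cleared afterwards so that a later re-scan of the
 * ring (e.g. from ufshcd_mcq_compl_all_cqes_lock()) does not complete the
 * same entry twice.
 */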
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);

	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processing the CQE, mark it as an empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}

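/**
 * ufshcd_mcq_compl_all_cqes_lock - complete every outstanding CQ entry
 * @hba: per adapter instance
 * @hwq: hardware queue to drain
 *
 * Walk all max_entries slots of the completion queue under the CQ lock,
 * complete any entry that still carries a valid UCD base address, and then
 * resynchronize the head slot with the tail slot.
 */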
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}

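/**
 * ufshcd_mcq_poll_cqe_lock - process new completion queue entries
 * @hba: per adapter instance
 * @hwq: hardware queue to poll
 *
 * Under the CQ lock, refresh the tail slot from the hardware, complete every
 * entry between the head and tail slots, and then write the new head back to
 * the CQ head pointer register.
 *
 * Return: the number of requests completed.
 */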
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs = 0;
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	ufshcd_mcq_update_cq_tail_slot(hwq);
	while (!ufshcd_mcq_is_cq_empty(hwq)) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		completed_reqs++;
	}

	if (completed_reqs)
		ufshcd_mcq_update_cq_head(hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

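/**
 * ufshcd_mcq_make_queues_operational - program and enable all hardware queues
 * @hba: per adapter instance
 *
 * For every hardware queue, program the SQ/CQ base addresses, doorbell and
 * interrupt status offsets, enable the Tail Entry Push Status interrupt for
 * the non-poll queues, and enable the submission and completion queues via
 * their attribute registers. The ring slot pointers are also reset here,
 * since reinitialization is needed after a host controller reset.
 */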
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	u16 qsize;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->id = i;
		qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

		/* Submission Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
			      MCQ_CFG_n(REG_SQLBA, i));
		/* Submission Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
			      MCQ_CFG_n(REG_SQUBA, i));
		/* Submission Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
			      MCQ_CFG_n(REG_SQDAO, i));
		/* Submission Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
			      MCQ_CFG_n(REG_SQISAO, i));

		/* Completion Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
			      MCQ_CFG_n(REG_CQLBA, i));
		/* Completion Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
			      MCQ_CFG_n(REG_CQUBA, i));
		/* Completion Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
			      MCQ_CFG_n(REG_CQDAO, i));
		/* Completion Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
			      MCQ_CFG_n(REG_CQISAO, i));

		/* Save the base addresses for quicker access */
		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
		hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
		hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
		hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

		/* Reinitializing is needed upon HC reset */
		hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

		/* Enable Tail Entry Push Status interrupt only for non-poll queues */
		if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
			writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

		/* Completion Queue Enable|Size to Completion Queue Attribute */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
			      MCQ_CFG_n(REG_CQATTR, i));

		/*
		 * Submission Queue Enable|Size|Completion Queue ID to
		 * Submission Queue Attribute
		 */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
			      (i << QUEUE_ID_OFFSET),
			      MCQ_CFG_n(REG_SQATTR, i));
	}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);

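/**
 * ufshcd_mcq_enable_esi - enable Event Specific Interrupt support
 * @hba: per adapter instance
 *
 * Set the ESI enable bit (bit 1) in REG_UFS_MEM_CFG so that MCQ completion
 * events are reported through Event Specific Interrupts.
 */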
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
		      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);

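/**
 * ufshcd_mcq_config_esi - program the ESI base address
 * @hba: per adapter instance
 * @msg: MSI message whose address is used as the ESI target
 *
 * Write the lower and upper 32 bits of the MSI address to the ESI base
 * address registers (REG_UFS_ESILBA/REG_UFS_ESIUBA).
 */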
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);

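/**
 * ufshcd_mcq_init - set up the driver side of MCQ support
 * @hba: per adapter instance
 *
 * Decide the number of hardware queues, let the vendor driver map its MCQ
 * resources and operation/runtime registers, allocate the ufs_hw_queue
 * array, initialize the per-queue locks, and reserve the first hardware
 * queue for device management commands.
 *
 * Return: 0 on success; a negative error code otherwise.
 */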
int ufshcd_mcq_init(struct ufs_hba *hba)
{
	struct Scsi_Host *host = hba->host;
	struct ufs_hw_queue *hwq;
	int ret, i;

	ret = ufshcd_mcq_config_nr_queues(hba);
	if (ret)
		return ret;

	ret = ufshcd_vops_mcq_config_resource(hba);
	if (ret)
		return ret;

	ret = ufshcd_mcq_vops_op_runtime_config(hba);
	if (ret) {
		dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
			ret);
		return ret;
	}
	hba->uhq = devm_kzalloc(hba->dev,
				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
				GFP_KERNEL);
	if (!hba->uhq) {
		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->max_entries = hba->nutrs + 1;
		spin_lock_init(&hwq->sq_lock);
		spin_lock_init(&hwq->cq_lock);
		mutex_init(&hwq->sq_mutex);
	}

	/* The very first HW queue serves device commands */
	hba->dev_cmd_queue = &hba->uhq[0];

	host->host_tagset = 1;
	return 0;
}

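/*
 * Stop the submission queue from fetching new entries and poll the SQ
 * Run-Time Status register until the hardware reports the stopped state.
 */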
static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

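/*
 * Restart a previously stopped submission queue and poll the SQ Run-Time
 * Status register until the hardware leaves the stopped state.
 */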
static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct ufs_hw_queue *hwq;
	void __iomem *reg, *opr_sqd_base;
	u32 nexus, id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
		if (!cmd)
			return -EINVAL;
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
		if (!hwq)
			return 0;
	} else {
		hwq = hba->dev_cmd_queue;
	}

	id = hwq->id;

	mutex_lock(&hwq->sq_mutex);

	/* stop the SQ fetching before working on it */
	err = ufshcd_mcq_sq_stop(hba, hwq);
	if (err)
		goto unlock;

	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
	nexus = lrbp->lun << 8 | task_tag;
	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
	writel(nexus, opr_sqd_base + REG_SQCTI);

	/* SQRTCy.ICU = 1 */
	writel(SQ_ICU, opr_sqd_base + REG_SQRTC);

	/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
	reg = opr_sqd_base + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
			__func__, id, task_tag,
			FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));

	if (ufshcd_mcq_sq_start(hba, hwq))
		err = -ETIMEDOUT;

unlock:
	mutex_unlock(&hwq->sq_mutex);
	return err;
}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct utp_transfer_req_desc *utrd;
	__le64  cmd_desc_base_addr;
	bool ret = false;
	u64 addr, match;
	u32 sq_head_slot;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return true;

	mutex_lock(&hwq->sq_mutex);

	ufshcd_mcq_sq_stop(hba, hwq);
	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
	if (sq_head_slot == hwq->sq_tail_slot)
		goto out;

	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

	while (sq_head_slot != hwq->sq_tail_slot) {
		utrd = hwq->sqe_base_addr + sq_head_slot;
		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
		if (addr == match) {
			ufshcd_mcq_nullify_sqe(utrd);
			ret = true;
			goto out;
		}

		sq_head_slot++;
		if (sq_head_slot == hwq->max_entries)
			sq_head_slot = 0;
	}

out:
	ufshcd_mcq_sq_start(hba, hwq);
	mutex_unlock(&hwq->sq_mutex);
	return ret;
}

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED error codes
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct ufs_hw_queue *hwq;
	unsigned long flags;
	int err;

	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
		dev_err(hba->dev,
			"%s: skip abort. cmd at tag %d already completed.\n",
			__func__, tag);
		return FAILED;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
			__func__, tag);
		return FAILED;
	}

	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
		/*
		 * Failure. The command should not have been "stuck" in the SQ
		 * for so long that it ended up being aborted.
		 */
		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
			__func__, hwq->id, tag);
		return FAILED;
	}

	/*
	 * The command is not in the submission queue, and it is not
	 * in the completion queue either. Query the device to see if
	 * the command is being processed in the device.
	 */
	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
		lrbp->req_abort_skip = true;
		return FAILED;
	}

	spin_lock_irqsave(&hwq->cq_lock, flags);
	if (ufshcd_cmd_inflight(lrbp->cmd))
		ufshcd_release_scsi_cmd(hba, lrbp);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return SUCCESS;
}