Lines matching refs:hba (drivers/ufs/core/ufs-mcq.c, Linux kernel UFS MCQ core)

91 void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)  in ufshcd_mcq_config_mac()  argument
95 val = ufshcd_readl(hba, REG_UFS_MCQ_CFG); in ufshcd_mcq_config_mac()
98 ufshcd_writel(hba, val, REG_UFS_MCQ_CFG); in ufshcd_mcq_config_mac()
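
The three matches above are a read-modify-write of the MCQ configuration register. A minimal sketch of the full function, assuming the MCQ_CFG_MAC_MASK field definition from ufshci.h (needs <linux/bitfield.h> for FIELD_PREP):

/* Program MaxActiveCommands (MAC) into the MCQ config register.
 * Whether max_active_cmds is written 0-based or 1-based varies by
 * kernel version; treat this packing as an assumption. */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
        u32 val;

        val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
        val &= ~MCQ_CFG_MAC_MASK;
        val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
        ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
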
111 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, in ufshcd_mcq_req_to_hwq() argument
117 return &hba->uhq[hwq]; in ufshcd_mcq_req_to_hwq()
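
A sketch of how the request-to-queue mapping works, assuming blk-mq's unique-tag helpers (blk_mq_unique_tag() encodes the hctx index in the upper bits of the tag):

struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
                                           struct request *req)
{
        u32 utag = blk_mq_unique_tag(req);      /* hctx index + per-queue tag */
        u32 hwq = blk_mq_unique_tag_to_hwq(utag);

        return &hba->uhq[hwq];
}
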
132 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba) in ufshcd_mcq_decide_queue_depth() argument
137 mac = ufshcd_mcq_vops_get_hba_mac(hba); in ufshcd_mcq_decide_queue_depth()
139 dev_err(hba->dev, "Failed to get mac, err=%d\n", mac); in ufshcd_mcq_decide_queue_depth()
143 WARN_ON_ONCE(!hba->dev_info.bqueuedepth); in ufshcd_mcq_decide_queue_depth()
149 return min_t(int, mac, hba->dev_info.bqueuedepth); in ufshcd_mcq_decide_queue_depth()
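
The matches show both inputs to the depth decision; reassembled, the function is essentially a guarded min():

int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
        int mac;

        /* MAC: controller-side limit, queried through a vendor op */
        mac = ufshcd_mcq_vops_get_hba_mac(hba);
        if (mac < 0) {
                dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
                return mac;
        }

        /* bqueuedepth: device-side limit from the device descriptor */
        WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
        return min_t(int, mac, hba->dev_info.bqueuedepth);
}
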
152 static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) in ufshcd_mcq_config_nr_queues() argument
156 struct Scsi_Host *host = hba->host; in ufshcd_mcq_config_nr_queues()
159 hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1; in ufshcd_mcq_config_nr_queues()
164 dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n", in ufshcd_mcq_config_nr_queues()
172 hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues; in ufshcd_mcq_config_nr_queues()
173 rem -= hba->nr_queues[HCTX_TYPE_DEFAULT]; in ufshcd_mcq_config_nr_queues()
179 hba->nr_queues[HCTX_TYPE_POLL] = poll_queues; in ufshcd_mcq_config_nr_queues()
180 rem -= hba->nr_queues[HCTX_TYPE_POLL]; in ufshcd_mcq_config_nr_queues()
184 hba->nr_queues[HCTX_TYPE_READ] = read_queues; in ufshcd_mcq_config_nr_queues()
185 rem -= hba->nr_queues[HCTX_TYPE_READ]; in ufshcd_mcq_config_nr_queues()
188 if (!hba->nr_queues[HCTX_TYPE_DEFAULT]) in ufshcd_mcq_config_nr_queues()
189 hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues, in ufshcd_mcq_config_nr_queues()
193 host->nr_hw_queues += hba->nr_queues[i]; in ufshcd_mcq_config_nr_queues()
195 hba->nr_hw_queues = host->nr_hw_queues; in ufshcd_mcq_config_nr_queues()
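
A condensed sketch of the distribution logic above, assuming rw_queues, read_queues and poll_queues are module parameters; the third min3() operand is truncated in the listing and num_possible_cpus() is an assumption:

static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
        int i;
        u32 hba_maxq, rem, tot_queues;
        struct Scsi_Host *host = hba->host;

        /* MAX_QUEUE_SUP is a 0-based field, hence the +1 */
        hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

        tot_queues = read_queues + poll_queues + rw_queues;
        if (hba_maxq < tot_queues) {
                dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
                        tot_queues, hba_maxq);
                return -EOPNOTSUPP;
        }

        rem = hba_maxq;

        /* Honour explicit requests, then give any leftovers to DEFAULT */
        hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
        rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
        hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
        rem -= hba->nr_queues[HCTX_TYPE_POLL];
        hba->nr_queues[HCTX_TYPE_READ] = read_queues;
        rem -= hba->nr_queues[HCTX_TYPE_READ];

        if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
                hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
                                num_possible_cpus());   /* third operand assumed */

        for (i = 0; i < HCTX_MAX_TYPES; i++)
                host->nr_hw_queues += hba->nr_queues[i];

        hba->nr_hw_queues = host->nr_hw_queues;
        return 0;
}
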
199 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) in ufshcd_mcq_memory_alloc() argument
205 for (i = 0; i < hba->nr_hw_queues; i++) { in ufshcd_mcq_memory_alloc()
206 hwq = &hba->uhq[i]; in ufshcd_mcq_memory_alloc()
210 hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, in ufshcd_mcq_memory_alloc()
214 dev_err(hba->dev, "SQE allocation failed\n"); in ufshcd_mcq_memory_alloc()
219 hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, in ufshcd_mcq_memory_alloc()
223 dev_err(hba->dev, "CQE allocation failed\n"); in ufshcd_mcq_memory_alloc()
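
Reassembled from the matches, the allocator gives each hardware queue a DMA-coherent SQ ring (UTP transfer request descriptors) and CQ ring; the upstream code also sanity-checks the returned DMA addresses, elided here:

int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
        struct ufs_hw_queue *hwq;
        size_t utrdl_size, cqe_size;
        int i;

        for (i = 0; i < hba->nr_hw_queues; i++) {
                hwq = &hba->uhq[i];

                /* Submission queue: one UTRD per slot */
                utrdl_size = sizeof(struct utp_transfer_req_desc) *
                             hwq->max_entries;
                hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
                                                         &hwq->sqe_dma_addr,
                                                         GFP_KERNEL);
                if (!hwq->sqe_base_addr) {
                        dev_err(hba->dev, "SQE allocation failed\n");
                        return -ENOMEM;
                }

                /* Completion queue: one CQE per slot */
                cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
                hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
                                                         &hwq->cqe_dma_addr,
                                                         GFP_KERNEL);
                if (!hwq->cqe_base_addr) {
                        dev_err(hba->dev, "CQE allocation failed\n");
                        return -ENOMEM;
                }
        }

        return 0;
}
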
235 (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
237 static void __iomem *mcq_opr_base(struct ufs_hba *hba, in mcq_opr_base() argument
240 struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n]; in mcq_opr_base()
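
The macro and helper above implement per-queue register addressing: each operation-and-runtime group (SQD, SQIS, CQD, CQIS) advertises a base and a stride, so queue i's registers sit at base + stride * i. A sketch, assuming the struct ufshcd_mcq_opr_info_t layout from ufshcd.h:

static void __iomem *mcq_opr_base(struct ufs_hba *hba,
                                  enum ufshcd_mcq_opr n, int i)
{
        struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

        /* base is the ioremapped group base; stride is the per-queue gap */
        return opr->base + opr->stride * i;
}
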
245 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i) in ufshcd_mcq_read_cqis() argument
247 return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS); in ufshcd_mcq_read_cqis()
251 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i) in ufshcd_mcq_write_cqis() argument
253 writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS); in ufshcd_mcq_write_cqis()
261 static int ufshcd_mcq_get_tag(struct ufs_hba *hba, in ufshcd_mcq_get_tag() argument
272 hba->ucdl_dma_addr; in ufshcd_mcq_get_tag()
274 return div_u64(addr, ufshcd_get_ucd_size(hba)); in ufshcd_mcq_get_tag()
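
Tag recovery is pure address arithmetic: the CQE reports the command descriptor's DMA address, so subtracting the base of the UTP command descriptor list and dividing by one descriptor's size yields the task tag. A sketch (the listing truncates the parameter list, so any extra hwq argument is omitted here):

static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
        u64 addr;

        /* CQE_UCD_BA masks off the low address bits reserved by the HW */
        addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
                hba->ucdl_dma_addr;

        /* needs <linux/math64.h> for div_u64() */
        return div_u64(addr, ufshcd_get_ucd_size(hba));
}
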
277 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba, in ufshcd_mcq_process_cqe() argument
281 int tag = ufshcd_mcq_get_tag(hba, hwq, cqe); in ufshcd_mcq_process_cqe()
284 ufshcd_compl_one_cqe(hba, tag, cqe); in ufshcd_mcq_process_cqe()
290 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba, in ufshcd_mcq_compl_all_cqes_lock() argument
298 ufshcd_mcq_process_cqe(hba, hwq); in ufshcd_mcq_compl_all_cqes_lock()
308 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, in ufshcd_mcq_poll_cqe_lock() argument
317 ufshcd_mcq_process_cqe(hba, hwq); in ufshcd_mcq_poll_cqe_lock()
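
A sketch of the polling loop these matches come from: completions are drained under cq_lock until the cached head catches up with the hardware tail. The slot-bookkeeping helper names are assumptions modeled on the driver's internal static inlines:

unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                       struct ufs_hw_queue *hwq)
{
        unsigned long completed_reqs = 0;

        spin_lock(&hwq->cq_lock);
        ufshcd_mcq_update_cq_tail_slot(hwq);    /* snapshot HW tail pointer */
        while (!ufshcd_mcq_is_cq_empty(hwq)) {
                ufshcd_mcq_process_cqe(hba, hwq);       /* complete one cmd */
                ufshcd_mcq_inc_cq_head_slot(hwq);
                completed_reqs++;
        }

        if (completed_reqs)
                ufshcd_mcq_update_cq_head(hwq); /* publish new head to HW */
        spin_unlock(&hwq->cq_lock);

        return completed_reqs;
}
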
330 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) in ufshcd_mcq_make_queues_operational() argument
336 for (i = 0; i < hba->nr_hw_queues; i++) { in ufshcd_mcq_make_queues_operational()
337 hwq = &hba->uhq[i]; in ufshcd_mcq_make_queues_operational()
342 ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr), in ufshcd_mcq_make_queues_operational()
345 ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr), in ufshcd_mcq_make_queues_operational()
348 ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i), in ufshcd_mcq_make_queues_operational()
351 ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i), in ufshcd_mcq_make_queues_operational()
355 ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr), in ufshcd_mcq_make_queues_operational()
358 ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr), in ufshcd_mcq_make_queues_operational()
361 ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i), in ufshcd_mcq_make_queues_operational()
364 ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i), in ufshcd_mcq_make_queues_operational()
368 hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP; in ufshcd_mcq_make_queues_operational()
369 hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP; in ufshcd_mcq_make_queues_operational()
370 hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP; in ufshcd_mcq_make_queues_operational()
371 hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP; in ufshcd_mcq_make_queues_operational()
377 if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]) in ufshcd_mcq_make_queues_operational()
378 writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE); in ufshcd_mcq_make_queues_operational()
381 ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize, in ufshcd_mcq_make_queues_operational()
388 ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize | in ufshcd_mcq_make_queues_operational()
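
The long run of writes above programs each queue in a fixed order. A sketch of the loop, SQ side only (the CQ writes shown above are symmetric, and REG_CQIE interrupt enable applies only to non-poll queues); register and macro names such as MCQ_CFG_n and REG_SQATTR are taken from ufshci.h:

        struct ufs_hw_queue *hwq;
        u32 qsize;
        int i;

        for (i = 0; i < hba->nr_hw_queues; i++) {
                hwq = &hba->uhq[i];
                hwq->id = i;
                /* queue size is expressed 0-based, in dwords */
                qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

                /* 1. SQ ring base address, split into lower/upper 32 bits */
                ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
                               MCQ_CFG_n(REG_SQLBA, i));
                ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
                               MCQ_CFG_n(REG_SQUBA, i));

                /* 2. Tell the HC where this queue's doorbell and
                 *    interrupt-status registers live */
                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
                               MCQ_CFG_n(REG_SQDAO, i));
                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
                               MCQ_CFG_n(REG_SQISAO, i));

                /* 3. Cache doorbell pointers for the submission fast path */
                hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
                hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;

                /* 4. Enable the queue with its size */
                ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
                              MCQ_CFG_n(REG_SQATTR, i));
        }
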
395 void ufshcd_mcq_enable_esi(struct ufs_hba *hba) in ufshcd_mcq_enable_esi() argument
397 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, in ufshcd_mcq_enable_esi()
402 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg) in ufshcd_mcq_config_esi() argument
404 ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA); in ufshcd_mcq_config_esi()
405 ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA); in ufshcd_mcq_config_esi()
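
Reassembled, the ESI plumbing is three register writes: one to switch the feature on, two to point the controller at the MSI doorbell address handed over by the IRQ core:

void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
        /* bit 1 of REG_UFS_MEM_CFG enables Event Specific Interrupts */
        ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
                      REG_UFS_MEM_CFG);
}

void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
        /* lower/upper halves of the ESI base address */
        ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
        ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
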
409 int ufshcd_mcq_init(struct ufs_hba *hba) in ufshcd_mcq_init() argument
411 struct Scsi_Host *host = hba->host; in ufshcd_mcq_init()
415 ret = ufshcd_mcq_config_nr_queues(hba); in ufshcd_mcq_init()
419 ret = ufshcd_vops_mcq_config_resource(hba); in ufshcd_mcq_init()
423 ret = ufshcd_mcq_vops_op_runtime_config(hba); in ufshcd_mcq_init()
425 dev_err(hba->dev, "Operation runtime config failed, ret=%d\n", in ufshcd_mcq_init()
429 hba->uhq = devm_kzalloc(hba->dev, in ufshcd_mcq_init()
430 hba->nr_hw_queues * sizeof(struct ufs_hw_queue), in ufshcd_mcq_init()
432 if (!hba->uhq) { in ufshcd_mcq_init()
433 dev_err(hba->dev, "ufs hw queue memory allocation failed\n"); in ufshcd_mcq_init()
437 for (i = 0; i < hba->nr_hw_queues; i++) { in ufshcd_mcq_init()
438 hwq = &hba->uhq[i]; in ufshcd_mcq_init()
439 hwq->max_entries = hba->nutrs + 1; in ufshcd_mcq_init()
446 hba->dev_cmd_queue = &hba->uhq[0]; in ufshcd_mcq_init()
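
The init sequence the matches trace out: size the queue set, let the vendor glue map its MCQ resources, allocate the hw-queue array, then point the device-command queue at hwq[0] (tags are shared with the SCSI host, hence max_entries = nutrs + 1). A sketch; the lock/mutex initialisation and the host_tagset line are assumptions:

int ufshcd_mcq_init(struct ufs_hba *hba)
{
        struct Scsi_Host *host = hba->host;
        struct ufs_hw_queue *hwq;
        int ret, i;

        ret = ufshcd_mcq_config_nr_queues(hba);
        if (ret)
                return ret;

        ret = ufshcd_vops_mcq_config_resource(hba);
        if (ret)
                return ret;

        ret = ufshcd_mcq_vops_op_runtime_config(hba);
        if (ret) {
                dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
                        ret);
                return ret;
        }

        hba->uhq = devm_kzalloc(hba->dev,
                                hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
                                GFP_KERNEL);
        if (!hba->uhq) {
                dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
                return -ENOMEM;
        }

        for (i = 0; i < hba->nr_hw_queues; i++) {
                hwq = &hba->uhq[i];
                hwq->max_entries = hba->nutrs + 1;
                spin_lock_init(&hwq->sq_lock);
                spin_lock_init(&hwq->cq_lock);
                mutex_init(&hwq->sq_mutex);
        }

        /* The first HW queue serves device management commands */
        hba->dev_cmd_queue = &hba->uhq[0];

        host->host_tagset = 1;
        return 0;
}
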
452 static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq) in ufshcd_mcq_sq_stop() argument
458 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) in ufshcd_mcq_sq_stop()
461 writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC); in ufshcd_mcq_sq_stop()
462 reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS; in ufshcd_mcq_sq_stop()
466 dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n", in ufshcd_mcq_sq_stop()
471 static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq) in ufshcd_mcq_sq_start() argument
477 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) in ufshcd_mcq_sq_start()
480 writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC); in ufshcd_mcq_sq_start()
481 reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS; in ufshcd_mcq_sq_start()
485 dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n", in ufshcd_mcq_sq_start()
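
Stop and start are mirror-image doorbell handshakes: write a control bit to SQRTC, then poll the matching status bit in SQRTS. A sketch of the stop side (start is the same with SQ_START and the polarity of the SQ_STS test inverted); needs <linux/iopoll.h> for read_poll_timeout(), and the quirk-path return value is an assumption:

static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
        void __iomem *reg;
        u32 id = hwq->id, val;
        int err;

        if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
                return 0;

        writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
        reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
        /* poll SQRTS.STS every 20us, up to MCQ_POLL_US in total */
        err = read_poll_timeout(readl, val, val & SQ_STS, 20,
                                MCQ_POLL_US, false, reg);
        if (err)
                dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
                        __func__, id, err);
        return err;
}
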
498 int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) in ufshcd_mcq_sq_cleanup() argument
500 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_mcq_sq_cleanup()
507 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) in ufshcd_mcq_sq_cleanup()
510 if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) { in ufshcd_mcq_sq_cleanup()
513 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_mcq_sq_cleanup()
515 hwq = hba->dev_cmd_queue; in ufshcd_mcq_sq_cleanup()
523 err = ufshcd_mcq_sq_stop(hba, hwq); in ufshcd_mcq_sq_cleanup()
529 opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id); in ufshcd_mcq_sq_cleanup()
540 dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n", in ufshcd_mcq_sq_cleanup()
544 if (ufshcd_mcq_sq_start(hba, hwq)) in ufshcd_mcq_sq_cleanup()
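
Between the sq_stop and sq_start calls visible above sits the nexus clean-up handshake. A sketch of that middle section as a hypothetical helper (the name ufshcd_mcq_nexus_cleanup and the SQCTI bit packing are assumptions):

static int ufshcd_mcq_nexus_cleanup(struct ufs_hba *hba,
                                    struct ufs_hw_queue *hwq,
                                    struct ufshcd_lrb *lrbp, int task_tag)
{
        void __iomem *opr_sqd_base, *reg;
        u32 nexus, val;

        /* SQCTI identifies the victim: LUN in bits 15:8, task tag in 7:0 */
        nexus = lrbp->lun << 8 | task_tag;
        opr_sqd_base = mcq_opr_base(hba, OPR_SQD, hwq->id);
        writel(nexus, opr_sqd_base + REG_SQCTI);

        /* SQRTC.ICU = 1: ask the HC to clean up that command */
        writel(SQ_ICU, opr_sqd_base + REG_SQRTC);

        /* Poll SQRTS.CUS; the HC's result code lands in SQRTS.RTC */
        reg = opr_sqd_base + REG_SQRTS;
        return read_poll_timeout(readl, val, val & SQ_CUS, 20,
                                 MCQ_POLL_US, false, reg);
}
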
576 static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, in ufshcd_mcq_sqe_search() argument
579 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_mcq_sqe_search()
586 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) in ufshcd_mcq_sqe_search()
591 ufshcd_mcq_sq_stop(hba, hwq); in ufshcd_mcq_sqe_search()
614 ufshcd_mcq_sq_start(hba, hwq); in ufshcd_mcq_sqe_search()
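
A sketch of the search itself: with the SQ frozen, walk the ring from head to tail comparing each descriptor's UCD base address against the target command's; a hit is nullified in place so the HC skips it. The slot helper, the nullify call and the descriptor field name are assumptions modeled on the driver's statics:

static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
                                  struct ufs_hw_queue *hwq, int task_tag)
{
        struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
        struct utp_transfer_req_desc *utrd;
        u64 addr, match;
        u32 slot;
        bool ret = false;

        if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
                return true;

        mutex_lock(&hwq->sq_mutex);
        ufshcd_mcq_sq_stop(hba, hwq);

        addr = le64_to_cpu(lrbp->utr_descriptor_ptr->command_desc_base_addr) &
               CQE_UCD_BA;
        slot = ufshcd_mcq_get_sq_head_slot(hwq);        /* assumed helper */

        while (slot != hwq->sq_tail_slot) {
                utrd = (struct utp_transfer_req_desc *)hwq->sqe_base_addr +
                       slot;
                match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
                if (match == addr) {
                        ufshcd_mcq_nullify_sqe(utrd);   /* assumed helper */
                        ret = true;
                        break;
                }
                if (++slot == hwq->max_entries)
                        slot = 0;
        }

        ufshcd_mcq_sq_start(hba, hwq);
        mutex_unlock(&hwq->sq_mutex);
        return ret;
}
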
628 struct ufs_hba *hba = shost_priv(host); in ufshcd_mcq_abort() local
630 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_mcq_abort()
636 dev_err(hba->dev, in ufshcd_mcq_abort()
644 dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", in ufshcd_mcq_abort()
649 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_mcq_abort()
651 if (ufshcd_mcq_sqe_search(hba, hwq, tag)) { in ufshcd_mcq_abort()
656 dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n", in ufshcd_mcq_abort()
666 err = ufshcd_try_to_abort_task(hba, tag); in ufshcd_mcq_abort()
668 dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err); in ufshcd_mcq_abort()
675 ufshcd_release_scsi_cmd(hba, lrbp); in ufshcd_mcq_abort()
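
A condensed sketch of the abort decision tree (the upstream function also rejects commands with no lrbp->cmd and tags that already failed an earlier abort, per the two dev_err messages above):

static int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ufs_hba *hba = shost_priv(host);
        int tag = scsi_cmd_to_rq(cmd)->tag;
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
        struct ufs_hw_queue *hwq;
        int err;

        hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

        if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
                /* Never left the SQ: nullified there, finish locally */
                dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
                        __func__, hwq->id, tag);
        } else {
                /* Already fetched by the device: device-level abort */
                err = ufshcd_try_to_abort_task(hba, tag);
                if (err) {
                        dev_err(hba->dev, "%s: device abort failed %d\n",
                                __func__, err);
                        return FAILED;
                }
        }

        ufshcd_release_scsi_cmd(hba, lrbp);
        return SUCCESS;
}
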