Lines matching refs:mq — cross-reference hits for the identifier mq in the MMC block queue code. Each hit shows its source line number, the matching code, and the enclosing function; the trailing argument/local tag records whether mq is bound as a function parameter or a local variable on that line.

26 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)  in mmc_cqe_dcmd_busy()  argument
29 return mq->in_flight[MMC_ISSUE_DCMD]; in mmc_cqe_dcmd_busy()
32 void mmc_cqe_check_busy(struct mmc_queue *mq) in mmc_cqe_check_busy() argument
34 if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq)) in mmc_cqe_check_busy()
35 mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY; in mmc_cqe_check_busy()
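
The pair above implements a one-DCMD-at-a-time gate for CQE (Command Queue Engine) hosts: the dispatch path sets MMC_CQE_DCMD_BUSY when it finds a direct command already in flight, and mmc_cqe_check_busy() clears the bit once that command retires. Below is a minimal model of the same set-on-contention, clear-on-completion pattern; the names and struct layout are assumptions, not the kernel's, and the mq->lock protection is elided:

    #include <stdbool.h>

    /* Hypothetical model of the DCMD gate; in queue.c these fields are
     * protected by mq->lock. */
    enum issue_type { ISSUE_SYNC, ISSUE_ASYNC, ISSUE_DCMD, ISSUE_MAX };
    #define CQE_DCMD_BUSY (1 << 0)

    struct mq_model {
        int in_flight[ISSUE_MAX];   /* per-issue-type counters */
        unsigned int cqe_busy;      /* reasons dispatch is blocked */
    };

    static bool dcmd_busy(struct mq_model *mq)
    {
        return mq->in_flight[ISSUE_DCMD] > 0;   /* only one DCMD at a time */
    }

    static void cqe_check_busy(struct mq_model *mq)
    {
        /* Completion side: lift the gate once the DCMD has retired. */
        if ((mq->cqe_busy & CQE_DCMD_BUSY) && !dcmd_busy(mq))
            mq->cqe_busy &= ~CQE_DCMD_BUSY;
    }
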
60 enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req) in mmc_issue_type() argument
62 struct mmc_host *host = mq->card->host; in mmc_issue_type()
73 static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq) in __mmc_cqe_recovery_notifier() argument
75 if (!mq->recovery_needed) { in __mmc_cqe_recovery_notifier()
76 mq->recovery_needed = true; in __mmc_cqe_recovery_notifier()
77 schedule_work(&mq->recovery_work); in __mmc_cqe_recovery_notifier()
87 struct mmc_queue *mq = q->queuedata; in mmc_cqe_recovery_notifier() local
90 spin_lock_irqsave(&mq->lock, flags); in mmc_cqe_recovery_notifier()
91 __mmc_cqe_recovery_notifier(mq); in mmc_cqe_recovery_notifier()
92 spin_unlock_irqrestore(&mq->lock, flags); in mmc_cqe_recovery_notifier()
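
Two kernel conventions are at work here: the leading double underscore on __mmc_cqe_recovery_notifier() marks the variant that expects the caller to already hold mq->lock, and the recovery_needed test-and-set makes the scheduling idempotent, so repeated notifications queue the recovery work only once. The locked wrapper uses spin_lock_irqsave() because it can be reached from interrupt context. A sketch of the idiom, assuming a kernel-module setting with hypothetical my_* names:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_queue {
        spinlock_t lock;
        bool recovery_needed;
        struct work_struct recovery_work;
    };

    /* Caller holds q->lock; the double underscore marks the unlocked variant. */
    static void __my_recovery_notifier(struct my_queue *q)
    {
        if (!q->recovery_needed) {          /* idempotent: schedule only once */
            q->recovery_needed = true;
            schedule_work(&q->recovery_work);
        }
    }

    /* Safe from any context, including IRQ, hence irqsave. */
    static void my_recovery_notifier(struct my_queue *q)
    {
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __my_recovery_notifier(q);
        spin_unlock_irqrestore(&q->lock, flags);
    }
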
99 struct mmc_queue *mq = req->q->queuedata; in mmc_cqe_timed_out() local
100 struct mmc_host *host = mq->card->host; in mmc_cqe_timed_out()
101 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); in mmc_cqe_timed_out()
123 struct mmc_queue *mq = q->queuedata; in mmc_mq_timed_out() local
124 struct mmc_card *card = mq->card; in mmc_mq_timed_out()
129 spin_lock_irqsave(&mq->lock, flags); in mmc_mq_timed_out()
130 ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled; in mmc_mq_timed_out()
131 spin_unlock_irqrestore(&mq->lock, flags); in mmc_mq_timed_out()
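
mmc_mq_timed_out() shows the snapshot idiom: the shared flags are sampled under mq->lock into a local, the lock is dropped, and the decision is made on the copy, so the timeout path cannot race with the recovery worker flipping recovery_needed. A sketch reusing the my_queue type from the previous block; collapsing the check to a single flag is a simplification, since the real test also consults host->cqe_enabled and host->hsq_enabled:

    #include <linux/blk-mq.h>

    static enum blk_eh_timer_return my_timed_out(struct my_queue *q)
    {
        unsigned long flags;
        bool ignore;

        spin_lock_irqsave(&q->lock, flags);
        ignore = q->recovery_needed;    /* recovery already owns this request */
        spin_unlock_irqrestore(&q->lock, flags);

        /* Resetting the timer defers the request to the recovery worker;
         * otherwise the driver handles the timeout itself. */
        return ignore ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
    }
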
138 struct mmc_queue *mq = container_of(work, struct mmc_queue, in mmc_mq_recovery_handler() local
140 struct request_queue *q = mq->queue; in mmc_mq_recovery_handler()
141 struct mmc_host *host = mq->card->host; in mmc_mq_recovery_handler()
143 mmc_get_card(mq->card, &mq->ctx); in mmc_mq_recovery_handler()
145 mq->in_recovery = true; in mmc_mq_recovery_handler()
148 mmc_blk_cqe_recovery(mq); in mmc_mq_recovery_handler()
150 mmc_blk_mq_recovery(mq); in mmc_mq_recovery_handler()
152 mq->in_recovery = false; in mmc_mq_recovery_handler()
154 spin_lock_irq(&mq->lock); in mmc_mq_recovery_handler()
155 mq->recovery_needed = false; in mmc_mq_recovery_handler()
156 spin_unlock_irq(&mq->lock); in mmc_mq_recovery_handler()
161 mmc_put_card(mq->card, &mq->ctx); in mmc_mq_recovery_handler()
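
The recovery handler runs in process context, which is why the notifier above only schedules work instead of recovering inline: recovery needs to claim the host exclusively and may sleep. Note the ordering: in_recovery brackets the actual recovery calls, and recovery_needed is cleared under the lock only afterwards, re-arming the notifier. A skeleton of the same shape, continuing the earlier sketch, with hypothetical my_claim()/my_release() standing in for mmc_get_card()/mmc_put_card() and my_queue assumed to carry a bool in_recovery member:

    void my_claim(struct my_queue *q);          /* like mmc_get_card() */
    void my_release(struct my_queue *q);        /* like mmc_put_card() */
    void my_do_recovery(struct my_queue *q);    /* CQE or plain blk-mq recovery */

    static void my_recovery_handler(struct work_struct *work)
    {
        struct my_queue *q = container_of(work, struct my_queue, recovery_work);

        my_claim(q);                    /* exclusive host access; may sleep */

        q->in_recovery = true;
        my_do_recovery(q);
        q->in_recovery = false;

        spin_lock_irq(&q->lock);
        q->recovery_needed = false;     /* re-arm the notifier */
        spin_unlock_irq(&q->lock);

        my_release(q);
    }
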
207 struct mmc_queue *mq = set->driver_data; in mmc_mq_init_request() local
208 struct mmc_card *card = mq->card; in mmc_mq_init_request()
232 struct mmc_queue *mq = q->queuedata; in mmc_mq_queue_rq() local
233 struct mmc_card *card = mq->card; in mmc_mq_queue_rq()
240 if (mmc_card_removed(mq->card)) { in mmc_mq_queue_rq()
245 issue_type = mmc_issue_type(mq, req); in mmc_mq_queue_rq()
247 spin_lock_irq(&mq->lock); in mmc_mq_queue_rq()
249 if (mq->recovery_needed || mq->busy) { in mmc_mq_queue_rq()
250 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
256 if (mmc_cqe_dcmd_busy(mq)) { in mmc_mq_queue_rq()
257 mq->cqe_busy |= MMC_CQE_DCMD_BUSY; in mmc_mq_queue_rq()
258 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
267 if (host->hsq_enabled && mq->in_flight[issue_type] > 2) { in mmc_mq_queue_rq()
268 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
285 mq->busy = true; in mmc_mq_queue_rq()
287 mq->in_flight[issue_type] += 1; in mmc_mq_queue_rq()
288 get_card = (mmc_tot_in_flight(mq) == 1); in mmc_mq_queue_rq()
289 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); in mmc_mq_queue_rq()
291 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
299 mmc_get_card(card, &mq->ctx); in mmc_mq_queue_rq()
308 issued = mmc_blk_mq_issue_rq(mq, req); in mmc_mq_queue_rq()
325 spin_lock_irq(&mq->lock); in mmc_mq_queue_rq()
326 mq->in_flight[issue_type] -= 1; in mmc_mq_queue_rq()
327 if (mmc_tot_in_flight(mq) == 0) in mmc_mq_queue_rq()
329 mq->busy = false; in mmc_mq_queue_rq()
330 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
332 mmc_put_card(card, &mq->ctx); in mmc_mq_queue_rq()
334 WRITE_ONCE(mq->busy, false); in mmc_mq_queue_rq()
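
mmc_mq_queue_rq() keeps per-issue-type counters and derives two edge conditions from them while still holding mq->lock: get_card on the 0 to 1 transition of the total (the first request claims the card) and, on the unwind path above, the release when the total drops back to 0. The hsq check at line 267 uses the same counters to cap host-software-queue depth, and mq->busy is re-read locklessly by other dispatch contexts, which is why the final clear goes through WRITE_ONCE(). A sketch of the claim/release accounting, with tot_in_flight() as a stand-in for mmc_tot_in_flight() and my_queue assumed to carry an int in_flight[ISSUE_MAX] array:

    int tot_in_flight(struct my_queue *q);      /* sums q->in_flight[] */

    static void issue_start(struct my_queue *q, int type)
    {
        bool get_card;

        spin_lock_irq(&q->lock);
        q->in_flight[type] += 1;
        get_card = (tot_in_flight(q) == 1);     /* first in: claim the card */
        spin_unlock_irq(&q->lock);

        if (get_card)
            my_claim(q);
    }

    static void issue_end(struct my_queue *q, int type)
    {
        bool put_card;

        spin_lock_irq(&q->lock);
        q->in_flight[type] -= 1;
        put_card = (tot_in_flight(q) == 0);     /* last out: release it */
        spin_unlock_irq(&q->lock);

        if (put_card)
            my_release(q);
    }
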
348 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) in mmc_setup_queue() argument
353 blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); in mmc_setup_queue()
354 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); in mmc_setup_queue()
356 mmc_queue_setup_discard(mq->queue, card); in mmc_setup_queue()
359 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); in mmc_setup_queue()
360 blk_queue_max_hw_sectors(mq->queue, in mmc_setup_queue()
363 WARN(!blk_queue_can_use_dma_map_merging(mq->queue, in mmc_setup_queue()
366 blk_queue_max_segments(mq->queue, mmc_get_max_segments(host)); in mmc_setup_queue()
373 blk_queue_logical_block_size(mq->queue, block_size); in mmc_setup_queue()
380 blk_queue_max_segment_size(mq->queue, in mmc_setup_queue()
383 dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); in mmc_setup_queue()
385 INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); in mmc_setup_queue()
386 INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); in mmc_setup_queue()
388 mutex_init(&mq->complete_lock); in mmc_setup_queue()
390 init_waitqueue_head(&mq->wait); in mmc_setup_queue()
392 mmc_crypto_setup_queue(mq->queue, host); in mmc_setup_queue()
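
mmc_setup_queue() translates host capabilities into block-layer queue limits: flash-friendly flags, max_hw_sectors and max_segments from the host controller, the logical block size from the card, plus the work items and crypto hookup used elsewhere in this file. A trimmed sketch of the limits part, where the numeric parameters are assumptions standing in for the host-derived values; the blk_queue_* setters match the era of this listing, whereas newer kernels pass a struct queue_limits at allocation time instead:

    #include <linux/blkdev.h>

    static void my_setup_queue(struct request_queue *q, unsigned int max_sectors,
                               unsigned short max_segs, unsigned int blksz)
    {
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);       /* flash: no seek penalty */
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); /* no entropy contribution */
        blk_queue_max_hw_sectors(q, max_sectors);       /* host transfer limit */
        blk_queue_max_segments(q, max_segs);            /* host DMA segment limit */
        blk_queue_logical_block_size(q, blksz);         /* typically 512 for MMC */
    }
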
410 struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) in mmc_init_queue() argument
416 mq->card = card; in mmc_init_queue()
418 spin_lock_init(&mq->lock); in mmc_init_queue()
420 memset(&mq->tag_set, 0, sizeof(mq->tag_set)); in mmc_init_queue()
421 mq->tag_set.ops = &mmc_mq_ops; in mmc_init_queue()
427 mq->tag_set.queue_depth = in mmc_init_queue()
430 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; in mmc_init_queue()
431 mq->tag_set.numa_node = NUMA_NO_NODE; in mmc_init_queue()
432 mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in mmc_init_queue()
433 mq->tag_set.nr_hw_queues = 1; in mmc_init_queue()
434 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); in mmc_init_queue()
435 mq->tag_set.driver_data = mq; in mmc_init_queue()
449 ret = blk_mq_alloc_tag_set(&mq->tag_set); in mmc_init_queue()
454 disk = blk_mq_alloc_disk(&mq->tag_set, mq); in mmc_init_queue()
456 blk_mq_free_tag_set(&mq->tag_set); in mmc_init_queue()
459 mq->queue = disk->queue; in mmc_init_queue()
462 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); in mmc_init_queue()
463 blk_queue_rq_timeout(mq->queue, 60 * HZ); in mmc_init_queue()
465 mmc_setup_queue(mq, card); in mmc_init_queue()
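
mmc_init_queue() follows the standard blk-mq bring-up: fill in the tag set (one hardware queue; BLK_MQ_F_BLOCKING because ->queue_rq() may sleep while claiming the host; cmd_size so blk-mq embeds a struct mmc_queue_req in every request), allocate it, then allocate the disk with mq as queuedata, and free the tag set again if disk allocation fails. A sketch of that shape, assuming my_queue carries a struct blk_mq_tag_set tag_set member and using an assumed queue depth:

    #include <linux/blk-mq.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static struct gendisk *my_init_queue(struct my_queue *q,
                                         const struct blk_mq_ops *ops)
    {
        struct gendisk *disk;
        int ret;

        memset(&q->tag_set, 0, sizeof(q->tag_set));
        q->tag_set.ops = ops;
        q->tag_set.queue_depth = 64;            /* assumed; MMC derives this */
        q->tag_set.numa_node = NUMA_NO_NODE;
        q->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        q->tag_set.nr_hw_queues = 1;            /* one host, one hw queue */
        q->tag_set.driver_data = q;

        ret = blk_mq_alloc_tag_set(&q->tag_set);
        if (ret)
            return ERR_PTR(ret);

        disk = blk_mq_alloc_disk(&q->tag_set, q);   /* q becomes queuedata */
        if (IS_ERR(disk))
            blk_mq_free_tag_set(&q->tag_set);       /* unwind on failure */

        return disk;
    }
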
469 void mmc_queue_suspend(struct mmc_queue *mq) in mmc_queue_suspend() argument
471 blk_mq_quiesce_queue(mq->queue); in mmc_queue_suspend()
477 mmc_claim_host(mq->card->host); in mmc_queue_suspend()
478 mmc_release_host(mq->card->host); in mmc_queue_suspend()
481 void mmc_queue_resume(struct mmc_queue *mq) in mmc_queue_resume() argument
483 blk_mq_unquiesce_queue(mq->queue); in mmc_queue_resume()
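
Suspend and resume map directly onto blk-mq quiescing: blk_mq_quiesce_queue() stops further ->queue_rq() calls and waits out those already running, while the claim/release pair at lines 477-478 additionally waits for a request that has already claimed the host to finish with it. The pairing, in sketch form:

    static void my_queue_suspend(struct request_queue *q)
    {
        blk_mq_quiesce_queue(q);        /* no new dispatch; drain in-progress */
    }

    static void my_queue_resume(struct request_queue *q)
    {
        blk_mq_unquiesce_queue(q);      /* dispatch may proceed again */
    }
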
486 void mmc_cleanup_queue(struct mmc_queue *mq) in mmc_cleanup_queue() argument
488 struct request_queue *q = mq->queue; in mmc_cleanup_queue()
502 cancel_work_sync(&mq->recovery_work); in mmc_cleanup_queue()
504 blk_mq_free_tag_set(&mq->tag_set); in mmc_cleanup_queue()
511 flush_work(&mq->complete_work); in mmc_cleanup_queue()
513 mq->card = NULL; in mmc_cleanup_queue()
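
Teardown order matters in mmc_cleanup_queue(): un-quiesce first in case the queue was suspended, cancel the recovery worker before the tag set disappears under it, and flush the completion worker afterwards because a request can still be completing on another CPU. A sketch of that ordering, assuming my_queue also has a struct work_struct complete_work member:

    static void my_cleanup_queue(struct my_queue *q, struct request_queue *rq)
    {
        if (blk_queue_quiesced(rq))
            blk_mq_unquiesce_queue(rq);         /* undo a pending suspend */

        cancel_work_sync(&q->recovery_work);    /* no recovery past this point */
        blk_mq_free_tag_set(&q->tag_set);
        flush_work(&q->complete_work);          /* wait out a racing completion */
    }
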
519 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) in mmc_queue_map_sg() argument
523 return blk_rq_map_sg(mq->queue, req, mqrq->sg); in mmc_queue_map_sg()
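
The scatterlist that mmc_queue_map_sg() fills is not allocated per I/O: mmc_mq_init_request() (line 207 above) sets it up once per request when blk-mq initializes its request pool, sized by the host's segment limit, and blk_rq_map_sg() then just populates it at issue time, returning the number of entries actually used. A sketch of that split, with assumed names:

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    struct my_queue_req {
        struct scatterlist *sg;     /* preallocated, host max_segs entries */
    };

    /* Called once per request from the blk-mq init_request hook. */
    static int my_init_request(struct my_queue_req *mqrq, unsigned short max_segs)
    {
        mqrq->sg = kmalloc_array(max_segs, sizeof(*mqrq->sg), GFP_KERNEL);
        if (!mqrq->sg)
            return -ENOMEM;
        sg_init_table(mqrq->sg, max_segs);
        return 0;
    }

    /* Called at issue time; returns the number of sg entries used. */
    static unsigned int my_map_sg(struct request_queue *q, struct request *req,
                                  struct my_queue_req *mqrq)
    {
        return blk_rq_map_sg(q, req, mqrq->sg);
    }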