Lines matching refs:mq (identifier cross-reference; the function names shown correspond to the MMC block driver, drivers/mmc/core/block.c)
179 struct mmc_queue *mq);
249 struct mmc_queue *mq; in power_ro_lock_store() local
260 mq = &md->queue; in power_ro_lock_store()
263 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0); in power_ro_lock_store()
663 struct mmc_queue *mq; in mmc_blk_ioctl_cmd() local
683 mq = &md->queue; in mmc_blk_ioctl_cmd()
684 req = blk_mq_alloc_request(mq->queue, in mmc_blk_ioctl_cmd()
714 struct mmc_queue *mq; in mmc_blk_ioctl_multi_cmd() local
756 mq = &md->queue; in mmc_blk_ioctl_multi_cmd()
757 req = blk_mq_alloc_request(mq->queue, in mmc_blk_ioctl_multi_cmd()
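The three call sites above (power_ro_lock_store and the two ioctl paths) share one pattern: allocate a driver-private request on the mmc queue, dispatch it synchronously, and read the result back out of the per-request mmc data. A minimal sketch of that pattern, assuming a recent kernel where blk_execute_rq() takes (rq, at_head), the req_to_mmc_queue_req() helper from drivers/mmc/core/queue.h, and MMC_DRV_OP_IOCTL as one possible op (the function name here is hypothetical):

	static int mmc_blk_send_drv_op(struct mmc_queue *mq)
	{
		struct request *req;
		int ret;

		/* REQ_OP_DRV_OUT marks this as driver-private, not I/O. */
		req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Tag the request with the driver operation to perform. */
		req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;

		/* Dispatch synchronously; mmc_blk_issue_drv_op() runs it. */
		blk_execute_rq(req, false);

		ret = req_to_mmc_queue_req(req)->drv_op_result;
		blk_mq_free_request(req);
		return ret;
	}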
1081 static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) in mmc_blk_issue_drv_op() argument
1084 struct mmc_card *card = mq->card; in mmc_blk_issue_drv_op()
1085 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_issue_drv_op()
1151 static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req, in mmc_blk_issue_erase_rq() argument
1154 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_issue_erase_rq()
1189 static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_issue_trim_rq() argument
1191 mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG); in mmc_blk_issue_trim_rq()
1194 static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_issue_discard_rq() argument
1196 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_issue_discard_rq()
1203 mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg); in mmc_blk_issue_discard_rq()
1206 static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, in mmc_blk_issue_secdiscard_rq() argument
1209 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_issue_secdiscard_rq()
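Lines 1189-1209 suggest a thin-wrapper layout: trim, discard, and secure discard each pick an erase argument and funnel into the common mmc_blk_issue_erase_rq(). A plausible reconstruction of the discard wrapper; the broken-SD-discard quirk check is an assumption carried over from mainline block.c:

	static void mmc_blk_issue_discard_rq(struct mmc_queue *mq,
					     struct request *req)
	{
		struct mmc_blk_data *md = mq->blkdata;
		struct mmc_card *card = md->queue.card;
		unsigned int arg = card->erase_arg;

		/* Some SD cards mishandle discard; fall back to erase. */
		if (mmc_card_broken_sd_discard(card))
			arg = SD_ERASE_ARG;

		mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
	}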
1276 static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) in mmc_blk_issue_flush() argument
1278 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_issue_flush()
1363 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, in mmc_blk_data_prep() argument
1367 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_data_prep()
1435 brq->data.blocks = queue_physical_block_size(mq->queue) >> 9; in mmc_blk_data_prep()
1469 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); in mmc_blk_data_prep()
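The ">> 9" on line 1435 converts the physical block size from bytes to 512-byte sectors (512 == 1 << 9), which is the unit brq->data.blocks is counted in. A hypothetical helper that makes the idiom explicit:

	#include <linux/blkdev.h>

	/* Physical block size expressed in 512-byte sectors. */
	static inline unsigned int mmc_blk_phys_sectors(struct request_queue *q)
	{
		return queue_physical_block_size(q) >> 9; /* bytes -> sectors */
	}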
1499 static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_cqe_complete_rq() argument
1504 struct mmc_host *host = mq->card->host; in mmc_blk_cqe_complete_rq()
1505 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); in mmc_blk_cqe_complete_rq()
1529 } else if (mq->in_recovery) { in mmc_blk_cqe_complete_rq()
1535 spin_lock_irqsave(&mq->lock, flags); in mmc_blk_cqe_complete_rq()
1537 mq->in_flight[issue_type] -= 1; in mmc_blk_cqe_complete_rq()
1539 put_card = (mmc_tot_in_flight(mq) == 0); in mmc_blk_cqe_complete_rq()
1541 mmc_cqe_check_busy(mq); in mmc_blk_cqe_complete_rq()
1543 spin_unlock_irqrestore(&mq->lock, flags); in mmc_blk_cqe_complete_rq()
1545 if (!mq->cqe_busy) in mmc_blk_cqe_complete_rq()
1549 mmc_put_card(mq->card, &mq->ctx); in mmc_blk_cqe_complete_rq()
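Lines 1535-1549 show the completion-side accounting: the per-issue-type in-flight counter is dropped under mq->lock, parked CQE work is re-checked, and the host claim is released once nothing is in flight. A sketch under those assumptions (the function name is hypothetical; fields follow struct mmc_queue):

	static void mmc_blk_cqe_dec_in_flight(struct mmc_queue *mq,
					      enum mmc_issue_type issue_type)
	{
		unsigned long flags;
		bool put_card;

		spin_lock_irqsave(&mq->lock, flags);
		mq->in_flight[issue_type] -= 1;
		/* Only the last completion releases the host claim. */
		put_card = (mmc_tot_in_flight(mq) == 0);
		/* Re-evaluate requests parked while the CQE was busy. */
		mmc_cqe_check_busy(mq);
		spin_unlock_irqrestore(&mq->lock, flags);

		if (put_card)
			mmc_put_card(mq->card, &mq->ctx);
	}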
1552 void mmc_blk_cqe_recovery(struct mmc_queue *mq) in mmc_blk_cqe_recovery() argument
1554 struct mmc_card *card = mq->card; in mmc_blk_cqe_recovery()
1562 mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); in mmc_blk_cqe_recovery()
1563 mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); in mmc_blk_cqe_recovery()
1574 struct mmc_queue *mq = q->queuedata; in mmc_blk_cqe_req_done() local
1580 if (mq->in_recovery) in mmc_blk_cqe_req_done()
1581 mmc_blk_cqe_complete_rq(mq, req); in mmc_blk_cqe_req_done()
1607 static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) in mmc_blk_cqe_issue_flush() argument
1619 return mmc_blk_cqe_start_req(mq->card->host, mrq); in mmc_blk_cqe_issue_flush()
1622 static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_hsq_issue_rw_rq() argument
1625 struct mmc_host *host = mq->card->host; in mmc_blk_hsq_issue_rw_rq()
1628 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); in mmc_blk_hsq_issue_rw_rq()
1639 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_cqe_issue_rw_rq() argument
1642 struct mmc_host *host = mq->card->host; in mmc_blk_cqe_issue_rw_rq()
1645 return mmc_blk_hsq_issue_rw_rq(mq, req); in mmc_blk_cqe_issue_rw_rq()
1647 mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); in mmc_blk_cqe_issue_rw_rq()
1649 return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); in mmc_blk_cqe_issue_rw_rq()
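Lines 1639-1649 reconstruct almost completely: the CQE read/write path defers to the host software queue (HSQ) when it is enabled, otherwise preps the data and starts the request on the command queue engine. A sketch, taking the hsq_enabled test from mainline as an assumption:

	static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq,
					   struct request *req)
	{
		struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
		struct mmc_host *host = mq->card->host;

		/* Hosts with a software queue take the HSQ path instead. */
		if (host->hsq_enabled)
			return mmc_blk_hsq_issue_rw_rq(mq, req);

		mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

		return mmc_blk_cqe_start_req(host, &mqrq->brq.mrq);
	}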
1655 struct mmc_queue *mq) in mmc_blk_rw_rq_prep() argument
1660 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_rw_rq_prep()
1663 mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag); in mmc_blk_rw_rq_prep()
1755 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) in mmc_blk_read_single() argument
1759 struct mmc_card *card = mq->card; in mmc_blk_read_single()
1762 size_t bytes_per_read = queue_physical_block_size(mq->queue); in mmc_blk_read_single()
1770 mmc_blk_rw_rq_prep(mqrq, card, 1, mq); in mmc_blk_read_single()
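mmc_blk_read_single() (lines 1755-1770) is the recovery fallback that re-reads a failed request one physical block at a time, so only the genuinely bad block fails. A condensed sketch of the loop, with the retry and status-polling details elided into a comment:

	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	size_t bytes_per_read = queue_physical_block_size(mq->queue);
	blk_status_t error = BLK_STS_OK;

	do {
		/* recovery_mode = 1 clamps the transfer to one block. */
		mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
		mmc_wait_for_req(host, mrq);

		/* (real code: retry a bounded number of times, poll the
		 *  card status, and set error = BLK_STS_IOERR on failure) */

		/* Complete these bytes; loop while the request has more. */
	} while (blk_update_request(req, error, bytes_per_read));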
1827 struct mmc_queue *mq = req->q->queuedata; in mmc_blk_status_error() local
1830 if (mmc_host_is_spi(mq->card->host)) in mmc_blk_status_error()
1863 static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) in mmc_blk_mq_rw_recovery() argument
1868 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_mq_rw_recovery()
1869 struct mmc_card *card = mq->card; in mmc_blk_mq_rw_recovery()
1899 if (!mmc_host_is_spi(mq->card->host) && in mmc_blk_mq_rw_recovery()
1901 err = mmc_blk_fix_state(mq->card, req); in mmc_blk_mq_rw_recovery()
1916 if (!mmc_host_is_spi(mq->card->host) && in mmc_blk_mq_rw_recovery()
1946 queue_physical_block_size(mq->queue) >> 9) { in mmc_blk_mq_rw_recovery()
1948 mmc_blk_read_single(mq, req); in mmc_blk_mq_rw_recovery()
2040 static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, in mmc_blk_rw_reset_success() argument
2045 mmc_blk_reset_success(mq->blkdata, type); in mmc_blk_rw_reset_success()
2048 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_mq_complete_rq() argument
2063 if (mmc_card_removed(mq->card)) in mmc_blk_mq_complete_rq()
2069 static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, in mmc_blk_urgent_bkops_needed() argument
2072 return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && in mmc_blk_urgent_bkops_needed()
2077 static void mmc_blk_urgent_bkops(struct mmc_queue *mq, in mmc_blk_urgent_bkops() argument
2080 if (mmc_blk_urgent_bkops_needed(mq, mqrq)) in mmc_blk_urgent_bkops()
2081 mmc_run_bkops(mq->card); in mmc_blk_urgent_bkops()
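Lines 2069-2081 imply that "urgent bkops" is a two-part test: the card must be eMMC (not SPI-attached), and a response must flag an exception event; only then are background operations kicked off. A plausible reconstruction; the R1_EXCEPTION_EVENT checks on the cmd and stop responses are an assumption from mainline:

	static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
						struct mmc_queue_req *mqrq)
	{
		/* Exception events are eMMC-only; SPI has no R1 status. */
		return mmc_card_mmc(mq->card) &&
		       !mmc_host_is_spi(mq->card->host) &&
		       ((mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT) ||
			(mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT));
	}

	static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
					 struct mmc_queue_req *mqrq)
	{
		if (mmc_blk_urgent_bkops_needed(mq, mqrq))
			mmc_run_bkops(mq->card);
	}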
2090 struct mmc_queue *mq = q->queuedata; in mmc_blk_hsq_req_done() local
2091 struct mmc_host *host = mq->card->host; in mmc_blk_hsq_req_done()
2095 mmc_blk_urgent_bkops_needed(mq, mqrq)) { in mmc_blk_hsq_req_done()
2096 spin_lock_irqsave(&mq->lock, flags); in mmc_blk_hsq_req_done()
2097 mq->recovery_needed = true; in mmc_blk_hsq_req_done()
2098 mq->recovery_req = req; in mmc_blk_hsq_req_done()
2099 spin_unlock_irqrestore(&mq->lock, flags); in mmc_blk_hsq_req_done()
2103 schedule_work(&mq->recovery_work); in mmc_blk_hsq_req_done()
2107 mmc_blk_rw_reset_success(mq, req); in mmc_blk_hsq_req_done()
2113 if (mq->in_recovery) in mmc_blk_hsq_req_done()
2114 mmc_blk_cqe_complete_rq(mq, req); in mmc_blk_hsq_req_done()
2121 struct mmc_queue *mq = req->q->queuedata; in mmc_blk_mq_complete() local
2122 struct mmc_host *host = mq->card->host; in mmc_blk_mq_complete()
2125 mmc_blk_cqe_complete_rq(mq, req); in mmc_blk_mq_complete()
2127 mmc_blk_mq_complete_rq(mq, req); in mmc_blk_mq_complete()
2130 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, in mmc_blk_mq_poll_completion() argument
2134 struct mmc_host *host = mq->card->host; in mmc_blk_mq_poll_completion()
2137 mmc_blk_card_busy(mq->card, req)) { in mmc_blk_mq_poll_completion()
2138 mmc_blk_mq_rw_recovery(mq, req); in mmc_blk_mq_poll_completion()
2140 mmc_blk_rw_reset_success(mq, req); in mmc_blk_mq_poll_completion()
2144 mmc_blk_urgent_bkops(mq, mqrq); in mmc_blk_mq_poll_completion()
2147 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) in mmc_blk_mq_dec_in_flight() argument
2152 spin_lock_irqsave(&mq->lock, flags); in mmc_blk_mq_dec_in_flight()
2154 mq->in_flight[issue_type] -= 1; in mmc_blk_mq_dec_in_flight()
2156 put_card = (mmc_tot_in_flight(mq) == 0); in mmc_blk_mq_dec_in_flight()
2158 spin_unlock_irqrestore(&mq->lock, flags); in mmc_blk_mq_dec_in_flight()
2161 mmc_put_card(mq->card, &mq->ctx); in mmc_blk_mq_dec_in_flight()
2164 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, in mmc_blk_mq_post_req() argument
2167 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); in mmc_blk_mq_post_req()
2170 struct mmc_host *host = mq->card->host; in mmc_blk_mq_post_req()
2178 if (mq->in_recovery) { in mmc_blk_mq_post_req()
2179 mmc_blk_mq_complete_rq(mq, req); in mmc_blk_mq_post_req()
2187 mmc_blk_mq_dec_in_flight(mq, issue_type); in mmc_blk_mq_post_req()
2190 void mmc_blk_mq_recovery(struct mmc_queue *mq) in mmc_blk_mq_recovery() argument
2192 struct request *req = mq->recovery_req; in mmc_blk_mq_recovery()
2193 struct mmc_host *host = mq->card->host; in mmc_blk_mq_recovery()
2196 mq->recovery_req = NULL; in mmc_blk_mq_recovery()
2197 mq->rw_wait = false; in mmc_blk_mq_recovery()
2201 mmc_blk_mq_rw_recovery(mq, req); in mmc_blk_mq_recovery()
2204 mmc_blk_urgent_bkops(mq, mqrq); in mmc_blk_mq_recovery()
2206 mmc_blk_mq_post_req(mq, req, true); in mmc_blk_mq_recovery()
2209 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, in mmc_blk_mq_complete_prev_req() argument
2212 if (mmc_host_done_complete(mq->card->host)) in mmc_blk_mq_complete_prev_req()
2215 mutex_lock(&mq->complete_lock); in mmc_blk_mq_complete_prev_req()
2217 if (!mq->complete_req) in mmc_blk_mq_complete_prev_req()
2220 mmc_blk_mq_poll_completion(mq, mq->complete_req); in mmc_blk_mq_complete_prev_req()
2223 *prev_req = mq->complete_req; in mmc_blk_mq_complete_prev_req()
2225 mmc_blk_mq_post_req(mq, mq->complete_req, true); in mmc_blk_mq_complete_prev_req()
2227 mq->complete_req = NULL; in mmc_blk_mq_complete_prev_req()
2230 mutex_unlock(&mq->complete_lock); in mmc_blk_mq_complete_prev_req()
2235 struct mmc_queue *mq = container_of(work, struct mmc_queue, in mmc_blk_mq_complete_work() local
2238 mmc_blk_mq_complete_prev_req(mq, NULL); in mmc_blk_mq_complete_work()
2247 struct mmc_queue *mq = q->queuedata; in mmc_blk_mq_req_done() local
2248 struct mmc_host *host = mq->card->host; in mmc_blk_mq_req_done()
2260 spin_lock_irqsave(&mq->lock, flags); in mmc_blk_mq_req_done()
2261 mq->complete_req = req; in mmc_blk_mq_req_done()
2262 mq->rw_wait = false; in mmc_blk_mq_req_done()
2263 waiting = mq->waiting; in mmc_blk_mq_req_done()
2264 spin_unlock_irqrestore(&mq->lock, flags); in mmc_blk_mq_req_done()
2273 wake_up(&mq->wait); in mmc_blk_mq_req_done()
2275 queue_work(mq->card->complete_wq, &mq->complete_work); in mmc_blk_mq_req_done()
2282 mmc_blk_urgent_bkops_needed(mq, mqrq)) { in mmc_blk_mq_req_done()
2283 spin_lock_irqsave(&mq->lock, flags); in mmc_blk_mq_req_done()
2284 mq->recovery_needed = true; in mmc_blk_mq_req_done()
2285 mq->recovery_req = req; in mmc_blk_mq_req_done()
2286 spin_unlock_irqrestore(&mq->lock, flags); in mmc_blk_mq_req_done()
2287 wake_up(&mq->wait); in mmc_blk_mq_req_done()
2288 schedule_work(&mq->recovery_work); in mmc_blk_mq_req_done()
2292 mmc_blk_rw_reset_success(mq, req); in mmc_blk_mq_req_done()
2294 mq->rw_wait = false; in mmc_blk_mq_req_done()
2295 wake_up(&mq->wait); in mmc_blk_mq_req_done()
2298 mmc_blk_mq_post_req(mq, req, false); in mmc_blk_mq_req_done()
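Lines 2260-2275 show the waker side of the done path: the completed request is recorded under mq->lock, and the handler either wakes an issuing thread parked on mq->wait or punts to the per-card completion workqueue. Because rw_wait and waiting are only written under mq->lock, the waiter cannot miss the transition. A sketch of that fragment:

	unsigned long flags;
	bool waiting;

	spin_lock_irqsave(&mq->lock, flags);
	mq->complete_req = req;
	mq->rw_wait = false;
	waiting = mq->waiting;
	spin_unlock_irqrestore(&mq->lock, flags);

	/* A parked issuer completes the previous request inline;
	 * otherwise defer to the per-card completion workqueue. */
	if (waiting)
		wake_up(&mq->wait);
	else
		queue_work(mq->card->complete_wq, &mq->complete_work);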
2301 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) in mmc_blk_rw_wait_cond() argument
2310 spin_lock_irqsave(&mq->lock, flags); in mmc_blk_rw_wait_cond()
2311 if (mq->recovery_needed) { in mmc_blk_rw_wait_cond()
2315 done = !mq->rw_wait; in mmc_blk_rw_wait_cond()
2317 mq->waiting = !done; in mmc_blk_rw_wait_cond()
2318 spin_unlock_irqrestore(&mq->lock, flags); in mmc_blk_rw_wait_cond()
2323 static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) in mmc_blk_rw_wait() argument
2327 wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); in mmc_blk_rw_wait()
2330 mmc_blk_mq_complete_prev_req(mq, prev_req); in mmc_blk_rw_wait()
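The waiter side (lines 2301-2330) pairs wait_event(mq->wait, ...) with a condition that samples state under the same mq->lock the waker uses, so a wakeup between the test and the sleep cannot be lost. Reconstructed from the refs above, with the -EBUSY choice for the recovery case taken as an assumption from mainline:

	static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
	{
		unsigned long flags;
		bool done;

		/* The waker writes rw_wait/recovery_needed under the same
		 * lock, so this test is race-free. */
		spin_lock_irqsave(&mq->lock, flags);
		if (mq->recovery_needed) {
			*err = -EBUSY;
			done = true;
		} else {
			done = !mq->rw_wait;
		}
		mq->waiting = !done; /* tells the waker to wake_up() */
		spin_unlock_irqrestore(&mq->lock, flags);

		return done;
	}

mmc_blk_rw_wait() then reduces to wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)) followed by completing the previous request via mmc_blk_mq_complete_prev_req().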
2335 static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, in mmc_blk_mq_issue_rw_rq() argument
2339 struct mmc_host *host = mq->card->host; in mmc_blk_mq_issue_rw_rq()
2343 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); in mmc_blk_mq_issue_rw_rq()
2349 err = mmc_blk_rw_wait(mq, &prev_req); in mmc_blk_mq_issue_rw_rq()
2353 mq->rw_wait = true; in mmc_blk_mq_issue_rw_rq()
2358 mmc_blk_mq_post_req(mq, prev_req, true); in mmc_blk_mq_issue_rw_rq()
2361 mq->rw_wait = false; in mmc_blk_mq_issue_rw_rq()
2374 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) in mmc_blk_wait_for_idle() argument
2379 return mmc_blk_rw_wait(mq, NULL); in mmc_blk_wait_for_idle()
2382 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) in mmc_blk_mq_issue_rq() argument
2384 struct mmc_blk_data *md = mq->blkdata; in mmc_blk_mq_issue_rq()
2393 switch (mmc_issue_type(mq, req)) { in mmc_blk_mq_issue_rq()
2395 ret = mmc_blk_wait_for_idle(mq, host); in mmc_blk_mq_issue_rq()
2401 mmc_blk_issue_drv_op(mq, req); in mmc_blk_mq_issue_rq()
2404 mmc_blk_issue_discard_rq(mq, req); in mmc_blk_mq_issue_rq()
2407 mmc_blk_issue_secdiscard_rq(mq, req); in mmc_blk_mq_issue_rq()
2410 mmc_blk_issue_trim_rq(mq, req); in mmc_blk_mq_issue_rq()
2413 mmc_blk_issue_flush(mq, req); in mmc_blk_mq_issue_rq()
2428 ret = mmc_blk_cqe_issue_flush(mq, req); in mmc_blk_mq_issue_rq()
2435 ret = mmc_blk_cqe_issue_rw_rq(mq, req); in mmc_blk_mq_issue_rq()
2437 ret = mmc_blk_mq_issue_rw_rq(mq, req); in mmc_blk_mq_issue_rq()
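mmc_blk_mq_issue_rq() (lines 2382-2437) is the single entry point the block layer calls; the refs sketch a two-level dispatch, first on mmc_issue_type() and then on req_op(). A condensed sketch of that shape; the op-to-handler mapping and the trim-via-REQ_OP_WRITE_ZEROES case are assumptions from mainline, and error paths are elided:

	switch (mmc_issue_type(mq, req)) {
	case MMC_ISSUE_SYNC:
		/* Synchronous ops wait for the queue to drain first. */
		if (mmc_blk_wait_for_idle(mq, host))
			return MMC_REQ_BUSY;
		switch (req_op(req)) {
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			mmc_blk_issue_drv_op(mq, req);
			break;
		case REQ_OP_DISCARD:
			mmc_blk_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
			mmc_blk_issue_secdiscard_rq(mq, req);
			break;
		case REQ_OP_WRITE_ZEROES:
			mmc_blk_issue_trim_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			mmc_blk_issue_flush(mq, req);
			break;
		}
		return MMC_REQ_FINISHED;
	case MMC_ISSUE_DFLT:
	case MMC_ISSUE_ASYNC:
		switch (req_op(req)) {
		case REQ_OP_FLUSH:
			ret = mmc_blk_cqe_issue_flush(mq, req);
			break;
		case REQ_OP_READ:
		case REQ_OP_WRITE:
			if (host->cqe_enabled)
				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
			else
				ret = mmc_blk_mq_issue_rw_rq(mq, req);
			break;
		}
		return ret ? MMC_REQ_FAILED_TO_START : MMC_REQ_STARTED;
	}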
2859 struct mmc_queue *mq = &md->queue; in mmc_dbg_card_status_get() local
2864 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); in mmc_dbg_card_status_get()
2889 struct mmc_queue *mq = &md->queue; in mmc_ext_csd_open() local
2901 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); in mmc_ext_csd_open()