// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}
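
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * ->cqe_timeout() host callback used above is expected to return nonzero
 * while the request is still in flight, setting *recovery_needed if the
 * controller must be recovered, and to return zero once the request has
 * already completed. Something along these lines, where all foo_* names
 * are made up for illustration:
 *
 *	static int foo_cqe_timeout(struct mmc_host *host,
 *				   struct mmc_request *mrq,
 *				   bool *recovery_needed)
 *	{
 *		struct foo_host *foo = mmc_priv(host);
 *
 *		if (!foo_mrq_pending(foo, mrq))
 *			return 0;
 *		*recovery_needed = true;
 *		return 1;
 *	}
 */
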
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
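
/*
 * Worked example for the discard setup above (values are illustrative): a
 * card with pref_erase of 8192 sectors advertises a discard granularity of
 * 8192 << 9 = 4 MiB. If mmc_calc_max_discard() returns fewer sectors than
 * pref_erase, that granularity would exceed the maximum discard size, so it
 * falls back to a single 512-byte sector (SECTOR_SIZE).
 */
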
static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}
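
/*
 * Note on the dispatch contract implemented below: blk-mq treats
 * BLK_STS_RESOURCE as "hold this request and re-dispatch it later", whereas
 * BLK_STS_IOERR fails the request outright. That is why the recovery, DCMD
 * and software-queue depth checks in mmc_mq_queue_rq() back off with
 * BLK_STS_RESOURCE rather than blocking inside ->queue_rq().
 */
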
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For the MMC host software queue, only allow a small number
		 * of requests in flight to avoid a long latency.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeds, it calls
	 * blk_queue_virt_boundary(), and a queue must not set both a virt
	 * boundary and a max segment size, so skip the latter in that case.
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
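
/*
 * For example (illustrative values): with CQE enabled, an eMMC whose EXT_CSD
 * advertises a command queue depth of 32 on a controller with cqe_qdepth 32
 * gets a tag-set depth of 32 in mmc_init_queue() below, because the request
 * tag indexes the hardware task slots; in all other modes the depth is
 * simply MMC_QUEUE_DEPTH.
 */
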
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, which
	 * sizes the scatterlist via mmc_get_max_segments(), so
	 * host->can_dma_map_merge must be decided before that call.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
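
/*
 * Illustrative sketch (hypothetical host driver, not part of this file):
 * the scatterlist prepared by mmc_queue_map_sg() typically reaches the host
 * through mrq->data and is then DMA-mapped and walked with the standard
 * helpers, e.g.:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(mmc_dev(host), data->sg, data->sg_len,
 *			   mmc_get_dma_dir(data));
 *	for_each_sg(data->sg, sg, count, i)
 *		foo_queue_desc(foo, sg_dma_address(sg), sg_dma_len(sg));
 *
 * foo_queue_desc() is made up for illustration.
 */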