/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->new_request) {
				mq->new_request = false;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
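
/*
 * A rough sketch of one loop iteration above, with the locking and the
 * wake-up flags left out: the mqrq_cur/mqrq_prev pair is a two-slot
 * double buffer, so a new request can be prepared and issued while the
 * previous one is still completing.
 *
 *	req = blk_fetch_request(q);		(may be NULL)
 *	mq->mqrq_cur->req = req;
 *	if (req || mq->mqrq_prev->req)
 *		mmc_blk_issue_rq(mq, req);	(may merely start req and
 *						 finish mqrq_prev->req)
 *	swap(mq->mqrq_prev, mq->mqrq_cur);	(cur becomes the in-flight
 *						 slot for the next pass)
 */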

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}
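
/*
 * A minimal error-handling sketch: mmc_queue_alloc_bounce_sgs() and
 * mmc_queue_alloc_sgs() return on the first failed allocation and leave
 * any earlier, successful allocations in place, so the caller is expected
 * to unwind with mmc_queue_reqs_free_bufs(), which tolerates NULL
 * pointers, exactly as mmc_init_queue() below does:
 *
 *	ret = mmc_queue_alloc_sgs(mq, host->max_segs);
 *	if (ret) {
 *		mmc_queue_reqs_free_bufs(mq);
 *		kfree(mq->mqrq);
 *		mq->mqrq = NULL;
 *	}
 */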

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
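
/*
 * Typical lifetime of a queue, sketched with illustrative names (the real
 * caller is the MMC block driver; error handling trimmed):
 *
 *	spin_lock_init(&md->lock);
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	mmc_queue_suspend(&md->queue);	(e.g. across host suspend)
 *	mmc_queue_resume(&md->queue);
 *	...
 *	mmc_cleanup_queue(&md->queue);	(stops mmcqd and fails any
 *					 requests still on the queue)
 */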

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended = true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}
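
/*
 * How the three bounce helpers above are meant to be paired by the
 * issuing side (an illustrative sketch only; the real sequence lives in
 * the MMC block driver, and "brq" is just a stand-in for its per-request
 * data structure):
 *
 *	brq->data.sg = mqrq->sg;
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *	mmc_queue_bounce_pre(mqrq);	(copy the payload into bounce_buf
 *					 before a WRITE is started)
 *	... start the request and wait for it to complete ...
 *	mmc_queue_bounce_post(mqrq);	(copy bounce_buf back into the
 *					 request's pages after a READ)
 */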