/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Worker thread for the queue: fetches requests from the block layer
 * dispatch queue and issues them via mmc_blk_issue_rq(), sleeping when
 * there is nothing to do.
 */
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
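
/*
 * Illustrative sketch (not part of the original file): roughly how a block
 * driver caller might pair mmc_init_queue() with mmc_cleanup_queue(). The
 * names "md", "md->lock" and "md->queue" are hypothetical placeholders for
 * the caller's own per-device state.
 *
 *	struct my_blk_data *md;		// hypothetical per-device data
 *	int ret;
 *
 *	spin_lock_init(&md->lock);
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		return ret;		// no queue or worker thread created
 *
 *	// md->queue.queue can now serve as the gendisk's request queue
 *
 *	// on removal, stop the worker thread and drain the queue:
 *	mmc_cleanup_queue(&md->queue);
 */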

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
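
/*
 * Illustrative sketch (not part of the original file): how the issuing side
 * might consume the scatterlist that mmc_init_request() allocated. "brq" is a
 * hypothetical stand-in for the block driver's per-request bookkeeping; the
 * data.sg and data.sg_len fields are the standard struct mmc_data members.
 *
 *	brq->data.sg = mqrq->sg;
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *	// the host driver then DMA-maps brq->data.sg_len entries of
 *	// brq->data.sg when it processes the request
 */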