/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request. If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference. The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered. Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled. When the flush
 * completes, all the requests which were pending proceed to the next
 * step. This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress. This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
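 * As a concrete example of the decomposition above: on a device with a
 * writeback cache but no FUA support, a REQ_FLUSH|REQ_FUA write with data
 * goes through PREFLUSH, DATA and POSTFLUSH, while on a FUA capable device
 * it goes through PREFLUSH and DATA only, with REQ_FUA passed down to the
 * driver.
 *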
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete. The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again. @rq->biotail is guaranteed to equal the
	 * original @rq->bio. Restore it.
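	 * The end_io callback saved in @rq->flush.saved_end_io is restored
	 * below as well.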
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path. Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
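
	/*
	 * A flush is in flight only while pending_idx and running_idx
	 * differ (see blk_kick_flush()), and C1 allows a single flush at
	 * a time, so the two indices must still differ here.
	 */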

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held. Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver. Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx. This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the first request since they can't be in
	 * flight at the same time, and acquire the tag's ownership for
	 * the flush req.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions or __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted. Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done. Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache. In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery. Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery. Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If WAIT flag is not passed then caller may check only what
 *    request was pushed in some internal queue for later handling.
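 *    Returns 0 on success and a negative errno on failure.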
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}