/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
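
/*
 * Example decompositions (illustrative only; this mirrors what
 * blk_flush_policy() below computes):
 *
 *   device capability     incoming request        resulting sequence
 *   -----------------     ----------------        ------------------
 *   FLUSH and FUA         write w/ FLUSH|FUA      PREFLUSH + DATA (FUA kept)
 *   FLUSH only            write w/ FLUSH|FUA      PREFLUSH + DATA + POSTFLUSH
 *   FLUSH only            FLUSH without data      PREFLUSH
 *   no writeback cache    write w/ FLUSH|FUA      DATA only
 */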

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = NULL;
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		list_add(&rq->queuelist, &q->queue_head);
		queued = true;
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	return blk_kick_flush(q) | queued;
}
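
/*
 * Illustrative walk-through of the sequencing above (informational only):
 * a request whose policy is PREFLUSH|DATA|POSTFLUSH starts with
 * flush.seq == 0, so blk_flush_cur_seq() returns REQ_FSEQ_PREFLUSH and the
 * request sits on the pending flush queue.  Once that flush completes,
 * DATA is the next unset bit and the request is dispatched normally; when
 * the data write completes, POSTFLUSH is pending, and after that flush
 * finishes blk_flush_cur_seq() returns REQ_FSEQ_DONE and the request is
 * ended.  Steps absent from the policy are marked complete up front by
 * blk_insert_flush() passing REQ_FSEQ_ACTIONS & ~policy, so ffz() simply
 * skips over them.
 */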

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running = &q->flush_queue[q->flush_running_idx];
	bool queued = false;
	struct request *rq, *n;

	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;
	elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid stalls in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 * the queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request
	 * finishes to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed)
		blk_run_queue_async(q);
	q->flush_queue_delayed = 0;
}
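
/*
 * Double-buffering illustration (informational): suppose both indices
 * start at 0 and request A needs a PREFLUSH.
 *
 *   A queues on flush_queue[0]; blk_kick_flush() issues the flush and
 *   toggles flush_pending_idx to 1, so later arrivals (say B) queue on
 *   flush_queue[1] while the flush is in flight.
 *
 *   flush_end_io() toggles flush_running_idx to 1, advances A to its
 *   next step, and blk_kick_flush() may then issue a single flush that
 *   serves B and everything else that accumulated on flush_queue[1].
 */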

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	blk_rq_init(q, &q->flush_rq);
	q->flush_rq.cmd_type = REQ_TYPE_FS;
	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq.rq_disk = first_rq->rq_disk;
	q->flush_rq.end_io = flush_end_io;

	q->flush_pending_idx ^= 1;
	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}
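
/*
 * How blk_insert_flush() (below) routes a request by policy
 * (informational summary of the code that follows):
 *
 *   policy == 0                  completed immediately: the call with
 *                                REQ_FSEQ_ACTIONS & ~policy marks every
 *                                step done, ending the request
 *   DATA only                    bypasses the flush machinery and is
 *                                queued for normal execution
 *   any PRE/POSTFLUSH involved   enters the flush state machine via
 *                                blk_flush_complete_seq()
 */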

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	BUG_ON(rq->end_io);
	BUG_ON(!rq->bio || rq->bio != rq->biotail);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}
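
/*
 * Minimal usage sketch for blkdev_issue_flush() below (hypothetical
 * caller, assuming @bdev is a valid, opened block device):
 *
 *	sector_t error_sector;
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector);
 *
 *	if (err == -EIO)
 *		pr_err("flush failed near sector %llu\n",
 *		       (unsigned long long)error_sector);
 *
 * Passing NULL for @error_sector is fine if the caller doesn't care about
 * the error offset.
 */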

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  The call blocks until the flush completes.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);