// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are completed immediately if there's no data,
 * or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
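 *
 * For example, on a device with a writeback cache but no FUA support, a
 * REQ_PREFLUSH|REQ_FUA write with data expands to the full sequence
 * PREFLUSH -> DATA -> POSTFLUSH, while on a FUA-capable device the same
 * request becomes PREFLUSH -> DATA with REQ_FUA passed down.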
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/lockdep.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
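	 * This implements condition C3 described at the top of this file.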
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

static void blk_account_io_flush(struct request *rq)
{
	struct hd_struct *part = &rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
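 * Steps that the hardware makes unnecessary can be completed in one call
 * by passing a mask of them in @seq (see blk_insert_flush(), which passes
 * %REQ_FSEQ_ACTIONS & ~policy).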
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
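		 * blk_flush_restore_request() below undoes those
		 * adjustments before blk_mq_end_request() finishes @rq.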
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	blk_account_io_flush(flush_rq);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	fq->flush_queue_delayed = 0;
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
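 * If conditions C1-C3 defer the flush, this function simply returns; it
 * will be called again on the next flush-state change.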
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 *
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress.  Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * When no I/O scheduler ("none") is in use, borrow the tag from
	 * the first request since they can't be in flight at the same
	 * time, and acquire the tag's ownership for the flush request.
	 *
	 * When an I/O scheduler is in use, the flush request only needs
	 * to borrow the scheduler tag to keep the put/get driver tag
	 * accounting balanced.
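	 *
	 * Either way, C1 guarantees that at most one flush is in flight,
	 * so reusing the single preallocated fq->flush_rq (reinitialized
	 * by blk_rq_init() above) is safe.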
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		fq->orig_rq = first_rq;
		flush_rq->tag = first_rq->tag;
		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
	} else {
		flush_rq->internal_tag = first_rq->internal_tag;
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called when a %REQ_PREFLUSH and/or %REQ_FUA request is being
 * submitted.  Analyze what needs to be done and put it on the right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
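	 * REQ_PREFLUSH is always cleared: if a PREFLUSH step is needed,
	 * the flush machinery issues it as a separate flush request.
	 * REQ_FUA is cleared only when the device lacks FUA support, in
	 * which case a POSTFLUSH step stands in for it.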
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
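 *
 *    This submits an empty bio with REQ_OP_WRITE | REQ_PREFLUSH set and
 *    waits for its completion.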
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
					      int node, int cmd_size, gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	lockdep_register_key(&fq->key);
	lockdep_set_class(&fq->mq_flush_lock, &fq->key);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio-based request queues have no flush queue */
	if (!fq)
		return;

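	/* release the lockdep key registered by blk_alloc_flush_queue() */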
	lockdep_unregister_key(&fq->key);
	kfree(fq->flush_rq);
	kfree(fq);
}