// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
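
/*
 * For example, a REQ_PREFLUSH|REQ_FUA write with data is sequenced as
 * PREFLUSH -> DATA -> POSTFLUSH on a device with a writeback cache but no
 * FUA support, as PREFLUSH -> DATA (REQ_FUA passed through) on a device
 * that also supports FUA, and as a plain write on a device without a
 * writeback cache.
 */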

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

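/*
 * Map the REQ_PREFLUSH/REQ_FUA bits of @rq and the queue's cache features
 * in @fflags to the REQ_FSEQ_* steps the request has to go through, as
 * described at the top of this file.  E.g. a REQ_FUA write with data on a
 * queue with QUEUE_FLAG_WC set but QUEUE_FLAG_FUA clear yields
 * REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.
 */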
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

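/*
 * Return the next step of @rq's flush sequence: the lowest REQ_FSEQ_* bit
 * that is not yet set in rq->flush.seq.
 */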
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

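/*
 * Queue @rq for dispatch through the blk-mq requeue list; @add_front asks
 * for insertion at the head of that list.
 */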
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

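/* Account a completed flush in the disk's STAT_FLUSH I/O statistics. */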
static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

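/*
 * Completion handler of the flush request itself (fq->flush_rq).  Only the
 * call that drops the last reference does the real work: it retires the
 * just-finished flush by toggling flush_running_idx and advances every
 * request that was waiting on it to its next sequence step.  An earlier
 * call (e.g. from the timeout path) merely records the error in
 * fq->rq_status.
 */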
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * Flush request has to be marked as IDLE when it is really ended
	 * because its .end_io() is called from timeout code path too for
	 * avoiding use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In case of the none scheduler, borrow the tag from the first
	 * request since they can't be in flight at the same time, and
	 * acquire the tag's ownership for the flush req.
	 *
	 * In case of an IO scheduler, the flush rq needs to borrow the
	 * scheduler tag just for cheating put/get driver tag.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow the data request's driver tag, so we have to
		 * mark this flush request as INFLIGHT to avoid double
		 * accounting of this driver tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order the WRITE of ->end_io against the WRITE of rq->ref.  Its
	 * pair is the barrier implied by refcount_inc_not_zero() called
	 * from blk_mq_find_and_get_req(), which orders the WRITE/READ of
	 * flush_rq->ref against the READ of flush_rq->end_io.
	 */
	smp_wmb();
	refcount_set(&flush_rq->ref, 1);

	blk_flush_queue_rq(flush_rq, false);
}

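/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA
 * request.  Mark the step as done, which may queue a POSTFLUSH, and
 * restart the hardware queue since the flush machinery may have new
 * requests to dispatch.
 */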
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);

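/*
 * Allocate a blk_flush_queue along with its pre-allocated flush request.
 * @cmd_size extra bytes are reserved behind the flush request for the
 * driver's per-command payload, matching the layout of regular requests.
 */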
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

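/* Free a flush queue allocated by blk_alloc_flush_queue(). */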
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio based request queue doesn't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to avoid
 * a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  We need to assign a different lock class
 * to these drivers' fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance is overkill, and worse, it introduces a horrible boot delay
 * because synchronize_rcu() is implied in lockdep_unregister_key(), which
 * is called for each hctx release.  SCSI probing may synchronously create
 * and destroy lots of MQ request_queues for non-existent devices, and some
 * robot test kernels always enable the lockdep option.  It has been
 * observed that more than half an hour is taken during SCSI MQ probe with
 * a per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
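
/*
 * Minimal usage sketch (hypothetical driver names, for illustration only):
 * a driver whose completion path may re-enter flush_end_io(), such as a
 * loopback target, would register a static key from its ->init_hctx()
 * callback:
 *
 *	static struct lock_class_key example_fq_lock_key;
 *
 *	static int example_init_hctx(struct blk_mq_hw_ctx *hctx,
 *				     void *driver_data, unsigned int hctx_idx)
 *	{
 *		blk_mq_hctx_set_fq_lock_class(hctx, &example_fq_lock_key);
 *		return 0;
 *	}
 */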