xref: /openbmc/linux/block/blk-flush.c (revision 7ddab5de)
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. The flush is deferred if any request is executing DATA of its
 *     sequence.  This avoids issuing separate POSTFLUSHes for requests
 *     which shared PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
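
/*
 * As a rough illustration of the above (a sketch, not an exhaustive
 * table), for a device that advertises a writeback cache:
 *
 *	request			FUA supported		FUA not supported
 *	FLUSH, no data		PREFLUSH		PREFLUSH
 *	FLUSH + data		PREFLUSH + DATA		PREFLUSH + DATA
 *	FUA + data		DATA (FUA retained)	DATA + POSTFLUSH
 *	FLUSH + FUA + data	PREFLUSH + DATA (FUA)	PREFLUSH + DATA + POSTFLUSH
 *
 * The exact mapping is computed by blk_flush_policy() below.
 */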

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

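/**
 * blk_flush_policy - determine which flush sequence steps a request needs
 * @fflags: flush flags advertised by the queue (REQ_FLUSH/REQ_FUA support)
 * @rq: request being analyzed
 *
 * Returns a mask of REQ_FSEQ_* steps: DATA if the request carries data,
 * PREFLUSH if it asks for a cache flush on a device with a writeback
 * cache, and POSTFLUSH if it asks for FUA on a device that can't do FUA
 * natively.
 */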
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

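/*
 * Return the next pending step of @rq's flush sequence, i.e. the lowest
 * REQ_FSEQ_* bit not yet recorded in rq->flush.seq.  For example, once
 * REQ_FSEQ_PREFLUSH has completed for a request with data, this returns
 * REQ_FSEQ_DATA.
 */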
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

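/*
 * (Re)queue @rq for dispatch.  On blk-mq the request goes through the
 * requeue list and the requeue work takes care of running the queue, so
 * %false is returned; on the legacy path the request is put directly on
 * q->queue_head and %true tells the caller that the queue needs to be
 * kicked.  @add_front selects head vs. tail insertion.
 */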
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed the @seq part of its flush sequence.  Record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	return kicked | queued;
}

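/*
 * Completion handler for the flush request itself.  Accounts the flush,
 * then walks every request that was waiting on it and advances each one
 * to its next sequence step via blk_flush_complete_seq().
 */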
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops) {
		spin_lock_irqsave(&q->mq_flush_lock, flags);
		flush_rq->tag = -1;
	}

	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request
	 *    finishes to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed; consider issuing a flush
 * request.  Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = q->flush_rq;

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);
	if (q->mq_ops)
		blk_mq_clone_flush_request(flush_rq, first_rq);

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

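/*
 * Completion handler for the DATA step of a sequenced request on the
 * legacy (!mq) path; mq_flush_data_end_io() below is the blk-mq variant,
 * which additionally takes q->mq_flush_lock.
 */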
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid a stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid a stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  The flush is issued and waited upon synchronously via
 *    submit_bio_wait().
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
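
/*
 * Minimal usage sketch (hypothetical caller; error handling elided to the
 * bare minimum):
 *
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *	if (err)
 *		pr_warn("cache flush failed: %d\n", err);
 *
 * Passing NULL for @error_sector simply skips reporting the error offset.
 */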

static int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	spin_lock_init(&q->mq_flush_lock);

	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}

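/*
 * Set up the per-queue flush state: the two pending/running flush queues,
 * the list of in-flight flush data requests, and the preallocated flush
 * request.  blk-mq queues defer to blk_mq_init_flush() above, which sizes
 * the flush request to also hold the driver's per-command payload
 * (set->cmd_size), rounded up to a cache line.  Called while setting up a
 * request_queue; blk_exit_flush() undoes it.
 */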
int blk_init_flush(struct request_queue *q)
{
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);

	if (q->mq_ops)
		return blk_mq_init_flush(q);

	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;

	return 0;
}

void blk_exit_flush(struct request_queue *q)
{
	kfree(q->flush_rq);
}