xref: /openbmc/linux/block/blk-flush.c (revision ea4f995e)
18839a0e0STejun Heo /*
23140c3cfSOmar Sandoval  * Functions to sequence PREFLUSH and FUA writes.
3ae1b1539STejun Heo  *
4ae1b1539STejun Heo  * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
5ae1b1539STejun Heo  * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
6ae1b1539STejun Heo  *
7ae1b1539STejun Heo  * This file is released under the GPLv2.
8ae1b1539STejun Heo  *
93140c3cfSOmar Sandoval  * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
10ae1b1539STejun Heo  * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
11ae1b1539STejun Heo  * request properties and the hardware's capabilities.
12ae1b1539STejun Heo  *
1328a8f0d3SMike Christie  * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
1428a8f0d3SMike Christie  * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
15ae1b1539STejun Heo  * that the device cache should be flushed before the data is written, and
16ae1b1539STejun Heo  * REQ_FUA means that the data must be on non-volatile media on request
17ae1b1539STejun Heo  * completion.
18ae1b1539STejun Heo  *
193140c3cfSOmar Sandoval  * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make any
203140c3cfSOmar Sandoval  * difference.  The requests are completed immediately if there's no data,
213140c3cfSOmar Sandoval  * or executed as normal requests otherwise.
22ae1b1539STejun Heo  *
2328a8f0d3SMike Christie  * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
24ae1b1539STejun Heo  * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
25ae1b1539STejun Heo  *
2628a8f0d3SMike Christie  * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
2728a8f0d3SMike Christie  * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
28ae1b1539STejun Heo  *
29ae1b1539STejun Heo  * The actual execution of flushes is double buffered.  Whenever a request
30ae1b1539STejun Heo  * needs to execute PRE or POSTFLUSH, it queues at
317c94e1c1SMing Lei  * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
323a5e02ceSMike Christie  * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
33ae1b1539STejun Heo  * completes, all the requests which were pending proceed to the next
343140c3cfSOmar Sandoval  * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
35ae1b1539STejun Heo  * requests.
36ae1b1539STejun Heo  *
37ae1b1539STejun Heo  * Currently, the following conditions are used to determine when to issue
38ae1b1539STejun Heo  * a flush.
39ae1b1539STejun Heo  *
40ae1b1539STejun Heo  * C1. At any given time, only one flush shall be in progress.  This makes
41ae1b1539STejun Heo  *     double buffering sufficient.
42ae1b1539STejun Heo  *
43ae1b1539STejun Heo  * C2. Flush is deferred if any request is executing DATA of its sequence.
44ae1b1539STejun Heo  *     This avoids issuing separate POSTFLUSHes for requests which shared
45ae1b1539STejun Heo  *     PREFLUSH.
46ae1b1539STejun Heo  *
47ae1b1539STejun Heo  * C3. The second condition is ignored if there is a request which has
48ae1b1539STejun Heo  *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
49ae1b1539STejun Heo  *     starvation in the unlikely case where there is a continuous stream of
503140c3cfSOmar Sandoval  *     FUA (without PREFLUSH) requests.
51ae1b1539STejun Heo  *
52ae1b1539STejun Heo  * For devices which support FUA, it isn't clear whether C2 (and thus C3)
53ae1b1539STejun Heo  * is beneficial.
54ae1b1539STejun Heo  *
553140c3cfSOmar Sandoval  * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
56ae1b1539STejun Heo  * once while executing DATA and again after the whole sequence is
57ae1b1539STejun Heo  * complete.  The first completion updates the contained bio but doesn't
58ae1b1539STejun Heo  * finish it so that the bio submitter is notified only after the whole
59e8064021SChristoph Hellwig  * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
60ae1b1539STejun Heo  * req_bio_endio().
61ae1b1539STejun Heo  *
623140c3cfSOmar Sandoval  * The above peculiarity requires that each PREFLUSH/FUA request has only one
63ae1b1539STejun Heo  * bio attached to it, which is guaranteed as they aren't allowed to be
64ae1b1539STejun Heo  * merged in the usual way.
658839a0e0STejun Heo  */
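
/*
 * Example (illustrative sketch, not required by the machinery below): a
 * filesystem that needs "flush the cache, then write this block durably"
 * might submit a bio along these lines, assuming @bio has already been set
 * up with its data and target device:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 *	submit_bio(bio);
 *
 * On a writeback-cache device without FUA this becomes the full
 * PREFLUSH -> DATA -> POSTFLUSH sequence; with FUA it becomes
 * PREFLUSH -> DATA with REQ_FUA passed through; without a writeback cache
 * it is just a normal data write.
 */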
66ae1b1539STejun Heo 
678839a0e0STejun Heo #include <linux/kernel.h>
688839a0e0STejun Heo #include <linux/module.h>
698839a0e0STejun Heo #include <linux/bio.h>
708839a0e0STejun Heo #include <linux/blkdev.h>
718839a0e0STejun Heo #include <linux/gfp.h>
72320ae51fSJens Axboe #include <linux/blk-mq.h>
738839a0e0STejun Heo 
748839a0e0STejun Heo #include "blk.h"
75320ae51fSJens Axboe #include "blk-mq.h"
760048b483SMing Lei #include "blk-mq-tag.h"
77bd166ef1SJens Axboe #include "blk-mq-sched.h"
788839a0e0STejun Heo 
793140c3cfSOmar Sandoval /* PREFLUSH/FUA sequences */
804fed947cSTejun Heo enum {
81ae1b1539STejun Heo 	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
82ae1b1539STejun Heo 	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
83ae1b1539STejun Heo 	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
84ae1b1539STejun Heo 	REQ_FSEQ_DONE		= (1 << 3),
85ae1b1539STejun Heo 
86ae1b1539STejun Heo 	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
87ae1b1539STejun Heo 				  REQ_FSEQ_POSTFLUSH,
88ae1b1539STejun Heo 
89ae1b1539STejun Heo 	/*
90ae1b1539STejun Heo 	 * If a flush has been pending longer than the following timeout,
91ae1b1539STejun Heo 	 * it's issued even if flush_data requests are still in flight.
92ae1b1539STejun Heo 	 */
93ae1b1539STejun Heo 	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
944fed947cSTejun Heo };
954fed947cSTejun Heo 
96404b8f5aSJens Axboe static void blk_kick_flush(struct request_queue *q,
9784fca1b0SHannes Reinecke 			   struct blk_flush_queue *fq, unsigned int flags);
988839a0e0STejun Heo 
99c888a8f9SJens Axboe static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
1008839a0e0STejun Heo {
101ae1b1539STejun Heo 	unsigned int policy = 0;
102ae1b1539STejun Heo 
103fa1bf42fSJeff Moyer 	if (blk_rq_sectors(rq))
104fa1bf42fSJeff Moyer 		policy |= REQ_FSEQ_DATA;
105fa1bf42fSJeff Moyer 
106c888a8f9SJens Axboe 	if (fflags & (1UL << QUEUE_FLAG_WC)) {
10728a8f0d3SMike Christie 		if (rq->cmd_flags & REQ_PREFLUSH)
108ae1b1539STejun Heo 			policy |= REQ_FSEQ_PREFLUSH;
109c888a8f9SJens Axboe 		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
110c888a8f9SJens Axboe 		    (rq->cmd_flags & REQ_FUA))
111ae1b1539STejun Heo 			policy |= REQ_FSEQ_POSTFLUSH;
112ae1b1539STejun Heo 	}
113ae1b1539STejun Heo 	return policy;
1148839a0e0STejun Heo }
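
/*
 * Worked example (illustrative): on a queue with QUEUE_FLAG_WC set but
 * QUEUE_FLAG_FUA clear, a request that carries data and has both
 * REQ_PREFLUSH and REQ_FUA set gets
 *
 *	policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH
 *
 * i.e. the full three-step sequence.  The same request on a queue that
 * also has QUEUE_FLAG_FUA set loses REQ_FSEQ_POSTFLUSH, since the device
 * can persist the data write by itself.
 */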
1158839a0e0STejun Heo 
116ae1b1539STejun Heo static unsigned int blk_flush_cur_seq(struct request *rq)
1178839a0e0STejun Heo {
118ae1b1539STejun Heo 	return 1 << ffz(rq->flush.seq);
1198839a0e0STejun Heo }
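
/*
 * Example (illustrative): a freshly started sequence has flush.seq == 0,
 * so ffz() returns bit 0 and the current step is REQ_FSEQ_PREFLUSH.  Once
 * REQ_FSEQ_PREFLUSH and REQ_FSEQ_DATA are both recorded (seq == 0x3), the
 * first zero bit is bit 2 and the current step becomes REQ_FSEQ_POSTFLUSH.
 */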
1208839a0e0STejun Heo 
121ae1b1539STejun Heo static void blk_flush_restore_request(struct request *rq)
12247f70d5aSTejun Heo {
12347f70d5aSTejun Heo 	/*
124ae1b1539STejun Heo 	 * After flush data completion, @rq->bio is %NULL but we need to
125ae1b1539STejun Heo 	 * complete the bio again.  @rq->biotail is guaranteed to equal the
126ae1b1539STejun Heo 	 * original @rq->bio.  Restore it.
12747f70d5aSTejun Heo 	 */
128ae1b1539STejun Heo 	rq->bio = rq->biotail;
129ae1b1539STejun Heo 
130ae1b1539STejun Heo 	/* make @rq a normal request */
131e8064021SChristoph Hellwig 	rq->rq_flags &= ~RQF_FLUSH_SEQ;
1324853abaaSJeff Moyer 	rq->end_io = rq->flush.saved_end_io;
133320ae51fSJens Axboe }
134320ae51fSJens Axboe 
135404b8f5aSJens Axboe static void blk_flush_queue_rq(struct request *rq, bool add_front)
136320ae51fSJens Axboe {
1372b053acaSBart Van Assche 	blk_mq_add_to_requeue_list(rq, add_front, true);
13847f70d5aSTejun Heo }
13947f70d5aSTejun Heo 
140ae1b1539STejun Heo /**
141ae1b1539STejun Heo  * blk_flush_complete_seq - complete flush sequence
1423140c3cfSOmar Sandoval  * @rq: PREFLUSH/FUA request being sequenced
1430bae352dSMing Lei  * @fq: flush queue
144ae1b1539STejun Heo  * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
145ae1b1539STejun Heo  * @error: whether an error occurred
146ae1b1539STejun Heo  *
147ae1b1539STejun Heo  * @rq just completed @seq part of its flush sequence, record the
148ae1b1539STejun Heo  * completion and trigger the next step.
149ae1b1539STejun Heo  *
150ae1b1539STejun Heo  * CONTEXT:
1517c94e1c1SMing Lei  * spin_lock_irq(fq->mq_flush_lock)
155ae1b1539STejun Heo  */
156404b8f5aSJens Axboe static void blk_flush_complete_seq(struct request *rq,
1570bae352dSMing Lei 				   struct blk_flush_queue *fq,
1582a842acaSChristoph Hellwig 				   unsigned int seq, blk_status_t error)
1598839a0e0STejun Heo {
160ae1b1539STejun Heo 	struct request_queue *q = rq->q;
1617c94e1c1SMing Lei 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
162190b02edSJens Axboe 	unsigned int cmd_flags;
1638839a0e0STejun Heo 
164ae1b1539STejun Heo 	BUG_ON(rq->flush.seq & seq);
165ae1b1539STejun Heo 	rq->flush.seq |= seq;
166190b02edSJens Axboe 	cmd_flags = rq->cmd_flags;
1678839a0e0STejun Heo 
168ae1b1539STejun Heo 	if (likely(!error))
169ae1b1539STejun Heo 		seq = blk_flush_cur_seq(rq);
170ae1b1539STejun Heo 	else
171ae1b1539STejun Heo 		seq = REQ_FSEQ_DONE;
1728839a0e0STejun Heo 
173ae1b1539STejun Heo 	switch (seq) {
174ae1b1539STejun Heo 	case REQ_FSEQ_PREFLUSH:
175ae1b1539STejun Heo 	case REQ_FSEQ_POSTFLUSH:
176ae1b1539STejun Heo 		/* queue for flush */
177ae1b1539STejun Heo 		if (list_empty(pending))
1787c94e1c1SMing Lei 			fq->flush_pending_since = jiffies;
179ae1b1539STejun Heo 		list_move_tail(&rq->flush.list, pending);
1808839a0e0STejun Heo 		break;
181ae1b1539STejun Heo 
182ae1b1539STejun Heo 	case REQ_FSEQ_DATA:
1837c94e1c1SMing Lei 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
184404b8f5aSJens Axboe 		blk_flush_queue_rq(rq, true);
185ae1b1539STejun Heo 		break;
186ae1b1539STejun Heo 
187ae1b1539STejun Heo 	case REQ_FSEQ_DONE:
18809d60c70STejun Heo 		/*
189ae1b1539STejun Heo 		 * @rq was previously adjusted by blk_insert_flush() for
190ae1b1539STejun Heo 		 * flush sequencing and may already have gone through the
191ae1b1539STejun Heo 		 * flush data request completion path.  Restore @rq for
192ae1b1539STejun Heo 		 * normal completion and end it.
19309d60c70STejun Heo 		 */
194ae1b1539STejun Heo 		BUG_ON(!list_empty(&rq->queuelist));
195ae1b1539STejun Heo 		list_del_init(&rq->flush.list);
196ae1b1539STejun Heo 		blk_flush_restore_request(rq);
197c8a446adSChristoph Hellwig 		blk_mq_end_request(rq, error);
1988839a0e0STejun Heo 		break;
199ae1b1539STejun Heo 
2008839a0e0STejun Heo 	default:
2018839a0e0STejun Heo 		BUG();
2028839a0e0STejun Heo 	}
203cde4c406SChristoph Hellwig 
204404b8f5aSJens Axboe 	blk_kick_flush(q, fq, cmd_flags);
2058839a0e0STejun Heo }
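
/*
 * Walk-through (illustrative): for a PREFLUSH+FUA write on a device with a
 * writeback cache and no FUA, blk_insert_flush() first reports the steps
 * that are *not* needed (REQ_FSEQ_ACTIONS & ~policy), here none.  The
 * request then sits on the pending list until the PREFLUSH completes,
 * moves to flush_data_in_flight while its data is requeued and written,
 * returns to the pending list for the POSTFLUSH, and finally reaches
 * REQ_FSEQ_DONE, where it is restored and ended for the original submitter.
 */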
2068839a0e0STejun Heo 
2072a842acaSChristoph Hellwig static void flush_end_io(struct request *flush_rq, blk_status_t error)
2088839a0e0STejun Heo {
209ae1b1539STejun Heo 	struct request_queue *q = flush_rq->q;
210320ae51fSJens Axboe 	struct list_head *running;
211ae1b1539STejun Heo 	struct request *rq, *n;
212320ae51fSJens Axboe 	unsigned long flags = 0;
213e97c293cSMing Lei 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
2140048b483SMing Lei 	struct blk_mq_hw_ctx *hctx;
2150048b483SMing Lei 
2160048b483SMing Lei 	/* release the tag's ownership back to the request it was borrowed from */
2177c94e1c1SMing Lei 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
218ea4f995eSJens Axboe 	hctx = flush_rq->mq_hctx;
219923218f6SMing Lei 	if (!q->elevator) {
2200048b483SMing Lei 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
2217ddab5deSMing Lei 		flush_rq->tag = -1;
222923218f6SMing Lei 	} else {
223923218f6SMing Lei 		blk_mq_put_driver_tag_hctx(hctx, flush_rq);
224923218f6SMing Lei 		flush_rq->internal_tag = -1;
225923218f6SMing Lei 	}
22618741986SChristoph Hellwig 
2277c94e1c1SMing Lei 	running = &fq->flush_queue[fq->flush_running_idx];
2287c94e1c1SMing Lei 	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
229ae1b1539STejun Heo 
230ae1b1539STejun Heo 	/* account completion of the flush request */
2317c94e1c1SMing Lei 	fq->flush_running_idx ^= 1;
232320ae51fSJens Axboe 
233ae1b1539STejun Heo 	/* and push the waiting requests to the next stage */
234ae1b1539STejun Heo 	list_for_each_entry_safe(rq, n, running, flush.list) {
235ae1b1539STejun Heo 		unsigned int seq = blk_flush_cur_seq(rq);
236ae1b1539STejun Heo 
237ae1b1539STejun Heo 		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
238404b8f5aSJens Axboe 		blk_flush_complete_seq(rq, fq, seq, error);
239ae1b1539STejun Heo 	}
240ae1b1539STejun Heo 
2417c94e1c1SMing Lei 	fq->flush_queue_delayed = 0;
2427c94e1c1SMing Lei 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
243320ae51fSJens Axboe }
244320ae51fSJens Axboe 
245ae1b1539STejun Heo /**
246ae1b1539STejun Heo  * blk_kick_flush - consider issuing flush request
247ae1b1539STejun Heo  * @q: request_queue being kicked
2480bae352dSMing Lei  * @fq: flush queue
24984fca1b0SHannes Reinecke  * @flags: cmd_flags of the original request
2504fed947cSTejun Heo  *
251ae1b1539STejun Heo  * Flush related states of @q have changed, consider issuing flush request.
252ae1b1539STejun Heo  * Please read the comment at the top of this file for more info.
253ae1b1539STejun Heo  *
254ae1b1539STejun Heo  * CONTEXT:
2557c94e1c1SMing Lei  * spin_lock_irq(fq->mq_flush_lock)
256ae1b1539STejun Heo  *
2578839a0e0STejun Heo  */
258404b8f5aSJens Axboe static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
25984fca1b0SHannes Reinecke 			   unsigned int flags)
260ae1b1539STejun Heo {
2617c94e1c1SMing Lei 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
262ae1b1539STejun Heo 	struct request *first_rq =
263ae1b1539STejun Heo 		list_first_entry(pending, struct request, flush.list);
2647c94e1c1SMing Lei 	struct request *flush_rq = fq->flush_rq;
265ae1b1539STejun Heo 
266ae1b1539STejun Heo 	/* C1 described at the top of this file */
2677c94e1c1SMing Lei 	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
268404b8f5aSJens Axboe 		return;
269ae1b1539STejun Heo 
2707520872cSJens Axboe 	/* C2 and C3
2717520872cSJens Axboe 	 *
2727520872cSJens Axboe 	 * For blk-mq + scheduling, we can risk having all driver tags
2737520872cSJens Axboe 	 * assigned to empty flushes, and we deadlock if we are expecting
2747520872cSJens Axboe 	 * other requests to make progress. Don't defer for that case.
2757520872cSJens Axboe 	 */
2767c94e1c1SMing Lei 	if (!list_empty(&fq->flush_data_in_flight) &&
2777520872cSJens Axboe 	    !(q->mq_ops && q->elevator) &&
278ae1b1539STejun Heo 	    time_before(jiffies,
2797c94e1c1SMing Lei 			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
280404b8f5aSJens Axboe 		return;
281ae1b1539STejun Heo 
282ae1b1539STejun Heo 	/*
283ae1b1539STejun Heo 	 * Issue flush and toggle pending_idx.  This makes pending_idx
284ae1b1539STejun Heo 	 * different from running_idx, which means flush is in flight.
285ae1b1539STejun Heo 	 */
2867c94e1c1SMing Lei 	fq->flush_pending_idx ^= 1;
28718741986SChristoph Hellwig 
2887ddab5deSMing Lei 	blk_rq_init(q, flush_rq);
289f70ced09SMing Lei 
290f70ced09SMing Lei 	/*
291923218f6SMing Lei 	 * With no I/O scheduler, borrow the tag from the first request
292923218f6SMing Lei 	 * since the two can't be in flight at the same time, and take over
293923218f6SMing Lei 	 * the tag's ownership for the flush request.
294923218f6SMing Lei 	 *
295923218f6SMing Lei 	 * With an I/O scheduler, the flush request only needs to borrow the
296923218f6SMing Lei 	 * scheduler tag so that putting/getting the driver tag stays balanced.
297f70ced09SMing Lei 	 */
298f70ced09SMing Lei 	flush_rq->mq_ctx = first_rq->mq_ctx;
299ea4f995eSJens Axboe 	flush_rq->mq_hctx = first_rq->mq_hctx;
3000048b483SMing Lei 
301923218f6SMing Lei 	if (!q->elevator) {
302923218f6SMing Lei 		fq->orig_rq = first_rq;
303923218f6SMing Lei 		flush_rq->tag = first_rq->tag;
304ea4f995eSJens Axboe 		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
305923218f6SMing Lei 	} else {
306923218f6SMing Lei 		flush_rq->internal_tag = first_rq->internal_tag;
307923218f6SMing Lei 	}
308320ae51fSJens Axboe 
30970fd7614SChristoph Hellwig 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
31084fca1b0SHannes Reinecke 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
311e8064021SChristoph Hellwig 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
3127ddab5deSMing Lei 	flush_rq->rq_disk = first_rq->rq_disk;
3137ddab5deSMing Lei 	flush_rq->end_io = flush_end_io;
314ae1b1539STejun Heo 
315404b8f5aSJens Axboe 	blk_flush_queue_rq(flush_rq, false);
316ae1b1539STejun Heo }
317ae1b1539STejun Heo 
3182a842acaSChristoph Hellwig static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
319320ae51fSJens Axboe {
320320ae51fSJens Axboe 	struct request_queue *q = rq->q;
321ea4f995eSJens Axboe 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
322e97c293cSMing Lei 	struct blk_mq_ctx *ctx = rq->mq_ctx;
323320ae51fSJens Axboe 	unsigned long flags;
324e97c293cSMing Lei 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
325320ae51fSJens Axboe 
326923218f6SMing Lei 	if (q->elevator) {
327923218f6SMing Lei 		WARN_ON(rq->tag < 0);
328923218f6SMing Lei 		blk_mq_put_driver_tag_hctx(hctx, rq);
329923218f6SMing Lei 	}
330923218f6SMing Lei 
331320ae51fSJens Axboe 	/*
332320ae51fSJens Axboe 	 * After populating an empty queue, kick it to avoid stall.  Read
333320ae51fSJens Axboe 	 * the comment in flush_end_io().
334320ae51fSJens Axboe 	 */
3357c94e1c1SMing Lei 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
336bd166ef1SJens Axboe 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
3377c94e1c1SMing Lei 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
338bd166ef1SJens Axboe 
339bd166ef1SJens Axboe 	blk_mq_run_hw_queue(hctx, true);
340320ae51fSJens Axboe }
341320ae51fSJens Axboe 
342ae1b1539STejun Heo /**
3433140c3cfSOmar Sandoval  * blk_insert_flush - insert a new PREFLUSH/FUA request
344ae1b1539STejun Heo  * @rq: request to insert
345ae1b1539STejun Heo  *
346b710a480SJens Axboe  * To be called from the blk-mq submission path when a PREFLUSH/FUA request
347320ae51fSJens Axboe  * is handed to the block layer.
348ae1b1539STejun Heo  * @rq is being submitted.  Analyze what needs to be done and put it on the
349ae1b1539STejun Heo  * right queue.
350ae1b1539STejun Heo  */
351ae1b1539STejun Heo void blk_insert_flush(struct request *rq)
352ae1b1539STejun Heo {
353ae1b1539STejun Heo 	struct request_queue *q = rq->q;
354c888a8f9SJens Axboe 	unsigned long fflags = q->queue_flags;	/* may change, cache */
355ae1b1539STejun Heo 	unsigned int policy = blk_flush_policy(fflags, rq);
356e97c293cSMing Lei 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
357ae1b1539STejun Heo 
358ae1b1539STejun Heo 	/*
359ae1b1539STejun Heo 	 * @policy now records what operations need to be done.  Adjust
36028a8f0d3SMike Christie 	 * REQ_PREFLUSH and FUA for the driver.
361ae1b1539STejun Heo 	 */
36228a8f0d3SMike Christie 	rq->cmd_flags &= ~REQ_PREFLUSH;
363c888a8f9SJens Axboe 	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
3644fed947cSTejun Heo 		rq->cmd_flags &= ~REQ_FUA;
365ae1b1539STejun Heo 
366ae1b1539STejun Heo 	/*
367ae5b2ec8SJens Axboe 	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
368ae5b2ec8SJens Axboe 	 * of those flags, we have to set REQ_SYNC to avoid skewing
369ae5b2ec8SJens Axboe 	 * the request accounting.
370ae5b2ec8SJens Axboe 	 */
371ae5b2ec8SJens Axboe 	rq->cmd_flags |= REQ_SYNC;
372ae5b2ec8SJens Axboe 
373ae5b2ec8SJens Axboe 	/*
3744853abaaSJeff Moyer 	 * An empty flush handed down from a stacking driver may
3754853abaaSJeff Moyer 	 * translate into nothing if the underlying device does not
3764853abaaSJeff Moyer 	 * advertise a write-back cache.  In this case, simply
3774853abaaSJeff Moyer 	 * complete the request.
3784853abaaSJeff Moyer 	 */
3794853abaaSJeff Moyer 	if (!policy) {
380c8a446adSChristoph Hellwig 		blk_mq_end_request(rq, 0);
3814853abaaSJeff Moyer 		return;
3824853abaaSJeff Moyer 	}
3834853abaaSJeff Moyer 
384834f9f61SJeff Moyer 	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
3854853abaaSJeff Moyer 
3864853abaaSJeff Moyer 	/*
387ae1b1539STejun Heo 	 * If there's data but flush is not necessary, the request can be
388ae1b1539STejun Heo 	 * processed directly without going through flush machinery.  Queue
389ae1b1539STejun Heo 	 * for normal execution.
390ae1b1539STejun Heo 	 */
391ae1b1539STejun Heo 	if ((policy & REQ_FSEQ_DATA) &&
392ae1b1539STejun Heo 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
393598906f8SMing Lei 		blk_mq_request_bypass_insert(rq, false);
394ae1b1539STejun Heo 		return;
3958839a0e0STejun Heo 	}
3968839a0e0STejun Heo 
3978839a0e0STejun Heo 	/*
398ae1b1539STejun Heo 	 * @rq should go through flush machinery.  Mark it part of flush
399ae1b1539STejun Heo 	 * sequence and submit for further processing.
4008839a0e0STejun Heo 	 */
401ae1b1539STejun Heo 	memset(&rq->flush, 0, sizeof(rq->flush));
402ae1b1539STejun Heo 	INIT_LIST_HEAD(&rq->flush.list);
403e8064021SChristoph Hellwig 	rq->rq_flags |= RQF_FLUSH_SEQ;
4044853abaaSJeff Moyer 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
4057e992f84SJens Axboe 
406320ae51fSJens Axboe 	rq->end_io = mq_flush_data_end_io;
407320ae51fSJens Axboe 
4087c94e1c1SMing Lei 	spin_lock_irq(&fq->mq_flush_lock);
4090bae352dSMing Lei 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
4107c94e1c1SMing Lei 	spin_unlock_irq(&fq->mq_flush_lock);
411ae1b1539STejun Heo }
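
/*
 * Examples of the three dispositions above (illustrative): an empty
 * REQ_PREFLUSH request on a device without a writeback cache has no policy
 * bits and is ended immediately; a data write carrying only REQ_FUA on a
 * FUA-capable device has just REQ_FSEQ_DATA and bypasses the flush
 * machinery; anything that still needs a PREFLUSH or POSTFLUSH enters the
 * sequence via blk_flush_complete_seq() above.
 */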
412ae1b1539STejun Heo 
413ae1b1539STejun Heo /**
4148839a0e0STejun Heo  * blkdev_issue_flush - queue a flush
4158839a0e0STejun Heo  * @bdev:	blockdev to issue flush for
4168839a0e0STejun Heo  * @gfp_mask:	memory allocation flags (for bio_alloc)
4168839a0e0STejun Heo  * @error_sector:	if non-NULL, where to store the sector of a flush error
4188839a0e0STejun Heo  *
4198839a0e0STejun Heo  * Description:
4208839a0e0STejun Heo  *    Issue a flush for the block device in question. Caller can supply
4218839a0e0STejun Heo  *    room for storing the error offset in case of a flush error, if they
4221be7d207SEric Biggers  *    wish to.
4238839a0e0STejun Heo  */
4248839a0e0STejun Heo int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
425dd3932edSChristoph Hellwig 		sector_t *error_sector)
4268839a0e0STejun Heo {
4278839a0e0STejun Heo 	struct request_queue *q;
4288839a0e0STejun Heo 	struct bio *bio;
4298839a0e0STejun Heo 	int ret = 0;
4308839a0e0STejun Heo 
4318839a0e0STejun Heo 	if (bdev->bd_disk == NULL)
4328839a0e0STejun Heo 		return -ENXIO;
4338839a0e0STejun Heo 
4348839a0e0STejun Heo 	q = bdev_get_queue(bdev);
4358839a0e0STejun Heo 	if (!q)
4368839a0e0STejun Heo 		return -ENXIO;
4378839a0e0STejun Heo 
4388839a0e0STejun Heo 	/*
4398839a0e0STejun Heo 	 * some block devices may not have their queue correctly set up here
4408839a0e0STejun Heo 	 * (e.g. loop device without a backing file) and so issuing a flush
4418839a0e0STejun Heo 	 * here will panic. Ensure the queue's make_request_fn is set before
442d391a2ddSTejun Heo 	 * issuing the flush.
4438839a0e0STejun Heo 	 */
4448839a0e0STejun Heo 	if (!q->make_request_fn)
4458839a0e0STejun Heo 		return -ENXIO;
4468839a0e0STejun Heo 
4478839a0e0STejun Heo 	bio = bio_alloc(gfp_mask, 0);
44874d46992SChristoph Hellwig 	bio_set_dev(bio, bdev);
44970fd7614SChristoph Hellwig 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
4508839a0e0STejun Heo 
4514e49ea4aSMike Christie 	ret = submit_bio_wait(bio);
452dd3932edSChristoph Hellwig 
4538839a0e0STejun Heo 	/*
4548839a0e0STejun Heo 	 * The driver must store the error location in ->bi_iter.bi_sector, if
4558839a0e0STejun Heo 	 * it supports it. For non-stacked drivers, this should be
4568839a0e0STejun Heo 	 * copied from blk_rq_pos(rq).
4578839a0e0STejun Heo 	 */
4588839a0e0STejun Heo 	if (error_sector)
4594f024f37SKent Overstreet 		*error_sector = bio->bi_iter.bi_sector;
4608839a0e0STejun Heo 
4618839a0e0STejun Heo 	bio_put(bio);
4628839a0e0STejun Heo 	return ret;
4638839a0e0STejun Heo }
4648839a0e0STejun Heo EXPORT_SYMBOL(blkdev_issue_flush);
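
/*
 * Usage sketch (illustrative only; "example_flush_bdev" is a made-up name,
 * not an existing helper): a caller that just wants previously completed
 * writes on @bdev to reach stable media, and has no use for the error
 * sector, could wrap the call like this.
 */
static inline int example_flush_bdev(struct block_device *bdev)
{
	/* Pass NULL since we don't care where a failed flush stopped. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}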
465320ae51fSJens Axboe 
466f70ced09SMing Lei struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
4675b202853SJianchao Wang 		int node, int cmd_size, gfp_t flags)
468320ae51fSJens Axboe {
4697c94e1c1SMing Lei 	struct blk_flush_queue *fq;
4707c94e1c1SMing Lei 	int rq_sz = sizeof(struct request);
4711bcb1eadSMing Lei 
4725b202853SJianchao Wang 	fq = kzalloc_node(sizeof(*fq), flags, node);
4737c94e1c1SMing Lei 	if (!fq)
4747c94e1c1SMing Lei 		goto fail;
4751bcb1eadSMing Lei 
4767c94e1c1SMing Lei 	spin_lock_init(&fq->mq_flush_lock);
4777c94e1c1SMing Lei 
4786d247d7fSChristoph Hellwig 	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
4795b202853SJianchao Wang 	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
4807c94e1c1SMing Lei 	if (!fq->flush_rq)
4817c94e1c1SMing Lei 		goto fail_rq;
4827c94e1c1SMing Lei 
4837c94e1c1SMing Lei 	INIT_LIST_HEAD(&fq->flush_queue[0]);
4847c94e1c1SMing Lei 	INIT_LIST_HEAD(&fq->flush_queue[1]);
4857c94e1c1SMing Lei 	INIT_LIST_HEAD(&fq->flush_data_in_flight);
4867c94e1c1SMing Lei 
4877c94e1c1SMing Lei 	return fq;
4887c94e1c1SMing Lei 
4897c94e1c1SMing Lei  fail_rq:
4907c94e1c1SMing Lei 	kfree(fq);
4917c94e1c1SMing Lei  fail:
4927c94e1c1SMing Lei 	return NULL;
4937c94e1c1SMing Lei }
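
/*
 * Illustrative call (hedged; the real call sites live in blk-mq's hardware
 * context setup, and the names below assume that context): a per-hctx flush
 * queue would be allocated roughly as
 *
 *	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
 *					 GFP_KERNEL);
 *	if (!hctx->fq)
 *		goto fail;
 *
 * and paired with blk_free_flush_queue() below when the hctx is torn down.
 */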
4947c94e1c1SMing Lei 
495ba483388SMing Lei void blk_free_flush_queue(struct blk_flush_queue *fq)
4967c94e1c1SMing Lei {
4977c94e1c1SMing Lei 	/* bio-based request queues don't have a flush queue */
4987c94e1c1SMing Lei 	if (!fq)
4997c94e1c1SMing Lei 		return;
5007c94e1c1SMing Lei 
5017c94e1c1SMing Lei 	kfree(fq->flush_rq);
5027c94e1c1SMing Lei 	kfree(fq);
503320ae51fSJens Axboe }