xref: /openbmc/linux/block/blk-flush.c (revision ae5b2ec8ad5e017126cd4552220f25ce8a6b92e9)
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
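
/*
 * Illustrative example (not part of the code above, just the sequencing it
 * describes): on a queue with a writeback cache but no FUA support, a
 * REQ_PREFLUSH|REQ_FUA write carrying data is sequenced as
 *
 *	PREFLUSH -> DATA -> POSTFLUSH -> DONE
 *
 * whereas on a FUA-capable queue the same request only needs
 *
 *	PREFLUSH -> DATA (issued with REQ_FUA) -> DONE
 */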

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

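/*
 * blk_flush_policy - determine which REQ_FSEQ_* steps @rq needs
 *
 * Returns a mask of REQ_FSEQ_DATA, REQ_FSEQ_PREFLUSH and REQ_FSEQ_POSTFLUSH
 * based on whether @rq carries data, whether the queue has a writeback
 * cache (QUEUE_FLAG_WC) and whether it supports FUA (QUEUE_FLAG_FUA).
 */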
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

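/*
 * Return the next step of @rq's flush sequence: the lowest REQ_FSEQ_* bit
 * that hasn't been completed yet.
 */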
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

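/*
 * (Re)queue @rq for dispatch.  On blk-mq, @rq goes onto the requeue list
 * (which kicks itself) and %false is returned; on the legacy path, @rq is
 * put on q->queue_head and %true tells the caller to run the queue.
 */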
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		blk_mq_add_to_requeue_list(rq, add_front, true);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

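/*
 * flush_end_io - completion handler for the flush request itself
 *
 * Toggles the running index, advances every request that was waiting on
 * this flush to its next sequence step and considers issuing the next
 * flush.
 */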
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 * the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request
	 * finishes to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the first request since they can't be in
	 * flight at the same time, and acquire the tag's ownership for the
	 * flush request.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

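/*
 * flush_data_end_io - completion handler for the DATA step on the legacy
 * (non-mq) path; advances @rq to its next flush sequence step.
 */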
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

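/*
 * mq_flush_data_end_io - completion handler for the DATA step on the
 * blk-mq path; advances @rq under fq->mq_flush_lock and runs the hardware
 * queue if more work was queued.
 */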
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = blk_mq_map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  The flush is submitted with submit_bio_wait(), so it has
 *    completed by the time this function returns.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

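/*
 * blk_alloc_flush_queue - allocate a struct blk_flush_queue for @q
 *
 * For blk-mq queues the preallocated flush request is sized to hold
 * @cmd_size bytes of driver payload, rounded up to a cache line.
 * Returns the new flush queue or %NULL on allocation failure.
 */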
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

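/*
 * blk_free_flush_queue - free a flush queue allocated by
 * blk_alloc_flush_queue(); tolerates a %NULL @fq.
 */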
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio-based request queue has no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}