/* SPDX-License-Identifier: GPL-2.0-only */
/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on a R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *    it will be destroyed,
 *    and completion will be signalled to the originator,
 *      with or without "success".
 */

enum drbd_req_event {
	CREATED,
	TO_BE_SENT,
	TO_BE_SUBMITTED,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	QUEUE_FOR_NET_WRITE,
	QUEUE_FOR_NET_READ,
	QUEUE_FOR_SEND_OOS,

	/* An empty flush is queued as P_BARRIER,
	 * which will cause it to complete "successfully",
	 * even if the local disk flush failed.
	 *
	 * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
	 * only see an error if neither local nor remote data is reachable. */
	QUEUE_AS_DRBD_BARRIER,

	SEND_CANCELED,
	SEND_FAILED,
	HANDED_OVER_TO_NETWORK,
	OOS_HANDED_TO_NETWORK,
	CONNECTION_LOST_WHILE_PENDING,
	READ_RETRY_REMOTE_CANCELED,
	RECV_ACKED_BY_PEER,
	WRITE_ACKED_BY_PEER,
	WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
	CONFLICT_RESOLVED,
	POSTPONE_WRITE,
	NEG_ACKED,
	BARRIER_ACKED, /* in protocol A and B */
	DATA_RECEIVED, /* (remote read) */

	COMPLETED_OK,
	READ_COMPLETED_WITH_ERROR,
	READ_AHEAD_COMPLETED_WITH_ERROR,
	WRITE_COMPLETED_WITH_ERROR,
	DISCARD_COMPLETED_NOTSUPP,
	DISCARD_COMPLETED_WITH_ERROR,

	ABORT_DISK_IO,
	RESEND,
	FAIL_FROZEN_DISK_IO,
	RESTART_FROZEN_DISK_IO,
	NOTHING,
};
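
/* Illustrative sketch, not driver code: a rough walk through the events
 * above for a protocol C write that succeeds everywhere. The authoritative
 * transitions live in __req_mod() in drbd_req.c; the exact ordering is an
 * assumption here (local completion and the peer's ack may arrive in either
 * order). req_mod() is the locked wrapper defined further down. */
#if 0
static void example_protocol_c_write(struct drbd_request *req,
				     struct drbd_peer_device *peer_device)
{
	req_mod(req, TO_BE_SENT, peer_device);		/* will go to the peer */
	req_mod(req, TO_BE_SUBMITTED, peer_device);	/* and to the local disk */
	req_mod(req, QUEUE_FOR_NET_WRITE, peer_device);	/* transfer log, worker */
	req_mod(req, HANDED_OVER_TO_NETWORK, peer_device);
	req_mod(req, COMPLETED_OK, peer_device);	/* local disk completion */
	req_mod(req, WRITE_ACKED_BY_PEER, peer_device);	/* P_WRITE_ACK, protocol C */
	req_mod(req, BARRIER_ACKED, peer_device);	/* epoch closed, may be freed */
}
#endif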

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 3210
	 * 0000: no local possible
	 * 0001: to be submitted
	 *    UNUSED, we could map: 0011: submitted, completion still pending
	 * 0110: completed ok
	 * 0010: completed with error
	 * 1001: Aborted (before completion)
	 * 1x10: Aborted and completed -> free
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	__RQ_LOCAL_ABORTED,

	/* 87654
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write acked (C),
	 *        data received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transfer log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it from
	 * the transfer log.  We should restructure the code so that this
	 * conflict no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,
	__RQ_WSAME,
	__RQ_UNMAP,
	__RQ_ZEROES,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,

	/* This was the most recent request during some blk_finish_plug()
	 * or its implicit from-schedule equivalent.
	 * We may use it as a hint to send a P_UNPLUG_REMOTE */
	__RQ_UNPLUG,

	/* The peer has sent a retry ACK */
	__RQ_POSTPONED,

	/* would have been completed,
	 * but was not, because of drbd_suspended() */
	__RQ_COMPLETION_SUSP,

	/* We expect a receive ACK (wire proto B) */
	__RQ_EXP_RECEIVE_ACK,

	/* We expect a write ACK (wire proto C) */
	__RQ_EXP_WRITE_ACK,

	/* waiting for a barrier ack, did an extra kref_get */
	__RQ_EXP_BARR_ACK,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_WSAME           (1UL << __RQ_WSAME)
#define RQ_UNMAP           (1UL << __RQ_UNMAP)
#define RQ_ZEROES          (1UL << __RQ_ZEROES)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
#define RQ_UNPLUG          (1UL << __RQ_UNPLUG)
#define RQ_POSTPONED       (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
#define RQ_EXP_WRITE_ACK   (1UL << __RQ_EXP_WRITE_ACK)
#define RQ_EXP_BARR_ACK    (1UL << __RQ_EXP_BARR_ACK)
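
/* Minimal sketch, not driver code: how the state tables above translate to
 * mask tests. This assumes the bits live in the rq_state member of struct
 * drbd_request (declared in drbd_int.h). The local row "0110: completed ok"
 * is RQ_LOCAL_COMPLETED|RQ_LOCAL_OK; the net row "00101: sent, expecting
 * recv_ack (B) or write_ack (C)" is RQ_NET_PENDING|RQ_NET_SENT with
 * RQ_NET_QUEUED already cleared again. */
#if 0
static bool example_local_completed_ok(const struct drbd_request *req)
{
	return (req->rq_state & RQ_LOCAL_MASK)
	    == (RQ_LOCAL_COMPLETED | RQ_LOCAL_OK);
}

static bool example_sent_waiting_for_ack(const struct drbd_request *req)
{
	return (req->rq_state & (RQ_NET_PENDING | RQ_NET_QUEUED | RQ_NET_SENT))
	    == (RQ_NET_PENDING | RQ_NET_SENT);
}
#endif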

/* When waking up the frozen transfer log, mod_req() has to return whether
   the request should be counted in the epoch object. */
#define MR_WRITE       1
#define MR_READ        2

/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct drbd_peer_device *peer_device,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m);
extern void request_timer_fn(struct timer_list *t);
extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void tl_abort_disk_io(struct drbd_device *device);

/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = req->device;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, peer_device, &m);
	if (m.bio)
		complete_master_bio(device, &m);

	return rv;
}

/* completion of master bio is outside of our spinlock.
 * We still may or may not be inside some irqs disabled section
 * of the lower level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what,
		struct drbd_peer_device *peer_device)
{
	unsigned long flags;
	struct drbd_device *device = req->device;
	struct bio_and_error m;
	int rv;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	rv = __req_mod(req, what, peer_device, &m);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (m.bio)
		complete_master_bio(device, &m);

	return rv;
}
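
/* Usage sketch, an illustration rather than driver code: req_mod() takes
 * resource->req_lock itself, so it fits contexts like a lower-level
 * completion callback where no lock is held; _req_mod() skips the locking
 * and may complete the master bio right away, e.g. when walking some list
 * on cleanup. */
#if 0
static void example_local_write_endio(struct drbd_request *req,
				      struct drbd_peer_device *peer_device,
				      bool ok)
{
	/* endio context: no req_lock held, req_mod() acquires it */
	req_mod(req, ok ? COMPLETED_OK : WRITE_COMPLETED_WITH_ERROR,
		peer_device);
}
#endif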

extern bool drbd_should_do_remote(union drbd_dev_state);

#endif