xref: /openbmc/linux/drivers/block/drbd/drbd_req.h (revision b30ab791)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_req.h
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    DRBD is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    DRBD is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #ifndef _DRBD_REQ_H
26b411b363SPhilipp Reisner #define _DRBD_REQ_H
27b411b363SPhilipp Reisner 
28b411b363SPhilipp Reisner #include <linux/module.h>
29b411b363SPhilipp Reisner 
30b411b363SPhilipp Reisner #include <linux/slab.h>
31b411b363SPhilipp Reisner #include <linux/drbd.h>
32b411b363SPhilipp Reisner #include "drbd_int.h"
33b411b363SPhilipp Reisner #include "drbd_wrappers.h"
34b411b363SPhilipp Reisner 
35b411b363SPhilipp Reisner /* The request callbacks will be called in irq context by the IDE drivers,
36b411b363SPhilipp Reisner    and in Softirqs/Tasklets/BH context by the SCSI drivers,
37b411b363SPhilipp Reisner    and by the receiver and worker in kernel-thread context.
38b411b363SPhilipp Reisner    Try to get the locking right :) */
39b411b363SPhilipp Reisner 
40b411b363SPhilipp Reisner /*
41b411b363SPhilipp Reisner  * Objects of type struct drbd_request do only exist on a R_PRIMARY node, and are
42b411b363SPhilipp Reisner  * associated with IO requests originating from the block layer above us.
43b411b363SPhilipp Reisner  *
44b411b363SPhilipp Reisner  * There are quite a few things that may happen to a drbd request
45b411b363SPhilipp Reisner  * during its lifetime.
46b411b363SPhilipp Reisner  *
47b411b363SPhilipp Reisner  *  It will be created.
48b411b363SPhilipp Reisner  *  It will be marked with the intention to be
49b411b363SPhilipp Reisner  *    submitted to local disk and/or
 50b411b363SPhilipp Reisner  *    sent via the network.
51b411b363SPhilipp Reisner  *
52b411b363SPhilipp Reisner  *  It has to be placed on the transfer log and other housekeeping lists,
53b411b363SPhilipp Reisner  *  In case we have a network connection.
54b411b363SPhilipp Reisner  *
55b411b363SPhilipp Reisner  *  It may be identified as a concurrent (write) request
56b411b363SPhilipp Reisner  *    and be handled accordingly.
57b411b363SPhilipp Reisner  *
 58b411b363SPhilipp Reisner  *  It may be handed over to the local disk subsystem.
59b411b363SPhilipp Reisner  *  It may be completed by the local disk subsystem,
603ad2f3fbSDaniel Mack  *    either successfully or with io-error.
61b411b363SPhilipp Reisner  *  In case it is a READ request, and it failed locally,
62b411b363SPhilipp Reisner  *    it may be retried remotely.
63b411b363SPhilipp Reisner  *
64b411b363SPhilipp Reisner  *  It may be queued for sending.
65b411b363SPhilipp Reisner  *  It may be handed over to the network stack,
66b411b363SPhilipp Reisner  *    which may fail.
67b411b363SPhilipp Reisner  *  It may be acknowledged by the "peer" according to the wire_protocol in use.
68b411b363SPhilipp Reisner  *    this may be a negative ack.
69b411b363SPhilipp Reisner  *  It may receive a faked ack when the network connection is lost and the
70b411b363SPhilipp Reisner  *  transfer log is cleaned up.
71b411b363SPhilipp Reisner  *  Sending may be canceled due to network connection loss.
72b411b363SPhilipp Reisner  *  When it finally has outlived its time,
73b411b363SPhilipp Reisner  *    corresponding dirty bits in the resync-bitmap may be cleared or set,
74b411b363SPhilipp Reisner  *    it will be destroyed,
75b411b363SPhilipp Reisner  *    and completion will be signalled to the originator,
76b411b363SPhilipp Reisner  *      with or without "success".
77b411b363SPhilipp Reisner  */
78b411b363SPhilipp Reisner 
/* Events (and a few "actions") fed to __req_mod() to drive a request
 * through its life cycle.  The order of enumerators is not significant,
 * but new entries should be grouped with their related phase. */
enum drbd_req_event {
	CREATED,
	TO_BE_SENT,
	TO_BE_SUBMITTED,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	QUEUE_FOR_NET_WRITE,
	QUEUE_FOR_NET_READ,
	QUEUE_FOR_SEND_OOS,

	/* An empty flush is queued as P_BARRIER,
	 * which will cause it to complete "successfully",
	 * even if the local disk flush failed.
	 *
	 * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
	 * only see an error if neither local nor remote data is reachable. */
	QUEUE_AS_DRBD_BARRIER,

	/* network phase: sending and acknowledgment */
	SEND_CANCELED,
	SEND_FAILED,
	HANDED_OVER_TO_NETWORK,
	OOS_HANDED_TO_NETWORK,
	CONNECTION_LOST_WHILE_PENDING,
	READ_RETRY_REMOTE_CANCELED,
	RECV_ACKED_BY_PEER,
	WRITE_ACKED_BY_PEER,
	WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
	CONFLICT_RESOLVED,
	POSTPONE_WRITE,
	NEG_ACKED,
	BARRIER_ACKED, /* in protocol A and B */
	DATA_RECEIVED, /* (remote read) */

	/* local-disk completion phase */
	READ_COMPLETED_WITH_ERROR,
	READ_AHEAD_COMPLETED_WITH_ERROR,
	WRITE_COMPLETED_WITH_ERROR,
	ABORT_DISK_IO,
	COMPLETED_OK,
	RESEND,
	FAIL_FROZEN_DISK_IO,
	RESTART_FROZEN_DISK_IO,
	NOTHING,
};
124b411b363SPhilipp Reisner 
125b411b363SPhilipp Reisner /* encoding of request states for now.  we don't actually need that many bits.
126b411b363SPhilipp Reisner  * we don't need to do atomic bit operations either, since most of the time we
127b411b363SPhilipp Reisner  * need to look at the connection state and/or manipulate some lists at the
128b411b363SPhilipp Reisner  * same time, so we should hold the request lock anyways.
129b411b363SPhilipp Reisner  */
/* Bit positions for the request state word.  The relative order of the
 * local bits, the network bits and __RQ_NET_MAX matters: RQ_LOCAL_MASK
 * and RQ_NET_MASK below are computed from these positions. */
enum drbd_req_state_bits {
	/* 3210
	 * 0000: no local possible
	 * 0001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 0110: completed ok
	 * 0010: completed with error
	 * 1001: Aborted (before completion)
	 * 1x10: Aborted and completed -> free
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	__RQ_LOCAL_ABORTED,

	/* 87654
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write acked (C),
	 *        data received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set, yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transaction log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * worker canceling the request and tl_clear_barrier killing it from
	 * transfer log.  We should restructure the code so this conflict does
	 * no longer occur. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,

	/* The peer has sent a retry ACK */
	__RQ_POSTPONED,

	/* would have been completed,
	 * but was not, because of drbd_suspended() */
	__RQ_COMPLETION_SUSP,

	/* We expect a receive ACK (wire proto B) */
	__RQ_EXP_RECEIVE_ACK,

	/* We expect a write ACK (write proto C) */
	__RQ_EXP_WRITE_ACK,

	/* waiting for a barrier ack, did an extra kref_get */
	__RQ_EXP_BARR_ACK,
};
227b411b363SPhilipp Reisner 
/* Single-bit masks derived from the enum positions above. */
#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)

/* all bits up to and including __RQ_LOCAL_ABORTED */
#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8: all network bits, excluding the local ones */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
#define RQ_POSTPONED	   (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
#define RQ_EXP_WRITE_ACK   (1UL << __RQ_EXP_WRITE_ACK)
#define RQ_EXP_BARR_ACK    (1UL << __RQ_EXP_BARR_ACK)

/* For waking up the frozen transfer log mod_req() has to return if the request
   should be counted in the epoch object*/
#define MR_WRITE       1
#define MR_READ        2
25711b58e73SPhilipp Reisner 
/* Clone the master bio into a private bio used to submit the request to
 * the local disk.  The clone's completion is routed to drbd_request_endio()
 * with @req stashed in bi_private; bi_next is cleared so the clone does not
 * inherit any chaining from @bio_src.
 * NOTE(review): bio_clone(GFP_NOIO) return value is not checked here —
 * the original XXX comment below flags the same concern; verify it cannot
 * fail in this configuration. */
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private  = req;
	bio->bi_end_io   = drbd_request_endio;
	bio->bi_next     = NULL;
}
2695ba82308SPhilipp Reisner 
/* Short lived temporary struct on the stack.
 * Filled in by __req_mod() under the request lock; the caller completes
 * @bio with @error outside the lock via complete_master_bio().
 * We could squirrel the error to be returned into
 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;	/* master bio to complete, or NULL if none */
	int error;		/* completion status to report for @bio */
};
277b411b363SPhilipp Reisner 
/* Request state machine entry points (implemented in drbd_req.c). */
extern void start_new_tl_epoch(struct drbd_tconn *tconn);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
/* Core state transition; may fill *m with a master bio to complete.
 * Must be called with the request lock held. */
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
/* Complete m->bio towards the upper layers; call without the request lock. */
extern void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);

/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);
2922312f0b3SLars Ellenberg 
293b411b363SPhilipp Reisner /* use this if you don't want to deal with calling complete_master_bio()
294b411b363SPhilipp Reisner  * outside the spinlock, e.g. when walking some list on cleanup. */
2952a80699fSPhilipp Reisner static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
296b411b363SPhilipp Reisner {
297b30ab791SAndreas Gruenbacher 	struct drbd_device *device = req->w.device;
298b411b363SPhilipp Reisner 	struct bio_and_error m;
2992a80699fSPhilipp Reisner 	int rv;
300b411b363SPhilipp Reisner 
301b411b363SPhilipp Reisner 	/* __req_mod possibly frees req, do not touch req after that! */
3022a80699fSPhilipp Reisner 	rv = __req_mod(req, what, &m);
303b411b363SPhilipp Reisner 	if (m.bio)
304b30ab791SAndreas Gruenbacher 		complete_master_bio(device, &m);
3052a80699fSPhilipp Reisner 
3062a80699fSPhilipp Reisner 	return rv;
307b411b363SPhilipp Reisner }
308b411b363SPhilipp Reisner 
309759fbdfbSPhilipp Reisner /* completion of master bio is outside of our spinlock.
310759fbdfbSPhilipp Reisner  * We still may or may not be inside some irqs disabled section
311759fbdfbSPhilipp Reisner  * of the lower level driver completion callback, so we need to
312759fbdfbSPhilipp Reisner  * spin_lock_irqsave here. */
3132a80699fSPhilipp Reisner static inline int req_mod(struct drbd_request *req,
314b411b363SPhilipp Reisner 		enum drbd_req_event what)
315b411b363SPhilipp Reisner {
316759fbdfbSPhilipp Reisner 	unsigned long flags;
317b30ab791SAndreas Gruenbacher 	struct drbd_device *device = req->w.device;
318b411b363SPhilipp Reisner 	struct bio_and_error m;
3192a80699fSPhilipp Reisner 	int rv;
3202a80699fSPhilipp Reisner 
321b30ab791SAndreas Gruenbacher 	spin_lock_irqsave(&device->tconn->req_lock, flags);
3222a80699fSPhilipp Reisner 	rv = __req_mod(req, what, &m);
323b30ab791SAndreas Gruenbacher 	spin_unlock_irqrestore(&device->tconn->req_lock, flags);
324b411b363SPhilipp Reisner 
325b411b363SPhilipp Reisner 	if (m.bio)
326b30ab791SAndreas Gruenbacher 		complete_master_bio(device, &m);
3272a80699fSPhilipp Reisner 
3282a80699fSPhilipp Reisner 	return rv;
329b411b363SPhilipp Reisner }
3306a35c45fSPhilipp Reisner 
331da9fbc27SPhilipp Reisner static inline bool drbd_should_do_remote(union drbd_dev_state s)
3326a35c45fSPhilipp Reisner {
3336a35c45fSPhilipp Reisner 	return s.pdsk == D_UP_TO_DATE ||
3346a35c45fSPhilipp Reisner 		(s.pdsk >= D_INCONSISTENT &&
3356a35c45fSPhilipp Reisner 		 s.conn >= C_WF_BITMAP_T &&
3366a35c45fSPhilipp Reisner 		 s.conn < C_AHEAD);
3376a35c45fSPhilipp Reisner 	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
3386a35c45fSPhilipp Reisner 	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
3396a35c45fSPhilipp Reisner 	   states. */
3406a35c45fSPhilipp Reisner }
341da9fbc27SPhilipp Reisner static inline bool drbd_should_send_out_of_sync(union drbd_dev_state s)
3426a35c45fSPhilipp Reisner {
3436a35c45fSPhilipp Reisner 	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
3446a35c45fSPhilipp Reisner 	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
3456a35c45fSPhilipp Reisner 	   since we enter state C_AHEAD only if proto >= 96 */
3466a35c45fSPhilipp Reisner }
3476a35c45fSPhilipp Reisner 
348b411b363SPhilipp Reisner #endif
349