xref: /openbmc/linux/drivers/block/drbd/drbd_req.h (revision 7fde2be9)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_req.h
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    DRBD is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    DRBD is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #ifndef _DRBD_REQ_H
26b411b363SPhilipp Reisner #define _DRBD_REQ_H
27b411b363SPhilipp Reisner 
28b411b363SPhilipp Reisner #include <linux/module.h>
29b411b363SPhilipp Reisner 
30b411b363SPhilipp Reisner #include <linux/slab.h>
31b411b363SPhilipp Reisner #include <linux/drbd.h>
32b411b363SPhilipp Reisner #include "drbd_int.h"
33b411b363SPhilipp Reisner #include "drbd_wrappers.h"
34b411b363SPhilipp Reisner 
35b411b363SPhilipp Reisner /* The request callbacks will be called in irq context by the IDE drivers,
36b411b363SPhilipp Reisner    and in Softirqs/Tasklets/BH context by the SCSI drivers,
37b411b363SPhilipp Reisner    and by the receiver and worker in kernel-thread context.
38b411b363SPhilipp Reisner    Try to get the locking right :) */
39b411b363SPhilipp Reisner 
40b411b363SPhilipp Reisner /*
41b411b363SPhilipp Reisner  * Objects of type struct drbd_request do only exist on a R_PRIMARY node, and are
42b411b363SPhilipp Reisner  * associated with IO requests originating from the block layer above us.
43b411b363SPhilipp Reisner  *
44b411b363SPhilipp Reisner  * There are quite a few things that may happen to a drbd request
45b411b363SPhilipp Reisner  * during its lifetime.
46b411b363SPhilipp Reisner  *
47b411b363SPhilipp Reisner  *  It will be created.
48b411b363SPhilipp Reisner  *  It will be marked with the intention to be
49b411b363SPhilipp Reisner  *    submitted to local disk and/or
 *    sent via the network.
51b411b363SPhilipp Reisner  *
52b411b363SPhilipp Reisner  *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
54b411b363SPhilipp Reisner  *
55b411b363SPhilipp Reisner  *  It may be identified as a concurrent (write) request
56b411b363SPhilipp Reisner  *    and be handled accordingly.
57b411b363SPhilipp Reisner  *
 *  It may be handed over to the local disk subsystem.
59b411b363SPhilipp Reisner  *  It may be completed by the local disk subsystem,
603ad2f3fbSDaniel Mack  *    either successfully or with io-error.
61b411b363SPhilipp Reisner  *  In case it is a READ request, and it failed locally,
62b411b363SPhilipp Reisner  *    it may be retried remotely.
63b411b363SPhilipp Reisner  *
64b411b363SPhilipp Reisner  *  It may be queued for sending.
65b411b363SPhilipp Reisner  *  It may be handed over to the network stack,
66b411b363SPhilipp Reisner  *    which may fail.
67b411b363SPhilipp Reisner  *  It may be acknowledged by the "peer" according to the wire_protocol in use.
68b411b363SPhilipp Reisner  *    this may be a negative ack.
69b411b363SPhilipp Reisner  *  It may receive a faked ack when the network connection is lost and the
70b411b363SPhilipp Reisner  *  transfer log is cleaned up.
71b411b363SPhilipp Reisner  *  Sending may be canceled due to network connection loss.
72b411b363SPhilipp Reisner  *  When it finally has outlived its time,
73b411b363SPhilipp Reisner  *    corresponding dirty bits in the resync-bitmap may be cleared or set,
74b411b363SPhilipp Reisner  *    it will be destroyed,
75b411b363SPhilipp Reisner  *    and completion will be signalled to the originator,
76b411b363SPhilipp Reisner  *      with or without "success".
77b411b363SPhilipp Reisner  */
78b411b363SPhilipp Reisner 
/* Events (and a few "actions") fed into __req_mod() to drive the
 * drbd_request state machine.  Do NOT reorder: these are state-machine
 * inputs used throughout drbd_req.c. */
enum drbd_req_event {
	created,
	to_be_send,		/* intent: request will be sent to the peer */
	to_be_submitted,	/* intent: request will go to the local disk */

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,
	queue_for_send_oos,	/* queue "out of sync" info instead of data */

	send_canceled,
	send_failed,
	handed_over_to_network,
	oos_handed_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	/* local-disk completion events */
	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	resend,
	fail_frozen_disk_io,
	restart_frozen_disk_io,
	nothing, /* for tracing only */
};
114b411b363SPhilipp Reisner 
/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyways.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set, yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transaction log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * worker canceling the request and tl_clear_barrier killing it from
	 * transfer log.  We should restructure the code so this conflict does
	 * no longer occur. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,
};
198b411b363SPhilipp Reisner 
/* Single-bit masks derived from enum drbd_req_state_bits above. */
#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)

/* For waking up the frozen transfer log, mod_req() has to return whether
   the request should be counted in the epoch object */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)
22411b58e73SPhilipp Reisner 
225b411b363SPhilipp Reisner /* epoch entries */
226b411b363SPhilipp Reisner static inline
227b411b363SPhilipp Reisner struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
228b411b363SPhilipp Reisner {
229b411b363SPhilipp Reisner 	BUG_ON(mdev->ee_hash_s == 0);
230b411b363SPhilipp Reisner 	return mdev->ee_hash +
231b411b363SPhilipp Reisner 		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
232b411b363SPhilipp Reisner }
233b411b363SPhilipp Reisner 
234b411b363SPhilipp Reisner /* transfer log (drbd_request objects) */
235b411b363SPhilipp Reisner static inline
236b411b363SPhilipp Reisner struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
237b411b363SPhilipp Reisner {
238b411b363SPhilipp Reisner 	BUG_ON(mdev->tl_hash_s == 0);
239b411b363SPhilipp Reisner 	return mdev->tl_hash +
240b411b363SPhilipp Reisner 		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
241b411b363SPhilipp Reisner }
242b411b363SPhilipp Reisner 
243b411b363SPhilipp Reisner /* application reads (drbd_request objects) */
244b411b363SPhilipp Reisner static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
245b411b363SPhilipp Reisner {
246b411b363SPhilipp Reisner 	return mdev->app_reads_hash
247b411b363SPhilipp Reisner 		+ ((unsigned int)(sector) % APP_R_HSIZE);
248b411b363SPhilipp Reisner }
249b411b363SPhilipp Reisner 
/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	/* The wire "id" is compared against the request object's address:
	 * walk the collision chain of the app-reads bucket and accept the
	 * id only if an object with exactly that address is hashed there. */
	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			/* sanity: the peer answered for the sector we asked about */
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;	/* unknown id: stale or forged answer */
}
267b411b363SPhilipp Reisner 
2685ba82308SPhilipp Reisner static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
269b411b363SPhilipp Reisner {
270b411b363SPhilipp Reisner 	struct bio *bio;
271b411b363SPhilipp Reisner 	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */
272b411b363SPhilipp Reisner 
273b411b363SPhilipp Reisner 	req->private_bio = bio;
274b411b363SPhilipp Reisner 
275b411b363SPhilipp Reisner 	bio->bi_private  = req;
276b411b363SPhilipp Reisner 	bio->bi_end_io   = drbd_endio_pri;
277b411b363SPhilipp Reisner 	bio->bi_next     = NULL;
278b411b363SPhilipp Reisner }
2795ba82308SPhilipp Reisner 
2805ba82308SPhilipp Reisner static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
2815ba82308SPhilipp Reisner 	struct bio *bio_src)
2825ba82308SPhilipp Reisner {
2835ba82308SPhilipp Reisner 	struct drbd_request *req =
2845ba82308SPhilipp Reisner 		mempool_alloc(drbd_request_mempool, GFP_NOIO);
2855ba82308SPhilipp Reisner 	if (likely(req)) {
2865ba82308SPhilipp Reisner 		drbd_req_make_private_bio(req, bio_src);
2875ba82308SPhilipp Reisner 
2885ba82308SPhilipp Reisner 		req->rq_state    = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
2895ba82308SPhilipp Reisner 		req->mdev        = mdev;
2905ba82308SPhilipp Reisner 		req->master_bio  = bio_src;
2915ba82308SPhilipp Reisner 		req->epoch       = 0;
2925ba82308SPhilipp Reisner 		req->sector      = bio_src->bi_sector;
2935ba82308SPhilipp Reisner 		req->size        = bio_src->bi_size;
2945ba82308SPhilipp Reisner 		INIT_HLIST_NODE(&req->colision);
2955ba82308SPhilipp Reisner 		INIT_LIST_HEAD(&req->tl_requests);
2965ba82308SPhilipp Reisner 		INIT_LIST_HEAD(&req->w.list);
2975ba82308SPhilipp Reisner 	}
298b411b363SPhilipp Reisner 	return req;
299b411b363SPhilipp Reisner }
300b411b363SPhilipp Reisner 
/* Return a request object (allocated by drbd_req_new()) to the mempool.
 * Caller must have released/completed the private bio and unhashed the
 * request first — this only frees the memory. */
static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
305b411b363SPhilipp Reisner 
306b411b363SPhilipp Reisner static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
307b411b363SPhilipp Reisner {
308b411b363SPhilipp Reisner 	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
309b411b363SPhilipp Reisner }
310b411b363SPhilipp Reisner 
/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;	/* master bio to complete outside the spinlock, or NULL */
	int error;		/* completion status to hand to bio_endio */
};

/* may free req; fills *m with the master bio to complete, if any */
extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
/* core state machine; returns MR_WRITE/MR_READ flags (see above) */
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
326b411b363SPhilipp Reisner 
327b411b363SPhilipp Reisner /* use this if you don't want to deal with calling complete_master_bio()
328b411b363SPhilipp Reisner  * outside the spinlock, e.g. when walking some list on cleanup. */
3292a80699fSPhilipp Reisner static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
330b411b363SPhilipp Reisner {
331b411b363SPhilipp Reisner 	struct drbd_conf *mdev = req->mdev;
332b411b363SPhilipp Reisner 	struct bio_and_error m;
3332a80699fSPhilipp Reisner 	int rv;
334b411b363SPhilipp Reisner 
335b411b363SPhilipp Reisner 	/* __req_mod possibly frees req, do not touch req after that! */
3362a80699fSPhilipp Reisner 	rv = __req_mod(req, what, &m);
337b411b363SPhilipp Reisner 	if (m.bio)
338b411b363SPhilipp Reisner 		complete_master_bio(mdev, &m);
3392a80699fSPhilipp Reisner 
3402a80699fSPhilipp Reisner 	return rv;
341b411b363SPhilipp Reisner }
342b411b363SPhilipp Reisner 
343759fbdfbSPhilipp Reisner /* completion of master bio is outside of our spinlock.
344759fbdfbSPhilipp Reisner  * We still may or may not be inside some irqs disabled section
345759fbdfbSPhilipp Reisner  * of the lower level driver completion callback, so we need to
346759fbdfbSPhilipp Reisner  * spin_lock_irqsave here. */
3472a80699fSPhilipp Reisner static inline int req_mod(struct drbd_request *req,
348b411b363SPhilipp Reisner 		enum drbd_req_event what)
349b411b363SPhilipp Reisner {
350759fbdfbSPhilipp Reisner 	unsigned long flags;
351b411b363SPhilipp Reisner 	struct drbd_conf *mdev = req->mdev;
352b411b363SPhilipp Reisner 	struct bio_and_error m;
3532a80699fSPhilipp Reisner 	int rv;
3542a80699fSPhilipp Reisner 
355759fbdfbSPhilipp Reisner 	spin_lock_irqsave(&mdev->req_lock, flags);
3562a80699fSPhilipp Reisner 	rv = __req_mod(req, what, &m);
357759fbdfbSPhilipp Reisner 	spin_unlock_irqrestore(&mdev->req_lock, flags);
358b411b363SPhilipp Reisner 
359b411b363SPhilipp Reisner 	if (m.bio)
360b411b363SPhilipp Reisner 		complete_master_bio(mdev, &m);
3612a80699fSPhilipp Reisner 
3622a80699fSPhilipp Reisner 	return rv;
363b411b363SPhilipp Reisner }
3646a35c45fSPhilipp Reisner 
3656a35c45fSPhilipp Reisner static inline bool drbd_should_do_remote(union drbd_state s)
3666a35c45fSPhilipp Reisner {
3676a35c45fSPhilipp Reisner 	return s.pdsk == D_UP_TO_DATE ||
3686a35c45fSPhilipp Reisner 		(s.pdsk >= D_INCONSISTENT &&
3696a35c45fSPhilipp Reisner 		 s.conn >= C_WF_BITMAP_T &&
3706a35c45fSPhilipp Reisner 		 s.conn < C_AHEAD);
3716a35c45fSPhilipp Reisner 	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
3726a35c45fSPhilipp Reisner 	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
3736a35c45fSPhilipp Reisner 	   states. */
3746a35c45fSPhilipp Reisner }
3756a35c45fSPhilipp Reisner static inline bool drbd_should_send_oos(union drbd_state s)
3766a35c45fSPhilipp Reisner {
3776a35c45fSPhilipp Reisner 	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
3786a35c45fSPhilipp Reisner 	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
3796a35c45fSPhilipp Reisner 	   since we enter state C_AHEAD only if proto >= 96 */
3806a35c45fSPhilipp Reisner }
3816a35c45fSPhilipp Reisner 
382b411b363SPhilipp Reisner #endif
383