/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *    transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *    it will be destroyed,
 *    and completion will be signalled to the originator,
 *      with or without "success".
 */

enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these two are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,

	send_canceled,
	send_failed,
	handed_over_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	resend,
	fail_frozen_disk_io,
	restart_frozen_disk_io,
	nothing, /* for tracing only */
};
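
/* Illustrative sketch only, not compiled: one plausible event sequence for a
 * protocol C write on the primary, expressed via the req_mod()/_req_mod()
 * helpers declared further down.  The real call sites live in drbd_req.c;
 * the ordering here is inferred from the lifetime comment above. */
#if 0
static void example_protocol_c_write_events(struct drbd_request *req)
{
	_req_mod(req, to_be_send);		/* will go over the wire */
	_req_mod(req, to_be_submitted);		/* will also hit local disk */
	req_mod(req, queue_for_net_write);	/* hand to the worker */
	req_mod(req, handed_over_to_network);	/* socket write succeeded */
	req_mod(req, completed_ok);		/* local disk IO finished */
	req_mod(req, write_acked_by_peer);	/* P_WRITE_ACK (protocol C) */
}
#endif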

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not yet set, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transfer log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it from
	 * the transfer log.  We should restructure the code so this conflict
	 * no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with a network part
	 * from a request without one, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
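
/* Illustrative sketch only, not compiled: the masks above let callers look at
 * the local and network parts of rq_state separately.  The helper names are
 * made up for illustration. */
#if 0
static int example_local_io_succeeded(const struct drbd_request *req)
{
	/* "110" in the encoding comment above: completed, with the OK bit */
	return (req->rq_state & RQ_LOCAL_MASK) ==
		(RQ_LOCAL_COMPLETED | RQ_LOCAL_OK);
}

static int example_has_network_part(const struct drbd_request *req)
{
	return (req->rq_state & RQ_NET_MASK) != 0;
}
#endif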

/* To wake up the frozen transfer log, req_mod() has to return whether the
   request should be counted in the epoch object */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)
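
/* Illustrative sketch only, not compiled: how a request's direction maps to
 * the MR_* return hint described above; the helper name is made up. */
#if 0
static int example_mr_flag(const struct drbd_request *req)
{
	return (req->rq_state & RQ_WRITE) ? MR_WRITE : MR_READ;
}
#endif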

/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static inline
struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}
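
/* Illustrative sketch only, not compiled: how a slot returned by one of the
 * helpers above is typically used; in drbd_req.c the registration happens
 * while holding mdev->req_lock. */
#if 0
static void example_register_in_tl_hash(struct drbd_conf *mdev,
					struct drbd_request *req)
{
	hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector));
}
#endif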

/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}

static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private  = req;
	bio->bi_end_io   = drbd_endio_pri;
	bio->bi_next     = NULL;
}

static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		drbd_req_make_private_bio(req, bio_src);

		req->rq_state    = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev        = mdev;
		req->master_bio  = bio_src;
		req->epoch       = 0;
		req->sector      = bio_src->bi_sector;
		req->size        = bio_src->bi_size;
		req->start_time  = jiffies;
		INIT_HLIST_NODE(&req->colision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);
	}
	return req;
}

static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
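
/* Illustrative sketch only, not compiled: the intended pairing of
 * drbd_req_new() and drbd_req_free() near the top of the make_request
 * path, with error handling cut down to the bare minimum. */
#if 0
static int example_make_request(struct drbd_conf *mdev, struct bio *bio)
{
	struct drbd_request *req = drbd_req_new(mdev, bio);

	if (!req) {
		/* allocation failed: complete the master bio ourselves */
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	/* ... hand req over to __req_mod() and the worker; on an early
	 * error path, the private bio and the request are dropped again: */
	bio_put(req->private_bio);
	drbd_req_free(req);
	return 0;
}
#endif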

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
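
/* Worked example for overlaps(): s1 = 0 with l1 = 4096 bytes covers sectors
 * [0, 8); that overlaps s2 = 7, but not s2 = 8.  The >>9 converts the byte
 * lengths into 512-byte sectors. */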

/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}

/* completion of master bio is outside of spinlock.
 * If you need it irqsave, do it yourself!
 * Which means: don't use from the bio endio callback. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irq(&mdev->req_lock);
	rv = __req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
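
/* Illustrative sketch only, not compiled: which wrapper fits which context.
 * From thread context without the lock, use req_mod(); while already holding
 * mdev->req_lock (e.g. walking some list on cleanup), use _req_mod() and
 * accept that the master bio completes under the lock. */
#if 0
static void example_cleanup(struct drbd_conf *mdev, struct list_head *requests)
{
	struct drbd_request *req, *tmp;

	spin_lock_irq(&mdev->req_lock);
	list_for_each_entry_safe(req, tmp, requests, tl_requests) {
		/* may free req; completion happens inside _req_mod() */
		_req_mod(req, connection_lost_while_pending);
	}
	spin_unlock_irq(&mdev->req_lock);
}
#endif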
#endif /* _DRBD_REQ_H */