/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif
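
/* Illustrative sketch (an assumption, not part of the driver): under
 * sparse (__CHECKER__), these annotations document and verify lock
 * context.  A hypothetical helper that must only be called with the
 * request lock held could be declared as:
 *
 *	void _example_requeue(struct drbd_device *device)
 *		__must_hold(&device->resource->req_lock);
 */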

/* module parameter, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)
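
/* Usage sketch (illustrative): drbd_printk() dispatches on the object
 * type at compile time, so the same logging macros work for devices,
 * resources, connections and peer devices alike:
 *
 *	drbd_warn(device, "out of memory\n");
 *	drbd_info(connection, "Connection closed\n");
 */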

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 * Note: it relies on a variable named "device" being in scope at the
 * call site (used for the error message).
 */
#define expect(exp) ({								\
		bool _bool = (exp);						\
		if (!_bool)							\
			drbd_err(device, "ASSERTION %s FAILED in %s\n",		\
			        #exp, __func__);				\
		_bool;								\
		})
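
/* Usage sketch (illustrative): D_ASSERT() only logs, while expect()
 * also yields the tested value, so it can guard a bail-out path:
 *
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		return -EINVAL;
 */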

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

/* Returns nonzero if a fault of the given type should be inserted now. */
static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
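
/* Usage sketch (illustrative, not a verbatim call site): callers
 * typically fail an IO artificially instead of submitting it:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
 *		bio_endio(bio, -EIO);	// pretend the write failed
 *	else
 *		generic_make_request(bio);
 */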

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
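
/* Example: div_ceil(7, 2) == 4, div_floor(7, 2) == 3.  Note that both
 * macros evaluate their arguments more than once, so do not pass
 * expressions with side effects. */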

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and over again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
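
/* Worked example: with bit_offset == 70, a 64 bit host computes
 * word_offset = 70 >> 6 = 1; a 32 bit host computes 70 >> 5 = 2 and
 * clears the low bit, landing on the same 64 bit boundary
 * (word 2 of 32 bit longs == word 1 of 64 bit longs). */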

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_submit_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every request added, decreased on every one finished. */
	unsigned long flags;
};

/* Prototype declarations of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_DISCARD? */
	__EE_IS_TRIM,
	/* our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_IS_TRIM_USE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM             (1<<__EE_IS_TRIM)
#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_APPLICATION		(1<<__EE_APPLICATION)

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us not to use FUA/FLUSH on meta data dev */

	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* do we need to kfree, or vfree bm_pages? */
	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */

	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[];	/* flexible array member, allocated along with the struct */
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
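
/* A minimal sketch of an allocator for the flexible-array layout above
 * (the real fifo_alloc() lives outside this header; details may differ):
 *
 *	struct fifo_buffer *fifo_alloc(int fifo_size)
 *	{
 *		struct fifo_buffer *fb;
 *
 *		fb = kzalloc(sizeof(*fb) + fifo_size * sizeof(int), GFP_NOIO);
 *		if (!fb)
 *			return NULL;
 *		fb->size = fifo_size;
 *		return fb;
 *	}
 */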

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,		/* whether asender should send a ping asap */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks' time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED		(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
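
/* Usage sketch (illustrative): readers walk these lists under RCU;
 * updates to drbd_resources are serialized via genl_lock() (see above):
 *
 *	rcu_read_lock();
 *	for_each_resource_rcu(resource, &drbd_resources)
 *		for_each_connection_rcu(connection, resource)
 *			drbd_info(connection, "known connection\n");
 *	rcu_read_unlock();
 */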

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_free_ldev(struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 *  Variants:
 *     old, indexed fixed size meta data:
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like fashion.
 *  Its on-disk size used to be fixed at 32kB,
 *  but is about to become configurable.
 */

/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Which will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *   This many changes to the active set can be logged with one transaction.
 *   This number is arbitrary.
 * context per transaction:
 *   This many context extent numbers are logged with each transaction.
 *   This number results from the transaction block size (4k), the layout
 *   of the transaction header, and the number of updates per transaction.
 *   See drbd_actlog.c:struct al_transaction_on_disk
 * */
#define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
#define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4
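
/* Worked out (see drbd_actlog.c:struct al_transaction_on_disk): the
 * transaction header takes 36 bytes, each of the 64 update slots costs
 * 6 bytes (a 16 bit slot number plus a 32 bit extent number), and each
 * context extent number is a 32 bit value, so
 * (4096 - 36 - 6*64) / 4 = 3676 / 4 = 919 context slots fit. */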

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
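
/* Worked example: one bit covers 2^(12-9) = 8 storage sectors (4kB),
 * so sector 1024 maps to bit BM_SECT_TO_BIT(1024) = 1024 >> 3 = 128,
 * and Bit2KB(128) = 128 << 2 = 512, i.e. 512 KB into the device. */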

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how many _storage_ sectors we have per bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)


/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 *		     of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	  `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXT )		 //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
/* we have a certain meta data variant that has a fixed on-disk size of 128
 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
 * log, leaving this many sectors for the bitmap.
 */

#define DRBD_MAX_SECTORS_FIXED_BM \
	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use a 64bit OS for that much storage, anyway. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
#endif

/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit ourselves to a platform agnostic constant here for now.
 * A followup commit may allow even bigger BIO sizes,
 * once we thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */

/* For now, don't allow more than one activity log extent worth of data
 * to be discarded in one go. We may need to rework drbd_al_begin_io()
 * to allow for even larger discard ranges */
#define DRBD_MAX_DISCARD_SIZE	AL_EXTENT_SIZE
#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)

extern int  drbd_bm_init(struct drbd_device *device);
extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int  drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int  drbd_bm_count_bits(
	struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
 * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int  drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t	     drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t      drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);
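
/* Usage sketch (illustrative): bulk bitmap operations bracket their work
 * with drbd_bm_lock(), naming a reason and declaring which non-bulk
 * operations stay allowed via the bm_flag bits defined above:
 *
 *	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
 *	... bulk set/clear/write out pages ...
 *	drbd_bm_unlock(device);
 */
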
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

/* drbd's page pool, used to buffer data received from the peer,
 * or data requested by the peer.
 *
 * This does not have an emergency reserve.
 *
 * When allocating from this pool, it first takes pages from the pool.
 * Only if the pool is depleted will it try to allocate from the system.
 *
 * The assumption is that pages taken from this pool will be processed,
 * and given back, "quickly", and then can be recycled, so we can avoid
 * frequent calls to alloc_page(), and still will be able to make progress even
 * under memory pressure.
 */
extern struct page *drbd_pp_pool;
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

/* We also need a standard (emergency-reserve backed) page pool
 * for meta data IO (activity log, bitmap).
 * We can keep it global, as long as it is used as "N pages at a time".
 * 128 should be plenty, currently we probably can get away with as few as 1.
 */
#define DRBD_MIN_POOL_PAGES	128
extern mempool_t *drbd_md_io_page_pool;

/* We also need to make sure we get a bio
 * when we need it for housekeeping purposes */
extern struct bio_set *drbd_md_io_bio_set;
/* to allocate from that set */
extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);

extern rwlock_t global_state_lock;

extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					    void *peer_addr, int peer_addr_len);
extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

extern int proc_details;

/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


/* drbd_nl.c */
extern int drbd_msg_put_info(struct sk_buff *skb, const char *info);
extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
	DS_ERROR_SHRINK = -3,
	DS_ERROR_SPACE_MD = -2,
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

/* drbd_worker.c */
/* bi_end_io handlers */
extern void drbd_md_endio(struct bio *bio, int error);
extern void drbd_peer_request_endio(struct bio *bio, int error);
extern void drbd_request_endio(struct bio *bio, int error);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
1492 extern void suspend_other_sg(struct drbd_device *device);
1493 extern int drbd_resync_finished(struct drbd_device *device);
1494 /* maybe rather drbd_main.c ? */
1495 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1496 extern void drbd_md_put_buffer(struct drbd_device *device);
1497 extern int drbd_md_sync_page_io(struct drbd_device *device,
1498 		struct drbd_backing_dev *bdev, sector_t sector, int rw);
1499 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1500 extern void wait_until_done_or_force_detached(struct drbd_device *device,
1501 		struct drbd_backing_dev *bdev, unsigned int *done);
1502 extern void drbd_rs_controller_reset(struct drbd_device *device);
1503 
1504 static inline void ov_out_of_sync_print(struct drbd_device *device)
1505 {
1506 	if (device->ov_last_oos_size) {
1507 		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1508 		     (unsigned long long)device->ov_last_oos_start,
1509 		     (unsigned long)device->ov_last_oos_size);
1510 	}
1511 	device->ov_last_oos_size = 0;
1512 }
1513 
1514 
1515 extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
1516 extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
1517 /* worker callbacks */
1518 extern int w_e_end_data_req(struct drbd_work *, int);
1519 extern int w_e_end_rsdata_req(struct drbd_work *, int);
1520 extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1521 extern int w_e_end_ov_reply(struct drbd_work *, int);
1522 extern int w_e_end_ov_req(struct drbd_work *, int);
1523 extern int w_ov_finished(struct drbd_work *, int);
1524 extern int w_resync_timer(struct drbd_work *, int);
1525 extern int w_send_write_hint(struct drbd_work *, int);
1526 extern int w_send_dblock(struct drbd_work *, int);
1527 extern int w_send_read_req(struct drbd_work *, int);
1528 extern int w_e_reissue(struct drbd_work *, int);
1529 extern int w_restart_disk_io(struct drbd_work *, int);
1530 extern int w_send_out_of_sync(struct drbd_work *, int);
1531 extern int w_start_resync(struct drbd_work *, int);
1532 
1533 extern void resync_timer_fn(unsigned long data);
1534 extern void start_resync_timer_fn(unsigned long data);
1535 
1536 extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1537 
1538 /* drbd_receiver.c */
1539 extern int drbd_receiver(struct drbd_thread *thi);
1540 extern int drbd_asender(struct drbd_thread *thi);
1541 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1542 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1543 		bool throttle_if_app_is_waiting);
1544 extern int drbd_submit_peer_request(struct drbd_device *,
1545 				    struct drbd_peer_request *, const unsigned,
1546 				    const int);
1547 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1548 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1549 						     sector_t, unsigned int,
1550 						     bool,
1551 						     gfp_t) __must_hold(local);
1552 extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1553 				 int);
1554 #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1555 #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1556 extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1557 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1558 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1559 extern int drbd_connected(struct drbd_peer_device *);
1560 
1561 /* Yes, there is kernel_setsockopt, but only since 2.6.18.
1562  * So we have our own copy of it here. */
1563 static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
1564 				  char *optval, int optlen)
1565 {
1566 	mm_segment_t oldfs = get_fs();
1567 	char __user *uoptval;
1568 	int err;
1569 
1570 	uoptval = (char __user __force *)optval;
1571 
1572 	set_fs(KERNEL_DS);
1573 	if (level == SOL_SOCKET)
1574 		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
1575 	else
1576 		err = sock->ops->setsockopt(sock, level, optname, uoptval,
1577 					    optlen);
1578 	set_fs(oldfs);
1579 	return err;
1580 }
1581 
1582 static inline void drbd_tcp_cork(struct socket *sock)
1583 {
1584 	int val = 1;
1585 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
1586 			(char *)&val, sizeof(val));
1587 }
1588 
1589 static inline void drbd_tcp_uncork(struct socket *sock)
1590 {
1591 	int val = 0;
1592 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
1593 			(char *)&val, sizeof(val));
1594 }
1595 
1596 static inline void drbd_tcp_nodelay(struct socket *sock)
1597 {
1598 	int val = 1;
1599 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1600 			(char *)&val, sizeof(val));
1601 }
1602 
1603 static inline void drbd_tcp_quickack(struct socket *sock)
1604 {
1605 	int val = 2;
1606 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1607 			(char *)&val, sizeof(val));
1608 }
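
/* A hypothetical usage sketch (#if 0, not built): corking batches several
 * small packets into fewer TCP segments, uncorking flushes what is queued. */
#if 0
static void example_send_burst(struct socket *sock)
{
	drbd_tcp_cork(sock);
	/* ... several small sends coalesce into full segments here ... */
	drbd_tcp_uncork(sock);	/* push out whatever is still queued */
}
#endif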
1609 
1610 /* sets the number of 512 byte sectors of our virtual device */
1611 static inline void drbd_set_my_capacity(struct drbd_device *device,
1612 					sector_t size)
1613 {
1614 	/* set_capacity(device->this_bdev->bd_disk, size); */
1615 	set_capacity(device->vdisk, size);
1616 	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
1617 }
1618 
1619 /*
1620  * used to submit our private bio
1621  */
1622 static inline void drbd_generic_make_request(struct drbd_device *device,
1623 					     int fault_type, struct bio *bio)
1624 {
1625 	__release(local);
1626 	if (!bio->bi_bdev) {
1627 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
1628 		bio_endio(bio, -ENODEV);
1629 		return;
1630 	}
1631 
1632 	if (drbd_insert_fault(device, fault_type))
1633 		bio_endio(bio, -EIO);
1634 	else
1635 		generic_make_request(bio);
1636 }
1637 
1638 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1639 			      enum write_ordering_e wo);
1640 
1641 /* drbd_proc.c */
1642 extern struct proc_dir_entry *drbd_proc;
1643 extern const struct file_operations drbd_proc_fops;
1644 extern const char *drbd_conn_str(enum drbd_conns s);
1645 extern const char *drbd_role_str(enum drbd_role s);
1646 
1647 /* drbd_actlog.c */
1648 extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1649 extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1650 extern void drbd_al_begin_io_commit(struct drbd_device *device);
1651 extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1652 extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1653 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1654 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1655 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1656 extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1657 extern void drbd_rs_cancel_all(struct drbd_device *device);
1658 extern int drbd_rs_del_all(struct drbd_device *device);
1659 extern void drbd_rs_failed_io(struct drbd_device *device,
1660 		sector_t sector, int size);
1661 extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1662 
1663 enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1664 extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1665 		enum update_sync_bits_mode mode,
1666 		const char *file, const unsigned int line);
1667 #define drbd_set_in_sync(device, sector, size) \
1668 	__drbd_change_sync(device, sector, size, SET_IN_SYNC, __FILE__, __LINE__)
1669 #define drbd_set_out_of_sync(device, sector, size) \
1670 	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC, __FILE__, __LINE__)
1671 #define drbd_rs_failed_io(device, sector, size) \
1672 	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED, __FILE__, __LINE__)
1673 extern void drbd_al_shrink(struct drbd_device *device);
1674 extern int drbd_initialize_al(struct drbd_device *, void *);
1675 
1676 /* drbd_nl.c */
1677 /* state info broadcast */
1678 struct sib_info {
1679 	enum drbd_state_info_bcast_reason sib_reason;
1680 	union {
1681 		struct {
1682 			char *helper_name;
1683 			unsigned helper_exit_code;
1684 		};
1685 		struct {
1686 			union drbd_state os;
1687 			union drbd_state ns;
1688 		};
1689 	};
1690 };
1691 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1692 
1693 /*
1694  * inline helper functions
1695  *************************/
1696 
1697 /* see also page_chain_add and friends in drbd_receiver.c */
1698 static inline struct page *page_chain_next(struct page *page)
1699 {
1700 	return (struct page *)page_private(page);
1701 }
1702 #define page_chain_for_each(page) \
1703 	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1704 			page = page_chain_next(page))
1705 #define page_chain_for_each_safe(page, n) \
1706 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1707 
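/* Sketch of how such a chain is built and torn down (#if 0, illustration
 * only): each page's private field points to the next page in the chain.
 * The function name is made up; the real helpers are page_chain_add() and
 * friends in drbd_receiver.c. */
#if 0
static void example_page_chain(void)
{
	struct page *head = NULL, *page, *tmp;
	int i;

	for (i = 0; i < 4; i++) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			break;
		set_page_private(page, (unsigned long)head);	/* link in front */
		head = page;
	}

	page = head;
	page_chain_for_each_safe(page, tmp)
		__free_page(page);
}
#endif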
1708 
1709 static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1710 {
1711 	struct page *page = peer_req->pages;
1712 	page_chain_for_each(page) {
1713 		if (page_count(page) > 1)
1714 			return 1;
1715 	}
1716 	return 0;
1717 }
1718 
1719 static inline enum drbd_state_rv
1720 _drbd_set_state(struct drbd_device *device, union drbd_state ns,
1721 		enum chg_state_flags flags, struct completion *done)
1722 {
1723 	enum drbd_state_rv rv;
1724 
1725 	read_lock(&global_state_lock);
1726 	rv = __drbd_set_state(device, ns, flags, done);
1727 	read_unlock(&global_state_lock);
1728 
1729 	return rv;
1730 }
1731 
1732 static inline union drbd_state drbd_read_state(struct drbd_device *device)
1733 {
1734 	struct drbd_resource *resource = device->resource;
1735 	union drbd_state rv;
1736 
1737 	rv.i = device->state.i;
1738 	rv.susp = resource->susp;
1739 	rv.susp_nod = resource->susp_nod;
1740 	rv.susp_fen = resource->susp_fen;
1741 
1742 	return rv;
1743 }
1744 
1745 enum drbd_force_detach_flags {
1746 	DRBD_READ_ERROR,
1747 	DRBD_WRITE_ERROR,
1748 	DRBD_META_IO_ERROR,
1749 	DRBD_FORCE_DETACH,
1750 };
1751 
1752 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1753 static inline void __drbd_chk_io_error_(struct drbd_device *device,
1754 		enum drbd_force_detach_flags df,
1755 		const char *where)
1756 {
1757 	enum drbd_io_error_p ep;
1758 
1759 	rcu_read_lock();
1760 	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1761 	rcu_read_unlock();
1762 	switch (ep) {
1763 	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1764 		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1765 			if (__ratelimit(&drbd_ratelimit_state))
1766 				drbd_err(device, "Local IO failed in %s.\n", where);
1767 			if (device->state.disk > D_INCONSISTENT)
1768 				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1769 			break;
1770 		}
1771 		/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1772 	case EP_DETACH:
1773 	case EP_CALL_HELPER:
1774 		/* Remember whether we saw a READ or WRITE error.
1775 		 *
1776 		 * Recovery of the affected area for WRITE failure is covered
1777 		 * by the activity log.
1778 		 * READ errors may fall outside that area though. Certain READ
1779 		 * errors can be "healed" by writing good data to the affected
1780 		 * blocks, which triggers block re-allocation in lower layers.
1781 		 *
1782 		 * If we cannot write the bitmap after a READ error,
1783 		 * we may need to trigger a full sync (see w_go_diskless()).
1784 		 *
1785 		 * Force-detach is not really an IO error, but rather a
1786 		 * desperate measure to try to deal with a completely
1787 		 * unresponsive lower level IO stack.
1788 		 * Still, it should be treated as a WRITE error.
1789 		 *
1790 		 * Meta IO error is always WRITE error:
1791 		 * we read meta data only once during attach,
1792 		 * which will fail in case of errors.
1793 		 */
1794 		set_bit(WAS_IO_ERROR, &device->flags);
1795 		if (df == DRBD_READ_ERROR)
1796 			set_bit(WAS_READ_ERROR, &device->flags);
1797 		if (df == DRBD_FORCE_DETACH)
1798 			set_bit(FORCE_DETACH, &device->flags);
1799 		if (device->state.disk > D_FAILED) {
1800 			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1801 			drbd_err(device,
1802 				"Local IO failed in %s. Detaching...\n", where);
1803 		}
1804 		break;
1805 	}
1806 }
1807 
1808 /**
1809  * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
1810  * @device:	 DRBD device.
1811  * @error:	 Error code passed to the IO completion callback
1812  * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1813  *
1814  * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1815  */
1816 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1817 static inline void drbd_chk_io_error_(struct drbd_device *device,
1818 	int error, enum drbd_force_detach_flags forcedetach, const char *where)
1819 {
1820 	if (error) {
1821 		unsigned long flags;
1822 		spin_lock_irqsave(&device->resource->req_lock, flags);
1823 		__drbd_chk_io_error_(device, forcedetach, where);
1824 		spin_unlock_irqrestore(&device->resource->req_lock, flags);
1825 	}
1826 }
1827 
1828 
1829 /**
1830  * drbd_md_first_sector() - Returns the first sector number of the meta data area
1831  * @bdev:	Meta data block device.
1832  *
1833  * BTW, for internal meta data, this happens to be the maximum capacity
1834  * we could agree upon with our peer node.
1835  */
1836 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1837 {
1838 	switch (bdev->md.meta_dev_idx) {
1839 	case DRBD_MD_INDEX_INTERNAL:
1840 	case DRBD_MD_INDEX_FLEX_INT:
1841 		return bdev->md.md_offset + bdev->md.bm_offset;
1842 	case DRBD_MD_INDEX_FLEX_EXT:
1843 	default:
1844 		return bdev->md.md_offset;
1845 	}
1846 }
1847 
1848 /**
1849  * drbd_md_last_sector() - Return the last sector number of the meta data area
1850  * @bdev:	Meta data block device.
1851  */
1852 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1853 {
1854 	switch (bdev->md.meta_dev_idx) {
1855 	case DRBD_MD_INDEX_INTERNAL:
1856 	case DRBD_MD_INDEX_FLEX_INT:
1857 		return bdev->md.md_offset + MD_4kB_SECT - 1;
1858 	case DRBD_MD_INDEX_FLEX_EXT:
1859 	default:
1860 		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
1861 	}
1862 }
1863 
1864 /* Returns the number of 512 byte sectors of the device */
1865 static inline sector_t drbd_get_capacity(struct block_device *bdev)
1866 {
1867 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1868 	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1869 }
1870 
1871 /**
1872  * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1873  * @bdev:	Meta data block device.
1874  *
1875  * Returns the capacity we announce to our peer.  We clip ourselves at the
1876  * various MAX_SECTORS, because if we don't, the current implementation will
1877  * oops sooner or later.
1878  */
1879 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1880 {
1881 	sector_t s;
1882 
1883 	switch (bdev->md.meta_dev_idx) {
1884 	case DRBD_MD_INDEX_INTERNAL:
1885 	case DRBD_MD_INDEX_FLEX_INT:
1886 		s = drbd_get_capacity(bdev->backing_bdev)
1887 			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1888 				drbd_md_first_sector(bdev))
1889 			: 0;
1890 		break;
1891 	case DRBD_MD_INDEX_FLEX_EXT:
1892 		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1893 				drbd_get_capacity(bdev->backing_bdev));
1894 		/* clip at maximum size the meta device can support */
1895 		s = min_t(sector_t, s,
1896 			BM_EXT_TO_SECT(bdev->md.md_size_sect
1897 				     - bdev->md.bm_offset));
1898 		break;
1899 	default:
1900 		s = min_t(sector_t, DRBD_MAX_SECTORS,
1901 				drbd_get_capacity(bdev->backing_bdev));
1902 	}
1903 	return s;
1904 }
1905 
1906 /**
1907  * drbd_md_ss() - Return the sector number of our meta data super block
1908  * @bdev:	Meta data block device.
1909  */
1910 static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1911 {
1912 	const int meta_dev_idx = bdev->md.meta_dev_idx;
1913 
1914 	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1915 		return 0;
1916 
1917 	/* Since drbd08, internal meta data is always "flexible".
1918 	 * position: last 4k-aligned block of 4k size */
1919 	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1920 	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1921 		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1922 
1923 	/* external, some index; this is the old fixed size layout */
1924 	return MD_128MB_SECT * bdev->md.meta_dev_idx;
1925 }
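
/* Worked example for the internal/flexible case above, assuming a backing
 * device of 1000005 sectors (512 byte units): 1000005 & ~7ULL = 1000000
 * rounds down to a 4k boundary, and 1000000 - 8 = 999992 is then the first
 * sector of the last 4k-aligned 4k block, which holds the super block. */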
1926 
1927 static inline void
1928 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1929 {
1930 	unsigned long flags;
1931 	spin_lock_irqsave(&q->q_lock, flags);
1932 	list_add_tail(&w->list, &q->q);
1933 	spin_unlock_irqrestore(&q->q_lock, flags);
1934 	wake_up(&q->q_wait);
1935 }
1936 
1937 static inline void
1938 drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1939 {
1940 	unsigned long flags;
1941 	spin_lock_irqsave(&q->q_lock, flags);
1942 	if (list_empty_careful(&w->list))
1943 		list_add_tail(&w->list, &q->q);
1944 	spin_unlock_irqrestore(&q->q_lock, flags);
1945 	wake_up(&q->q_wait);
1946 }
1947 
1948 static inline void
1949 drbd_device_post_work(struct drbd_device *device, int work_bit)
1950 {
1951 	if (!test_and_set_bit(work_bit, &device->flags)) {
1952 		struct drbd_connection *connection =
1953 			first_peer_device(device)->connection;
1954 		struct drbd_work_queue *q = &connection->sender_work;
1955 		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1956 			wake_up(&q->q_wait);
1957 	}
1958 }
1959 
1960 extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1961 
1962 static inline void wake_asender(struct drbd_connection *connection)
1963 {
1964 	if (test_bit(SIGNAL_ASENDER, &connection->flags))
1965 		force_sig(DRBD_SIG, connection->asender.task);
1966 }
1967 
1968 static inline void request_ping(struct drbd_connection *connection)
1969 {
1970 	set_bit(SEND_PING, &connection->flags);
1971 	wake_asender(connection);
1972 }
1973 
1974 extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1975 extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1976 extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1977 			     enum drbd_packet, unsigned int, void *,
1978 			     unsigned int);
1979 extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1980 			     enum drbd_packet, unsigned int, void *,
1981 			     unsigned int);
1982 
1983 extern int drbd_send_ping(struct drbd_connection *connection);
1984 extern int drbd_send_ping_ack(struct drbd_connection *connection);
1985 extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1986 extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1987 
1988 static inline void drbd_thread_stop(struct drbd_thread *thi)
1989 {
1990 	_drbd_thread_stop(thi, false, true);
1991 }
1992 
1993 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1994 {
1995 	_drbd_thread_stop(thi, false, false);
1996 }
1997 
1998 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1999 {
2000 	_drbd_thread_stop(thi, true, false);
2001 }
2002 
2003 /* counts how many answer packets we expect from our peer,
2004  * for either explicit application requests,
2005  * or implicit barrier packets as necessary.
2006  * increased:
2007  *  w_send_barrier
2008  *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
2009  *    it is much easier and equally valid to count what we queue for the
2010  *    worker, even before it actually was queued or sent.
2011  *    (drbd_make_request_common; recovery path on read io-error)
2012  * decreased:
2013  *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
2014  *  _req_mod(req, DATA_RECEIVED)
2015  *     [from receive_DataReply]
2016  *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
2017  *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
2018  *     for some reason it is NOT decreased in got_NegAck,
2019  *     but in the resulting cleanup code from report_params.
2020  *     we should try to remember the reason for that...
2021  *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
2022  *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
2023  *     [from tl_clear_barrier]
2024  */
2025 static inline void inc_ap_pending(struct drbd_device *device)
2026 {
2027 	atomic_inc(&device->ap_pending_cnt);
2028 }
2029 
2030 #define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
2031 	if (atomic_read(&device->which) < 0)				\
2032 		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
2033 			func, line,					\
2034 			atomic_read(&device->which))
2035 
2036 #define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2037 static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2038 {
2039 	if (atomic_dec_and_test(&device->ap_pending_cnt))
2040 		wake_up(&device->misc_wait);
2041 	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2042 }
2043 
2044 /* counts how many resync-related answers we still expect from the peer
2045  *		     increase			decrease
2046  * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
2047  * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
2048  *					   (or P_NEG_ACK with ID_SYNCER)
2049  */
2050 static inline void inc_rs_pending(struct drbd_device *device)
2051 {
2052 	atomic_inc(&device->rs_pending_cnt);
2053 }
2054 
2055 #define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2056 static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2057 {
2058 	atomic_dec(&device->rs_pending_cnt);
2059 	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2060 }
2061 
2062 /* counts how many answers we still need to send to the peer.
2063  * increased on
2064  *  receive_Data	unless protocol A;
2065  *			we need to send a P_RECV_ACK (proto B)
2066  *			or P_WRITE_ACK (proto C)
2067  *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
2068  *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
2069  *  receive_Barrier_*	we need to send a P_BARRIER_ACK
2070  */
2071 static inline void inc_unacked(struct drbd_device *device)
2072 {
2073 	atomic_inc(&device->unacked_cnt);
2074 }
2075 
2076 #define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2077 static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2078 {
2079 	atomic_dec(&device->unacked_cnt);
2080 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2081 }
2082 
2083 #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2084 static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2085 {
2086 	atomic_sub(n, &device->unacked_cnt);
2087 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2088 }
2089 
2090 static inline bool is_sync_state(enum drbd_conns connection_state)
2091 {
2092 	return
2093 	   (connection_state == C_SYNC_SOURCE
2094 	||  connection_state == C_SYNC_TARGET
2095 	||  connection_state == C_PAUSED_SYNC_S
2096 	||  connection_state == C_PAUSED_SYNC_T);
2097 }
2098 
2099 /**
2100  * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
2101  * @_device:		DRBD device.
2102  * @_min_state:		Minimum device state required for success.
2103  *
2104  * You have to call put_ldev() when finished working with device->ldev.
2105  */
2106 #define get_ldev_if_state(_device, _min_state)				\
2107 	(_get_ldev_if_state((_device), (_min_state)) ?			\
2108 	 ({ __acquire(x); true; }) : false)
2109 #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
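
/* Hypothetical usage sketch (#if 0, not built): every successful get_ldev()
 * must be paired with exactly one put_ldev() once device->ldev is no longer
 * dereferenced. */
#if 0
static void example_ldev_user(struct drbd_device *device)
{
	if (!get_ldev(device))
		return;	/* no usable local disk, nothing to do */

	/* ... device->ldev may safely be dereferenced here ... */

	put_ldev(device);
}
#endif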
2110 
2111 static inline void put_ldev(struct drbd_device *device)
2112 {
2113 	enum drbd_disk_state disk_state = device->state.disk;
2114 	/* We must check the state *before* the atomic_dec becomes visible,
2115 	 * or we have a theoretical race where someone hitting zero,
2116 	 * while the state is still D_FAILED, will then see D_DISKLESS in the
2117 	 * condition below and call into destroy, which it must not do, yet. */
2118 	int i = atomic_dec_return(&device->local_cnt);
2119 
2120 	/* This may be called from some endio handler,
2121 	 * so we must not sleep here. */
2122 
2123 	__release(local);
2124 	D_ASSERT(device, i >= 0);
2125 	if (i == 0) {
2126 		if (disk_state == D_DISKLESS)
2127 			/* even internal references gone, safe to destroy */
2128 			drbd_device_post_work(device, DESTROY_DISK);
2129 		if (disk_state == D_FAILED)
2130 			/* all application IO references gone. */
2131 			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2132 				drbd_device_post_work(device, GO_DISKLESS);
2133 		wake_up(&device->misc_wait);
2134 	}
2135 }
2136 
2137 #ifndef __CHECKER__
2138 static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2139 {
2140 	int io_allowed;
2141 
2142 	/* never get a reference while D_DISKLESS */
2143 	if (device->state.disk == D_DISKLESS)
2144 		return 0;
2145 
2146 	atomic_inc(&device->local_cnt);
2147 	io_allowed = (device->state.disk >= mins);
2148 	if (!io_allowed)
2149 		put_ldev(device);
2150 	return io_allowed;
2151 }
2152 #else
2153 extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2154 #endif
2155 
2156 /* this throttles on-the-fly application requests
2157  * according to max_buffers settings;
2158  * maybe re-implement using semaphores? */
2159 static inline int drbd_get_max_buffers(struct drbd_device *device)
2160 {
2161 	struct net_conf *nc;
2162 	int mxb;
2163 
2164 	rcu_read_lock();
2165 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2166 	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
2167 	rcu_read_unlock();
2168 
2169 	return mxb;
2170 }
2171 
2172 static inline int drbd_state_is_stable(struct drbd_device *device)
2173 {
2174 	union drbd_dev_state s = device->state;
2175 
2176 	/* DO NOT add a default clause; we want the compiler to warn us
2177 	 * for any newly introduced state we may have forgotten to add here */
2178 
2179 	switch ((enum drbd_conns)s.conn) {
2180 	/* new io only accepted when there is no connection, ... */
2181 	case C_STANDALONE:
2182 	case C_WF_CONNECTION:
2183 	/* ... or there is a well established connection. */
2184 	case C_CONNECTED:
2185 	case C_SYNC_SOURCE:
2186 	case C_SYNC_TARGET:
2187 	case C_VERIFY_S:
2188 	case C_VERIFY_T:
2189 	case C_PAUSED_SYNC_S:
2190 	case C_PAUSED_SYNC_T:
2191 	case C_AHEAD:
2192 	case C_BEHIND:
2193 		/* transitional states, IO allowed */
2194 	case C_DISCONNECTING:
2195 	case C_UNCONNECTED:
2196 	case C_TIMEOUT:
2197 	case C_BROKEN_PIPE:
2198 	case C_NETWORK_FAILURE:
2199 	case C_PROTOCOL_ERROR:
2200 	case C_TEAR_DOWN:
2201 	case C_WF_REPORT_PARAMS:
2202 	case C_STARTING_SYNC_S:
2203 	case C_STARTING_SYNC_T:
2204 		break;
2205 
2206 		/* Allow IO in BM exchange states with new protocols */
2207 	case C_WF_BITMAP_S:
2208 		if (first_peer_device(device)->connection->agreed_pro_version < 96)
2209 			return 0;
2210 		break;
2211 
2212 		/* no new io accepted in these states */
2213 	case C_WF_BITMAP_T:
2214 	case C_WF_SYNC_UUID:
2215 	case C_MASK:
2216 		/* not "stable" */
2217 		return 0;
2218 	}
2219 
2220 	switch ((enum drbd_disk_state)s.disk) {
2221 	case D_DISKLESS:
2222 	case D_INCONSISTENT:
2223 	case D_OUTDATED:
2224 	case D_CONSISTENT:
2225 	case D_UP_TO_DATE:
2226 	case D_FAILED:
2227 		/* disk state is stable as well. */
2228 		break;
2229 
2230 	/* no new io accepted during transitional states */
2231 	case D_ATTACHING:
2232 	case D_NEGOTIATING:
2233 	case D_UNKNOWN:
2234 	case D_MASK:
2235 		/* not "stable" */
2236 		return 0;
2237 	}
2238 
2239 	return 1;
2240 }
2241 
2242 static inline int drbd_suspended(struct drbd_device *device)
2243 {
2244 	struct drbd_resource *resource = device->resource;
2245 
2246 	return resource->susp || resource->susp_fen || resource->susp_nod;
2247 }
2248 
2249 static inline bool may_inc_ap_bio(struct drbd_device *device)
2250 {
2251 	int mxb = drbd_get_max_buffers(device);
2252 
2253 	if (drbd_suspended(device))
2254 		return false;
2255 	if (test_bit(SUSPEND_IO, &device->flags))
2256 		return false;
2257 
2258 	/* to avoid potential deadlock or bitmap corruption,
2259 	 * in various places, we only allow new application io
2260 	 * to start during "stable" states. */
2261 
2262 	/* no new io accepted when attaching or detaching the disk */
2263 	if (!drbd_state_is_stable(device))
2264 		return false;
2265 
2266 	/* since some older kernels don't have atomic_add_unless,
2267 	 * and we are within the spinlock anyway, we have this workaround.  */
2268 	if (atomic_read(&device->ap_bio_cnt) > mxb)
2269 		return false;
2270 	if (test_bit(BITMAP_IO, &device->flags))
2271 		return false;
2272 	return true;
2273 }
2274 
2275 static inline bool inc_ap_bio_cond(struct drbd_device *device)
2276 {
2277 	bool rv = false;
2278 
2279 	spin_lock_irq(&device->resource->req_lock);
2280 	rv = may_inc_ap_bio(device);
2281 	if (rv)
2282 		atomic_inc(&device->ap_bio_cnt);
2283 	spin_unlock_irq(&device->resource->req_lock);
2284 
2285 	return rv;
2286 }
2287 
2288 static inline void inc_ap_bio(struct drbd_device *device)
2289 {
2290 	/* we wait here
2291 	 *    as long as the device is suspended,
2292 	 *    while the bitmap is still on the fly during the connection
2293 	 *    handshake, and as long as we would exceed the max_buffers limit.
2294 	 *
2295 	 * to avoid races with the reconnect code,
2296 	 * we need to atomic_inc within the spinlock. */
2297 
2298 	wait_event(device->misc_wait, inc_ap_bio_cond(device));
2299 }
2300 
2301 static inline void dec_ap_bio(struct drbd_device *device)
2302 {
2303 	int mxb = drbd_get_max_buffers(device);
2304 	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2305 
2306 	D_ASSERT(device, ap_bio >= 0);
2307 
2308 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2309 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2310 			drbd_queue_work(&first_peer_device(device)->
2311 				connection->sender_work,
2312 				&device->bm_io_work.w);
2313 	}
2314 
2315 	/* this currently does wake_up for every dec_ap_bio!
2316 	 * maybe rather introduce some type of hysteresis?
2317 	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2318 	if (ap_bio < mxb)
2319 		wake_up(&device->misc_wait);
2320 }
2321 
2322 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2323 {
2324 	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2325 		first_peer_device(device)->connection->agreed_pro_version != 100;
2326 }
2327 
2328 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2329 {
2330 	int changed = device->ed_uuid != val;
2331 	device->ed_uuid = val;
2332 	return changed;
2333 }
2334 
2335 static inline int drbd_queue_order_type(struct drbd_device *device)
2336 {
2337 	/* sorry, we currently have no working implementation
2338 	 * of distributed TCQ stuff */
2339 #ifndef QUEUE_ORDERED_NONE
2340 #define QUEUE_ORDERED_NONE 0
2341 #endif
2342 	return QUEUE_ORDERED_NONE;
2343 }
2344 
2345 static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2346 {
2347 	return list_first_entry_or_null(&resource->connections,
2348 				struct drbd_connection, connections);
2349 }
2350 
2351 #endif
2352