/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

/* module parameter, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes;
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)
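
/* Illustrative only: drbd_printk() dispatches on the type of its object
 * argument at compile time, so each of these resolves to the matching
 * __drbd_printk_*() helper:
 *
 *	drbd_info(device, "attached to disk\n");	struct drbd_device *
 *	drbd_warn(connection, "peer unreachable\n");	struct drbd_connection *
 *	drbd_err(resource, "state change failed\n");	struct drbd_resource *
 *
 * Any other pointer type makes the __builtin_choose_expr() chain fall
 * through to drbd_printk_with_wrong_object_type(), which is declared but
 * intentionally never defined, turning the mistake into a link-time error.
 */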

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({								\
		bool _bool = (exp);						\
		if (!_bool)							\
			drbd_err(device, "ASSERTION %s FAILED in %s\n",		\
			        #exp, __func__);				\
		_bool;								\
		})
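
/* Illustrative only: unlike D_ASSERT(), the result of expect() can gate a
 * fallback path.  Note that the macro implicitly uses a local "device"
 * variable from the caller's scope, e.g.:
 *
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		size = DRBD_MAX_BIO_SIZE;
 */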

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
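
/* Illustrative only: a typical fault injection site wraps the real bio
 * submission (a sketch, not a verbatim call site from this driver):
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		bio_io_error(bio);
 *	else
 *		submit_bio(bio);
 *
 * Without CONFIG_DRBD_FAULT_INJECTION, drbd_insert_fault() is constant 0
 * and the error branch compiles away entirely.
 */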

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
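
/* e.g. div_ceil(7, 2) == 4 while div_floor(7, 2) == 3; both macros
 * evaluate their arguments more than once, so avoid side effects in A, B. */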

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
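
/* Worked example, 64 bit host: an encoded packet may end at, say,
 * bit_offset == 131; the next plain text packet must start on a 64 bit
 * word boundary, so word_offset becomes 131 >> 6 == 2, i.e. bit 128. */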

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we do no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declaration of function defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_DISCARD? */
	__EE_IS_TRIM,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write same request */
	__EE_WRITE_SAME,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,

	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM             (1<<__EE_IS_TRIM)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)
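
/* Illustrative only: while bios are in flight, the flags word may only be
 * modified with the atomic bit ops; after completion plain tests suffice:
 *
 *	set_bit(__EE_WAS_ERROR, &peer_req->flags);	(from the endio callback)
 *	...
 *	if (peer_req->flags & EE_WAS_ERROR)		(after completion)
 *		drbd_err(device, "peer request failed\n");
 */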

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
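
/* Illustrative only: a bulk operation takes the bitmap lock stating which
 * concurrent accesses are to stay allowed, e.g.
 *
 *	drbd_bm_lock(device, "resync", BM_LOCKED_TEST_ALLOWED);
 *	... bulk work on the bitmap ...
 *	drbd_bm_unlock(device);
 *
 * Non-bulk accessors then check these BM_DONT_* bits and complain when an
 * operation that was declared unexpected happens anyway.
 */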

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
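
/* A minimal sketch of what the allocator has to do to size the trailing
 * flexible array (the real fifo_alloc() lives elsewhere in the driver):
 *
 *	struct fifo_buffer *fb;
 *
 *	fb = kzalloc(sizeof(*fb) + fifo_size * sizeof(int), GFP_NOIO);
 *	if (fb)
 *		fb->size = fifo_size;
 *	return fb;
 */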

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_ahash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_ahash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
	struct crypto_ahash *csums_tfm;
	struct crypto_ahash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* each mark's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED		(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
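
/* Illustrative only: walking the whole object hierarchy (callers must hold
 * the appropriate locks, or use the _rcu variants under rcu_read_lock()):
 *
 *	for_each_resource(resource, &drbd_resources)
 *		for_each_connection(connection, resource)
 *			idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
 *				...
 */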

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 *  Variants:
 *     old, indexed fixed size meta data:
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like fashion.
 *  Its total size used to be fixed at 32kB,
 *  but is about to become configurable.
 */

/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Doing so will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *   This many changes to the active set can be logged with one transaction.
 *   This number is arbitrary.
 * context per transaction:
 *   This many context extent numbers are logged with each transaction.
 *   This number results from the transaction block size (4k), the layout
 *   of the transaction header, and the number of updates per transaction.
 *   See drbd_actlog.c:struct al_transaction_on_disk
 */
#define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
#define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4
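
/* Illustrative arithmetic behind the 919 above: a 4k transaction block
 * minus 36 bytes of header minus 6*64 = 384 bytes of update slots leaves
 * 4096 - 36 - 384 = 3676 bytes, which holds 3676/4 = 919 32-bit context
 * extent numbers. */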

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
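
/* Worked example: a 1 GiB device has 2^30 / 2^12 = 262144 bits, i.e. a
 * 32 KiB bitmap (the "1GB disk" example above), and storage sector 1024
 * maps to bit BM_SECT_TO_BIT(1024) == 1024 >> 3 == 128. */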

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how much _storage_ sectors we have per bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)


/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 *		     of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	  `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )	 //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
/* we have a certain meta data variant that has a fixed on-disk size of 128
 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
 * log, leaving this many sectors for the bitmap.
 */

#define DRBD_MAX_SECTORS_FIXED_BM \
	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
#endif

/* Estimate max bio size as 256 * PAGE_SIZE,
 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit ourselves to a platform agnostic constant here for now.
 * A followup commit may allow even bigger BIO sizes,
 * once we have thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */

/* For now, don't allow more than half of what we can "activate" in one
 * activity log transaction to be discarded in one go. We may need to rework
 * drbd_al_begin_io() to allow for even larger discard ranges */
#define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
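
/* Illustrative arithmetic: with 64 updates per transaction that is
 * 64/2 * 4 MiB = 128 MiB per batch, i.e. 262144 512-byte sectors. */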

extern int  drbd_bm_init(struct drbd_device *device);
extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int  drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
	struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
 * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t	     drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t      drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

/* drbd's page pool, used to buffer data received from the peer,
 * or data requested by the peer.
 *
 * This does not have an emergency reserve.
 *
 * When allocating from this pool, it first takes pages from the pool.
 * Only if the pool is depleted will it try to allocate from the system.
1419  *
1420  * The assumption is that pages taken from this pool will be processed,
1421  * and given back, "quickly", and then can be recycled, so we can avoid
1422  * frequent calls to alloc_page(), and still will be able to make progress even
1423  * under memory pressure.
1424  */
1425 extern struct page *drbd_pp_pool;
1426 extern spinlock_t   drbd_pp_lock;
1427 extern int	    drbd_pp_vacant;
1428 extern wait_queue_head_t drbd_pp_wait;
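
/* Illustrative sketch (hypothetical, not part of the driver) of the
 * intended life cycle of pool pages in the receiver path:
 *
 *	struct page *page = drbd_alloc_pages(peer_device, nr_pages, true);
 *	// receive the payload into the page chain and submit it; once the
 *	// request is done, the pages are given back (e.g. via
 *	// drbd_free_peer_req()) and can be reused without alloc_page().
 */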
1429 
1430 /* We also need a standard (emergency-reserve backed) page pool
1431  * for meta data IO (activity log, bitmap).
1432  * We can keep it global, as long as it is used as "N pages at a time".
1433  * 128 should be plenty; currently we could probably get away with as few as 1.
1434  */
1435 #define DRBD_MIN_POOL_PAGES	128
1436 extern mempool_t *drbd_md_io_page_pool;
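
/* Hypothetical usage sketch: meta data IO takes one page at a time from
 * the reserve-backed pool and returns it when the IO has completed:
 *
 *	struct page *page = mempool_alloc(drbd_md_io_page_pool, GFP_NOIO);
 *	// ... use the page for activity log / bitmap IO ...
 *	mempool_free(page, drbd_md_io_page_pool);
 */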
1437 
1438 /* We also need to make sure we get a bio
1439  * when we need it for housekeeping purposes */
1440 extern struct bio_set *drbd_md_io_bio_set;
1441 /* to allocate from that set */
1442 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1443 
1444 extern struct mutex resources_mutex;
1445 
1446 extern int conn_lowest_minor(struct drbd_connection *connection);
1447 extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1448 extern void drbd_destroy_device(struct kref *kref);
1449 extern void drbd_delete_device(struct drbd_device *device);
1450 
1451 extern struct drbd_resource *drbd_create_resource(const char *name);
1452 extern void drbd_free_resource(struct drbd_resource *resource);
1453 
1454 extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1455 extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1456 extern void drbd_destroy_connection(struct kref *kref);
1457 extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1458 					    void *peer_addr, int peer_addr_len);
1459 extern struct drbd_resource *drbd_find_resource(const char *name);
1460 extern void drbd_destroy_resource(struct kref *kref);
1461 extern void conn_free_crypto(struct drbd_connection *connection);
1462 
1463 extern int proc_details;
1464 
1465 /* drbd_req */
1466 extern void do_submit(struct work_struct *ws);
1467 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1468 extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
1469 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1470 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1471 
1472 
1473 /* drbd_nl.c */
1474 
1475 extern struct mutex notification_mutex;
1476 
1477 extern void drbd_suspend_io(struct drbd_device *device);
1478 extern void drbd_resume_io(struct drbd_device *device);
1479 extern char *ppsize(char *buf, unsigned long long size);
1480 extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1481 enum determine_dev_size {
1482 	DS_ERROR_SHRINK = -3,
1483 	DS_ERROR_SPACE_MD = -2,
1484 	DS_ERROR = -1,
1485 	DS_UNCHANGED = 0,
1486 	DS_SHRUNK = 1,
1487 	DS_GREW = 2,
1488 	DS_GREW_FROM_ZERO = 3,
1489 };
1490 extern enum determine_dev_size
1491 drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1492 extern void resync_after_online_grow(struct drbd_device *);
1493 extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1494 			struct drbd_backing_dev *bdev, struct o_qlim *o);
1495 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1496 					enum drbd_role new_role,
1497 					int force);
1498 extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1499 extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1500 extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1501 extern int drbd_khelper(struct drbd_device *device, char *cmd);
1502 
1503 /* drbd_worker.c */
1504 /* bi_end_io handlers */
1505 extern void drbd_md_endio(struct bio *bio);
1506 extern void drbd_peer_request_endio(struct bio *bio);
1507 extern void drbd_request_endio(struct bio *bio);
1508 extern int drbd_worker(struct drbd_thread *thi);
1509 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1510 void drbd_resync_after_changed(struct drbd_device *device);
1511 extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1512 extern void resume_next_sg(struct drbd_device *device);
1513 extern void suspend_other_sg(struct drbd_device *device);
1514 extern int drbd_resync_finished(struct drbd_device *device);
1515 /* maybe rather drbd_main.c ? */
1516 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1517 extern void drbd_md_put_buffer(struct drbd_device *device);
1518 extern int drbd_md_sync_page_io(struct drbd_device *device,
1519 		struct drbd_backing_dev *bdev, sector_t sector, int op);
1520 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1521 extern void wait_until_done_or_force_detached(struct drbd_device *device,
1522 		struct drbd_backing_dev *bdev, unsigned int *done);
1523 extern void drbd_rs_controller_reset(struct drbd_device *device);
1524 
1525 static inline void ov_out_of_sync_print(struct drbd_device *device)
1526 {
1527 	if (device->ov_last_oos_size) {
1528 		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1529 		     (unsigned long long)device->ov_last_oos_start,
1530 		     (unsigned long)device->ov_last_oos_size);
1531 	}
1532 	device->ov_last_oos_size = 0;
1533 }
1534 
1535 
1536 extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
1537 extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
1538 /* worker callbacks */
1539 extern int w_e_end_data_req(struct drbd_work *, int);
1540 extern int w_e_end_rsdata_req(struct drbd_work *, int);
1541 extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1542 extern int w_e_end_ov_reply(struct drbd_work *, int);
1543 extern int w_e_end_ov_req(struct drbd_work *, int);
1544 extern int w_ov_finished(struct drbd_work *, int);
1545 extern int w_resync_timer(struct drbd_work *, int);
1546 extern int w_send_write_hint(struct drbd_work *, int);
1547 extern int w_send_dblock(struct drbd_work *, int);
1548 extern int w_send_read_req(struct drbd_work *, int);
1549 extern int w_e_reissue(struct drbd_work *, int);
1550 extern int w_restart_disk_io(struct drbd_work *, int);
1551 extern int w_send_out_of_sync(struct drbd_work *, int);
1552 extern int w_start_resync(struct drbd_work *, int);
1553 
1554 extern void resync_timer_fn(unsigned long data);
1555 extern void start_resync_timer_fn(unsigned long data);
1556 
1557 extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1558 
1559 /* drbd_receiver.c */
1560 extern int drbd_receiver(struct drbd_thread *thi);
1561 extern int drbd_ack_receiver(struct drbd_thread *thi);
1562 extern void drbd_send_ping_wf(struct work_struct *ws);
1563 extern void drbd_send_acks_wf(struct work_struct *ws);
1564 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1565 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1566 		bool throttle_if_app_is_waiting);
1567 extern int drbd_submit_peer_request(struct drbd_device *,
1568 				    struct drbd_peer_request *, const unsigned,
1569 				    const unsigned, const int);
1570 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1571 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1572 						     sector_t, unsigned int,
1573 						     unsigned int,
1574 						     gfp_t) __must_hold(local);
1575 extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1576 				 int);
1577 #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1578 #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1579 extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1580 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1581 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1582 extern int drbd_connected(struct drbd_peer_device *);
1583 
1584 static inline void drbd_tcp_cork(struct socket *sock)
1585 {
1586 	int val = 1;
1587 	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1588 			(char*)&val, sizeof(val));
1589 }
1590 
1591 static inline void drbd_tcp_uncork(struct socket *sock)
1592 {
1593 	int val = 0;
1594 	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1595 			(char*)&val, sizeof(val));
1596 }
1597 
1598 static inline void drbd_tcp_nodelay(struct socket *sock)
1599 {
1600 	int val = 1;
1601 	(void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1602 			(char*)&val, sizeof(val));
1603 }
1604 
1605 static inline void drbd_tcp_quickack(struct socket *sock)
1606 {
1607 	int val = 2;
1608 	(void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1609 			(char*)&val, sizeof(val));
1610 }
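
/* Usage sketch (hypothetical): corking batches several sends into fewer
 * TCP segments; uncorking flushes whatever is still queued:
 *
 *	drbd_tcp_cork(sock);
 *	// ... several send calls for header and payload ...
 *	drbd_tcp_uncork(sock);
 */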
1611 
1612 /* sets the number of 512 byte sectors of our virtual device */
1613 static inline void drbd_set_my_capacity(struct drbd_device *device,
1614 					sector_t size)
1615 {
1616 	/* set_capacity(device->this_bdev->bd_disk, size); */
1617 	set_capacity(device->vdisk, size);
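	/* mirror the new size into the bdev inode: sectors << 9 == bytes */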
1618 	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
1619 }
1620 
1621 /*
1622  * used to submit our private bio
1623  */
1624 static inline void drbd_generic_make_request(struct drbd_device *device,
1625 					     int fault_type, struct bio *bio)
1626 {
1627 	__release(local);
1628 	if (!bio->bi_bdev) {
1629 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
1630 		bio->bi_error = -ENODEV;
1631 		bio_endio(bio);
1632 		return;
1633 	}
1634 
1635 	if (drbd_insert_fault(device, fault_type))
1636 		bio_io_error(bio);
1637 	else
1638 		generic_make_request(bio);
1639 }
1640 
1641 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1642 			      enum write_ordering_e wo);
1643 
1644 /* drbd_proc.c */
1645 extern struct proc_dir_entry *drbd_proc;
1646 extern const struct file_operations drbd_proc_fops;
1647 
1648 /* drbd_actlog.c */
1649 extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1650 extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1651 extern void drbd_al_begin_io_commit(struct drbd_device *device);
1652 extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1653 extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1654 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1655 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1656 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1657 extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1658 extern void drbd_rs_cancel_all(struct drbd_device *device);
1659 extern int drbd_rs_del_all(struct drbd_device *device);
1660 extern void drbd_rs_failed_io(struct drbd_device *device,
1661 		sector_t sector, int size);
1662 extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1663 
1664 enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1665 extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1666 		enum update_sync_bits_mode mode);
1667 #define drbd_set_in_sync(device, sector, size) \
1668 	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
1669 #define drbd_set_out_of_sync(device, sector, size) \
1670 	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1671 #define drbd_rs_failed_io(device, sector, size) \
1672 	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
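
/* Usage sketch (hypothetical): a failed local write marks the affected
 * area out of sync, so a later resync picks it up again:
 *
 *	drbd_set_out_of_sync(device, req->i.sector, req->i.size);
 */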
1673 extern void drbd_al_shrink(struct drbd_device *device);
1674 extern int drbd_al_initialize(struct drbd_device *, void *);
1675 
1676 /* drbd_nl.c */
1677 /* state info broadcast */
1678 struct sib_info {
1679 	enum drbd_state_info_bcast_reason sib_reason;
1680 	union {
1681 		struct {
1682 			char *helper_name;
1683 			unsigned helper_exit_code;
1684 		};
1685 		struct {
1686 			union drbd_state os;
1687 			union drbd_state ns;
1688 		};
1689 	};
1690 };
1691 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
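
/* Hypothetical example of such a broadcast after a helper has run
 * (cf. drbd_khelper() in drbd_nl.c); "fence-peer" and status are
 * made-up placeholders:
 *
 *	struct sib_info sib = {
 *		.sib_reason = SIB_HELPER_POST,
 *		.helper_name = "fence-peer",
 *		.helper_exit_code = status,
 *	};
 *	drbd_bcast_event(device, &sib);
 */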
1692 
1693 extern void notify_resource_state(struct sk_buff *,
1694 				  unsigned int,
1695 				  struct drbd_resource *,
1696 				  struct resource_info *,
1697 				  enum drbd_notification_type);
1698 extern void notify_device_state(struct sk_buff *,
1699 				unsigned int,
1700 				struct drbd_device *,
1701 				struct device_info *,
1702 				enum drbd_notification_type);
1703 extern void notify_connection_state(struct sk_buff *,
1704 				    unsigned int,
1705 				    struct drbd_connection *,
1706 				    struct connection_info *,
1707 				    enum drbd_notification_type);
1708 extern void notify_peer_device_state(struct sk_buff *,
1709 				     unsigned int,
1710 				     struct drbd_peer_device *,
1711 				     struct peer_device_info *,
1712 				     enum drbd_notification_type);
1713 extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1714 			  struct drbd_connection *, const char *, int);
1715 
1716 /*
1717  * inline helper functions
1718  *************************/
1719 
1720 /* see also page_chain_add and friends in drbd_receiver.c */
1721 static inline struct page *page_chain_next(struct page *page)
1722 {
1723 	return (struct page *)page_private(page);
1724 }
1725 #define page_chain_for_each(page) \
1726 	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1727 			page = page_chain_next(page))
1728 #define page_chain_for_each_safe(page, n) \
1729 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
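/* (drbd_peer_req_has_active_page() below is a typical user of
 * page_chain_for_each().) */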
1730 
1731 
1732 static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1733 {
1734 	struct page *page = peer_req->pages;
1735 	page_chain_for_each(page) {
1736 		if (page_count(page) > 1)
1737 			return 1;
1738 	}
1739 	return 0;
1740 }
1741 
1742 static inline union drbd_state drbd_read_state(struct drbd_device *device)
1743 {
1744 	struct drbd_resource *resource = device->resource;
1745 	union drbd_state rv;
1746 
1747 	rv.i = device->state.i;
1748 	rv.susp = resource->susp;
1749 	rv.susp_nod = resource->susp_nod;
1750 	rv.susp_fen = resource->susp_fen;
1751 
1752 	return rv;
1753 }
1754 
1755 enum drbd_force_detach_flags {
1756 	DRBD_READ_ERROR,
1757 	DRBD_WRITE_ERROR,
1758 	DRBD_META_IO_ERROR,
1759 	DRBD_FORCE_DETACH,
1760 };
1761 
1762 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1763 static inline void __drbd_chk_io_error_(struct drbd_device *device,
1764 		enum drbd_force_detach_flags df,
1765 		const char *where)
1766 {
1767 	enum drbd_io_error_p ep;
1768 
1769 	rcu_read_lock();
1770 	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1771 	rcu_read_unlock();
1772 	switch (ep) {
1773 	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1774 		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1775 			if (__ratelimit(&drbd_ratelimit_state))
1776 				drbd_err(device, "Local IO failed in %s.\n", where);
1777 			if (device->state.disk > D_INCONSISTENT)
1778 				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1779 			break;
1780 		}
1781 		/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1782 	case EP_DETACH:
1783 	case EP_CALL_HELPER:
1784 		/* Remember whether we saw a READ or WRITE error.
1785 		 *
1786 		 * Recovery of the affected area for WRITE failure is covered
1787 		 * by the activity log.
1788 		 * READ errors may fall outside that area though. Certain READ
1789 		 * errors can be "healed" by writing good data to the affected
1790 		 * blocks, which triggers block re-allocation in lower layers.
1791 		 *
1792 		 * If we can not write the bitmap after a READ error,
1793 		 * we may need to trigger a full sync (see w_go_diskless()).
1794 		 *
1795 		 * Force-detach is not really an IO error, but rather a
1796 		 * desperate measure to try to deal with a completely
1797 		 * unresponsive lower level IO stack.
1798 		 * Still it should be treated as a WRITE error.
1799 		 *
1800 		 * Meta IO error is always WRITE error:
1801 		 * we read meta data only once during attach,
1802 		 * which will fail in case of errors.
1803 		 */
1804 		set_bit(WAS_IO_ERROR, &device->flags);
1805 		if (df == DRBD_READ_ERROR)
1806 			set_bit(WAS_READ_ERROR, &device->flags);
1807 		if (df == DRBD_FORCE_DETACH)
1808 			set_bit(FORCE_DETACH, &device->flags);
1809 		if (device->state.disk > D_FAILED) {
1810 			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1811 			drbd_err(device,
1812 				"Local IO failed in %s. Detaching...\n", where);
1813 		}
1814 		break;
1815 	}
1816 }
1817 
1818 /**
1819  * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
1820  * @device:	 DRBD device.
1821  * @error:	 Error code passed to the IO completion callback
1822  * @forcedetach: Force detach, i.e. the error happened while accessing the meta data
1823  *
1824  * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1825  */
1826 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1827 static inline void drbd_chk_io_error_(struct drbd_device *device,
1828 	int error, enum drbd_force_detach_flags forcedetach, const char *where)
1829 {
1830 	if (error) {
1831 		unsigned long flags;
1832 		spin_lock_irqsave(&device->resource->req_lock, flags);
1833 		__drbd_chk_io_error_(device, forcedetach, where);
1834 		spin_unlock_irqrestore(&device->resource->req_lock, flags);
1835 	}
1836 }
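
/* Typical call site sketch (hypothetical; cf. the bi_end_io handlers in
 * drbd_worker.c):
 *
 *	if (bio->bi_error)
 *		drbd_chk_io_error(device, bio->bi_error, DRBD_WRITE_ERROR);
 */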
1837 
1838 
1839 /**
1840  * drbd_md_first_sector() - Returns the first sector number of the meta data area
1841  * @bdev:	Meta data block device.
1842  *
1843  * BTW, for internal meta data, this happens to be the maximum capacity
1844  * we could agree upon with our peer node.
1845  */
1846 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1847 {
1848 	switch (bdev->md.meta_dev_idx) {
1849 	case DRBD_MD_INDEX_INTERNAL:
1850 	case DRBD_MD_INDEX_FLEX_INT:
1851 		return bdev->md.md_offset + bdev->md.bm_offset;
1852 	case DRBD_MD_INDEX_FLEX_EXT:
1853 	default:
1854 		return bdev->md.md_offset;
1855 	}
1856 }
1857 
1858 /**
1859  * drbd_md_last_sector() - Return the last sector number of the meta data area
1860  * @bdev:	Meta data block device.
1861  */
1862 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1863 {
1864 	switch (bdev->md.meta_dev_idx) {
1865 	case DRBD_MD_INDEX_INTERNAL:
1866 	case DRBD_MD_INDEX_FLEX_INT:
1867 		return bdev->md.md_offset + MD_4kB_SECT - 1;
1868 	case DRBD_MD_INDEX_FLEX_EXT:
1869 	default:
1870 		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
1871 	}
1872 }
1873 
1874 /* Returns the number of 512 byte sectors of the device */
1875 static inline sector_t drbd_get_capacity(struct block_device *bdev)
1876 {
1877 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1878 	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1879 }
1880 
1881 /**
1882  * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1883  * @bdev:	Meta data block device.
1884  *
1885  * Returns the capacity we announce to our peer.  We clip ourselves at the
1886  * various MAX_SECTORS, because if we don't, the current implementation will
1887  * oops sooner or later.
1888  */
1889 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1890 {
1891 	sector_t s;
1892 
1893 	switch (bdev->md.meta_dev_idx) {
1894 	case DRBD_MD_INDEX_INTERNAL:
1895 	case DRBD_MD_INDEX_FLEX_INT:
1896 		s = drbd_get_capacity(bdev->backing_bdev)
1897 			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1898 				drbd_md_first_sector(bdev))
1899 			: 0;
1900 		break;
1901 	case DRBD_MD_INDEX_FLEX_EXT:
1902 		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1903 				drbd_get_capacity(bdev->backing_bdev));
1904 		/* clip at maximum size the meta device can support */
1905 		s = min_t(sector_t, s,
1906 			BM_EXT_TO_SECT(bdev->md.md_size_sect
1907 				     - bdev->md.bm_offset));
1908 		break;
1909 	default:
1910 		s = min_t(sector_t, DRBD_MAX_SECTORS,
1911 				drbd_get_capacity(bdev->backing_bdev));
1912 	}
1913 	return s;
1914 }
1915 
1916 /**
1917  * drbd_md_ss() - Return the sector number of our meta data super block
1918  * @bdev:	Meta data block device.
1919  */
1920 static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1921 {
1922 	const int meta_dev_idx = bdev->md.meta_dev_idx;
1923 
1924 	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1925 		return 0;
1926 
1927 	/* Since drbd08, internal meta data is always "flexible".
1928 	 * position: last 4k aligned block of 4k size */
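	/* Worked example (illustration): a 1005-sector backing device gives
	 * (1005 & ~7ULL) - 8 = 992, i.e. the super block occupies sectors
	 * 992..999 -- the last 4k aligned block of 4k size that fits. */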
1929 	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1930 	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1931 		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1932 
1933 	/* external, some index; this is the old fixed size layout */
1934 	return MD_128MB_SECT * bdev->md.meta_dev_idx;
1935 }
1936 
1937 static inline void
1938 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1939 {
1940 	unsigned long flags;
1941 	spin_lock_irqsave(&q->q_lock, flags);
1942 	list_add_tail(&w->list, &q->q);
1943 	spin_unlock_irqrestore(&q->q_lock, flags);
1944 	wake_up(&q->q_wait);
1945 }
1946 
1947 static inline void
1948 drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1949 {
1950 	unsigned long flags;
1951 	spin_lock_irqsave(&q->q_lock, flags);
1952 	if (list_empty_careful(&w->list))
1953 		list_add_tail(&w->list, &q->q);
1954 	spin_unlock_irqrestore(&q->q_lock, flags);
1955 	wake_up(&q->q_wait);
1956 }
1957 
1958 static inline void
1959 drbd_device_post_work(struct drbd_device *device, int work_bit)
1960 {
1961 	if (!test_and_set_bit(work_bit, &device->flags)) {
1962 		struct drbd_connection *connection =
1963 			first_peer_device(device)->connection;
1964 		struct drbd_work_queue *q = &connection->sender_work;
1965 		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1966 			wake_up(&q->q_wait);
1967 	}
1968 }
1969 
1970 extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1971 
1972 /* To get the ack_receiver out of the blocking network stack,
1973  * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1974  * and send a ping, we need to send a signal.
1975  * Which signal we send is irrelevant. */
1976 static inline void wake_ack_receiver(struct drbd_connection *connection)
1977 {
1978 	struct task_struct *task = connection->ack_receiver.task;
1979 	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1980 		force_sig(SIGXCPU, task);
1981 }
1982 
1983 static inline void request_ping(struct drbd_connection *connection)
1984 {
1985 	set_bit(SEND_PING, &connection->flags);
1986 	wake_ack_receiver(connection);
1987 }
1988 
1989 extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1990 extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1991 extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1992 			     enum drbd_packet, unsigned int, void *,
1993 			     unsigned int);
1994 extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1995 			     enum drbd_packet, unsigned int, void *,
1996 			     unsigned int);
1997 
1998 extern int drbd_send_ping(struct drbd_connection *connection);
1999 extern int drbd_send_ping_ack(struct drbd_connection *connection);
2000 extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
2001 extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
2002 
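/* Convenience wrappers around _drbd_thread_stop(); the two boolean
 * arguments are assumed to mean (restart, wait). */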
2003 static inline void drbd_thread_stop(struct drbd_thread *thi)
2004 {
2005 	_drbd_thread_stop(thi, false, true);
2006 }
2007 
2008 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
2009 {
2010 	_drbd_thread_stop(thi, false, false);
2011 }
2012 
2013 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
2014 {
2015 	_drbd_thread_stop(thi, true, false);
2016 }
2017 
2018 /* counts how many answer packets we expect from our peer,
2019  * for either explicit application requests,
2020  * or implicit barrier packets as necessary.
2021  * increased:
2022  *  w_send_barrier
2023  *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
2024  *    it is much easier and equally valid to count what we queue for the
2025  *    worker, even before it actually was queued or sent.
2026  *    (drbd_make_request_common; recovery path on read io-error)
2027  * decreased:
2028  *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
2029  *  _req_mod(req, DATA_RECEIVED)
2030  *     [from receive_DataReply]
2031  *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
2032  *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
2033  *     for some reason it is NOT decreased in got_NegAck,
2034  *     but in the resulting cleanup code from report_params.
2035  *     we should try to remember the reason for that...
2036  *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
2037  *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
2038  *     [from tl_clear_barrier]
2039  */
2040 static inline void inc_ap_pending(struct drbd_device *device)
2041 {
2042 	atomic_inc(&device->ap_pending_cnt);
2043 }
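
/* Usage sketch: the counter brackets a request's network round trip:
 *
 *	inc_ap_pending(device);   // when queueing a request for the peer
 *	...
 *	dec_ap_pending(device);   // when the matching answer arrives
 */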
2044 
2045 #define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
2046 	if (atomic_read(&device->which) < 0)				\
2047 		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
2048 			func, line,					\
2049 			atomic_read(&device->which))
2050 
2051 #define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2052 static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2053 {
2054 	if (atomic_dec_and_test(&device->ap_pending_cnt))
2055 		wake_up(&device->misc_wait);
2056 	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2057 }
2058 
2059 /* counts how many resync-related answers we still expect from the peer
2060  *		     increase			decrease
2061  * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
2062  * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
2063  *					   (or P_NEG_ACK with ID_SYNCER)
2064  */
2065 static inline void inc_rs_pending(struct drbd_device *device)
2066 {
2067 	atomic_inc(&device->rs_pending_cnt);
2068 }
2069 
2070 #define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2071 static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2072 {
2073 	atomic_dec(&device->rs_pending_cnt);
2074 	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2075 }
2076 
2077 /* counts how many answers we still need to send to the peer.
2078  * increased on
2079  *  receive_Data	unless protocol A;
2080  *			we need to send a P_RECV_ACK (proto B)
2081  *			or P_WRITE_ACK (proto C)
2082  *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
2083  *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
2084  *  receive_Barrier_*	we need to send a P_BARRIER_ACK
2085  */
2086 static inline void inc_unacked(struct drbd_device *device)
2087 {
2088 	atomic_inc(&device->unacked_cnt);
2089 }
2090 
2091 #define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2092 static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2093 {
2094 	atomic_dec(&device->unacked_cnt);
2095 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2096 }
2097 
2098 #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2099 static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2100 {
2101 	atomic_sub(n, &device->unacked_cnt);
2102 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2103 }
2104 
2105 static inline bool is_sync_target_state(enum drbd_conns connection_state)
2106 {
2107 	return	connection_state == C_SYNC_TARGET ||
2108 		connection_state == C_PAUSED_SYNC_T;
2109 }
2110 
2111 static inline bool is_sync_source_state(enum drbd_conns connection_state)
2112 {
2113 	return	connection_state == C_SYNC_SOURCE ||
2114 		connection_state == C_PAUSED_SYNC_S;
2115 }
2116 
2117 static inline bool is_sync_state(enum drbd_conns connection_state)
2118 {
2119 	return	is_sync_source_state(connection_state) ||
2120 		is_sync_target_state(connection_state);
2121 }
2122 
2123 /**
2124  * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
2125  * @_device:		DRBD device.
2126  * @_min_state:		Minimum device state required for success.
2127  *
2128  * You have to call put_ldev() when finished working with device->ldev.
2129  */
2130 #define get_ldev_if_state(_device, _min_state)				\
2131 	(_get_ldev_if_state((_device), (_min_state)) ?			\
2132 	 ({ __acquire(x); true; }) : false)
2133 #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2134 
2135 static inline void put_ldev(struct drbd_device *device)
2136 {
2137 	enum drbd_disk_state disk_state = device->state.disk;
2138 	/* We must check the state *before* the atomic_dec becomes visible,
2139 	 * or we have a theoretical race where someone hitting zero,
2140 	 * while the state is still D_FAILED, would then see D_DISKLESS in the
2141 	 * condition below and call into destroy, which it must not do yet. */
2142 	int i = atomic_dec_return(&device->local_cnt);
2143 
2144 	/* This may be called from some endio handler,
2145 	 * so we must not sleep here. */
2146 
2147 	__release(local);
2148 	D_ASSERT(device, i >= 0);
2149 	if (i == 0) {
2150 		if (disk_state == D_DISKLESS)
2151 			/* even internal references gone, safe to destroy */
2152 			drbd_device_post_work(device, DESTROY_DISK);
2153 		if (disk_state == D_FAILED)
2154 			/* all application IO references gone. */
2155 			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2156 				drbd_device_post_work(device, GO_DISKLESS);
2157 		wake_up(&device->misc_wait);
2158 	}
2159 }
2160 
2161 #ifndef __CHECKER__
2162 static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2163 {
2164 	int io_allowed;
2165 
2166 	/* never get a reference while D_DISKLESS */
2167 	if (device->state.disk == D_DISKLESS)
2168 		return 0;
2169 
2170 	atomic_inc(&device->local_cnt);
2171 	io_allowed = (device->state.disk >= mins);
2172 	if (!io_allowed)
2173 		put_ldev(device);
2174 	return io_allowed;
2175 }
2176 #else
2177 extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2178 #endif
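
/* Canonical usage sketch (illustration only): bracket every access to
 * device->ldev with a reference:
 *
 *	if (get_ldev(device)) {
 *		// safe to dereference device->ldev here
 *		put_ldev(device);
 *	}
 */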
2179 
2180 /* this throttles on-the-fly application requests
2181  * according to max_buffers settings;
2182  * maybe re-implement using semaphores? */
2183 static inline int drbd_get_max_buffers(struct drbd_device *device)
2184 {
2185 	struct net_conf *nc;
2186 	int mxb;
2187 
2188 	rcu_read_lock();
2189 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2190 	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
2191 	rcu_read_unlock();
2192 
2193 	return mxb;
2194 }
2195 
2196 static inline int drbd_state_is_stable(struct drbd_device *device)
2197 {
2198 	union drbd_dev_state s = device->state;
2199 
2200 	/* DO NOT add a default clause, we want the compiler to warn us
2201 	 * for any newly introduced state we may have forgotten to add here */
2202 
2203 	switch ((enum drbd_conns)s.conn) {
2204 	/* new io only accepted when there is no connection, ... */
2205 	case C_STANDALONE:
2206 	case C_WF_CONNECTION:
2207 	/* ... or there is a well established connection. */
2208 	case C_CONNECTED:
2209 	case C_SYNC_SOURCE:
2210 	case C_SYNC_TARGET:
2211 	case C_VERIFY_S:
2212 	case C_VERIFY_T:
2213 	case C_PAUSED_SYNC_S:
2214 	case C_PAUSED_SYNC_T:
2215 	case C_AHEAD:
2216 	case C_BEHIND:
2217 		/* transitional states, IO allowed */
2218 	case C_DISCONNECTING:
2219 	case C_UNCONNECTED:
2220 	case C_TIMEOUT:
2221 	case C_BROKEN_PIPE:
2222 	case C_NETWORK_FAILURE:
2223 	case C_PROTOCOL_ERROR:
2224 	case C_TEAR_DOWN:
2225 	case C_WF_REPORT_PARAMS:
2226 	case C_STARTING_SYNC_S:
2227 	case C_STARTING_SYNC_T:
2228 		break;
2229 
2230 		/* Allow IO in BM exchange states with new protocols */
2231 	case C_WF_BITMAP_S:
2232 		if (first_peer_device(device)->connection->agreed_pro_version < 96)
2233 			return 0;
2234 		break;
2235 
2236 		/* no new io accepted in these states */
2237 	case C_WF_BITMAP_T:
2238 	case C_WF_SYNC_UUID:
2239 	case C_MASK:
2240 		/* not "stable" */
2241 		return 0;
2242 	}
2243 
2244 	switch ((enum drbd_disk_state)s.disk) {
2245 	case D_DISKLESS:
2246 	case D_INCONSISTENT:
2247 	case D_OUTDATED:
2248 	case D_CONSISTENT:
2249 	case D_UP_TO_DATE:
2250 	case D_FAILED:
2251 		/* disk state is stable as well. */
2252 		break;
2253 
2254 	/* no new io accepted during transitional states */
2255 	case D_ATTACHING:
2256 	case D_NEGOTIATING:
2257 	case D_UNKNOWN:
2258 	case D_MASK:
2259 		/* not "stable" */
2260 		return 0;
2261 	}
2262 
2263 	return 1;
2264 }
2265 
2266 static inline int drbd_suspended(struct drbd_device *device)
2267 {
2268 	struct drbd_resource *resource = device->resource;
2269 
2270 	return resource->susp || resource->susp_fen || resource->susp_nod;
2271 }
2272 
2273 static inline bool may_inc_ap_bio(struct drbd_device *device)
2274 {
2275 	int mxb = drbd_get_max_buffers(device);
2276 
2277 	if (drbd_suspended(device))
2278 		return false;
2279 	if (atomic_read(&device->suspend_cnt))
2280 		return false;
2281 
2282 	/* to avoid potential deadlock or bitmap corruption in various
2283 	 * places, we only allow new application io to start during
2284 	 * "stable" states. */
2285 
2286 	/* no new io accepted when attaching or detaching the disk */
2287 	if (!drbd_state_is_stable(device))
2288 		return false;
2289 
2290 	/* since some older kernels don't have atomic_add_unless,
2291 	 * and we are within the spinlock anyway, we have this workaround.  */
2292 	if (atomic_read(&device->ap_bio_cnt) > mxb)
2293 		return false;
2294 	if (test_bit(BITMAP_IO, &device->flags))
2295 		return false;
2296 	return true;
2297 }
2298 
2299 static inline bool inc_ap_bio_cond(struct drbd_device *device)
2300 {
2301 	bool rv = false;
2302 
2303 	spin_lock_irq(&device->resource->req_lock);
2304 	rv = may_inc_ap_bio(device);
2305 	if (rv)
2306 		atomic_inc(&device->ap_bio_cnt);
2307 	spin_unlock_irq(&device->resource->req_lock);
2308 
2309 	return rv;
2310 }
2311 
2312 static inline void inc_ap_bio(struct drbd_device *device)
2313 {
2314 	/* we wait here
2315 	 *    as long as the device is suspended,
2316 	 *    until the bitmap is no longer on the fly during the connection
2317 	 *    handshake, and as long as we would exceed the max_buffer limit.
2318 	 *
2319 	 * to avoid races with the reconnect code,
2320 	 * we need to atomic_inc within the spinlock. */
2321 
2322 	wait_event(device->misc_wait, inc_ap_bio_cond(device));
2323 }
2324 
2325 static inline void dec_ap_bio(struct drbd_device *device)
2326 {
2327 	int mxb = drbd_get_max_buffers(device);
2328 	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2329 
2330 	D_ASSERT(device, ap_bio >= 0);
2331 
2332 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2333 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2334 			drbd_queue_work(&first_peer_device(device)->
2335 				connection->sender_work,
2336 				&device->bm_io_work.w);
2337 	}
2338 
2339 	/* this currently does wake_up for every dec_ap_bio!
2340 	 * maybe rather introduce some type of hysteresis?
2341 	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2342 	if (ap_bio < mxb)
2343 		wake_up(&device->misc_wait);
2344 }
2345 
2346 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2347 {
2348 	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2349 		first_peer_device(device)->connection->agreed_pro_version != 100;
2350 }
2351 
2352 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2353 {
2354 	int changed = device->ed_uuid != val;
2355 	device->ed_uuid = val;
2356 	return changed;
2357 }
2358 
2359 static inline int drbd_queue_order_type(struct drbd_device *device)
2360 {
2361 	/* sorry, we currently have no working implementation
2362 	 * of distributed TCQ stuff */
2363 #ifndef QUEUE_ORDERED_NONE
2364 #define QUEUE_ORDERED_NONE 0
2365 #endif
2366 	return QUEUE_ORDERED_NONE;
2367 }
2368 
2369 static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2370 {
2371 	return list_first_entry_or_null(&resource->connections,
2372 				struct drbd_connection, connections);
2373 }
2374 
2375 #endif
2376