1 /* 2 drbd_int.h 3 4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5 6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9 10 drbd is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2, or (at your option) 13 any later version. 14 15 drbd is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with drbd; see the file COPYING. If not, write to 22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 24 */ 25 26 #ifndef _DRBD_INT_H 27 #define _DRBD_INT_H 28 29 #include <linux/compiler.h> 30 #include <linux/types.h> 31 #include <linux/list.h> 32 #include <linux/sched.h> 33 #include <linux/bitops.h> 34 #include <linux/slab.h> 35 #include <linux/crypto.h> 36 #include <linux/ratelimit.h> 37 #include <linux/tcp.h> 38 #include <linux/mutex.h> 39 #include <linux/major.h> 40 #include <linux/blkdev.h> 41 #include <linux/genhd.h> 42 #include <linux/idr.h> 43 #include <net/tcp.h> 44 #include <linux/lru_cache.h> 45 #include <linux/prefetch.h> 46 #include <linux/drbd_genl_api.h> 47 #include <linux/drbd.h> 48 #include "drbd_state.h" 49 50 #ifdef __CHECKER__ 51 # define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr"))) 52 # define __protected_read_by(x) __attribute__((require_context(x,1,999,"read"))) 53 # define __protected_write_by(x) __attribute__((require_context(x,1,999,"write"))) 54 # define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call"))) 55 #else 56 # define __protected_by(x) 57 # define __protected_read_by(x) 58 # define __protected_write_by(x) 59 # define __must_hold(x) 60 #endif 61 62 #define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0) 63 64 /* module parameter, defined in drbd_main.c */ 65 extern unsigned int minor_count; 66 extern bool disable_sendpage; 67 extern bool allow_oos; 68 69 #ifdef CONFIG_DRBD_FAULT_INJECTION 70 extern int enable_faults; 71 extern int fault_rate; 72 extern int fault_devs; 73 #endif 74 75 extern char usermode_helper[]; 76 77 78 /* I don't remember why XCPU ... 79 * This is used to wake the asender, 80 * and to interrupt sending the sending task 81 * on disconnect. 82 */ 83 #define DRBD_SIG SIGXCPU 84 85 /* This is used to stop/restart our threads. 86 * Cannot use SIGTERM nor SIGKILL, since these 87 * are sent out by init on runlevel changes 88 * I choose SIGHUP for now. 89 */ 90 #define DRBD_SIGKILL SIGHUP 91 92 #define ID_IN_SYNC (4711ULL) 93 #define ID_OUT_OF_SYNC (4712ULL) 94 #define ID_SYNCER (-1ULL) 95 96 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) 97 98 struct drbd_conf; 99 struct drbd_tconn; 100 101 102 /* to shorten dev_warn(DEV, "msg"); and relatives statements */ 103 #define DEV (disk_to_dev(mdev->vdisk)) 104 105 #define conn_printk(LEVEL, TCONN, FMT, ARGS...) \ 106 printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS) 107 #define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS) 108 #define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS) 109 #define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS) 110 #define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS) 111 #define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS) 112 #define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS) 113 #define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS) 114 115 #define D_ASSERT(exp) if (!(exp)) \ 116 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) 117 118 /** 119 * expect - Make an assertion 120 * 121 * Unlike the assert macro, this macro returns a boolean result. 122 */ 123 #define expect(exp) ({ \ 124 bool _bool = (exp); \ 125 if (!_bool) \ 126 dev_err(DEV, "ASSERTION %s FAILED in %s\n", \ 127 #exp, __func__); \ 128 _bool; \ 129 }) 130 131 /* Defines to control fault insertion */ 132 enum { 133 DRBD_FAULT_MD_WR = 0, /* meta data write */ 134 DRBD_FAULT_MD_RD = 1, /* read */ 135 DRBD_FAULT_RS_WR = 2, /* resync */ 136 DRBD_FAULT_RS_RD = 3, 137 DRBD_FAULT_DT_WR = 4, /* data */ 138 DRBD_FAULT_DT_RD = 5, 139 DRBD_FAULT_DT_RA = 6, /* data read ahead */ 140 DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */ 141 DRBD_FAULT_AL_EE = 8, /* alloc ee */ 142 DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */ 143 144 DRBD_FAULT_MAX, 145 }; 146 147 extern unsigned int 148 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); 149 150 static inline int 151 drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { 152 #ifdef CONFIG_DRBD_FAULT_INJECTION 153 return fault_rate && 154 (enable_faults & (1<<type)) && 155 _drbd_insert_fault(mdev, type); 156 #else 157 return 0; 158 #endif 159 } 160 161 /* integer division, round _UP_ to the next integer */ 162 #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) 163 /* usual integer division */ 164 #define div_floor(A, B) ((A)/(B)) 165 166 extern struct ratelimit_state drbd_ratelimit_state; 167 extern struct idr minors; /* RCU, updates: genl_lock() */ 168 extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */ 169 170 /* on the wire */ 171 enum drbd_packet { 172 /* receiver (data socket) */ 173 P_DATA = 0x00, 174 P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */ 175 P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */ 176 P_BARRIER = 0x03, 177 P_BITMAP = 0x04, 178 P_BECOME_SYNC_TARGET = 0x05, 179 P_BECOME_SYNC_SOURCE = 0x06, 180 P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */ 181 P_DATA_REQUEST = 0x08, /* Used to ask for a data block */ 182 P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */ 183 P_SYNC_PARAM = 0x0a, 184 P_PROTOCOL = 0x0b, 185 P_UUIDS = 0x0c, 186 P_SIZES = 0x0d, 187 P_STATE = 0x0e, 188 P_SYNC_UUID = 0x0f, 189 P_AUTH_CHALLENGE = 0x10, 190 P_AUTH_RESPONSE = 0x11, 191 P_STATE_CHG_REQ = 0x12, 192 193 /* asender (meta socket */ 194 P_PING = 0x13, 195 P_PING_ACK = 0x14, 196 P_RECV_ACK = 0x15, /* Used in protocol B */ 197 P_WRITE_ACK = 0x16, /* Used in protocol C */ 198 P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */ 199 P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */ 200 P_NEG_ACK = 0x19, /* Sent if local disk is unusable */ 201 P_NEG_DREPLY = 0x1a, /* Local disk is broken... */ 202 P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */ 203 P_BARRIER_ACK = 0x1c, 204 P_STATE_CHG_REPLY = 0x1d, 205 206 /* "new" commands, no longer fitting into the ordering scheme above */ 207 208 P_OV_REQUEST = 0x1e, /* data socket */ 209 P_OV_REPLY = 0x1f, 210 P_OV_RESULT = 0x20, /* meta socket */ 211 P_CSUM_RS_REQUEST = 0x21, /* data socket */ 212 P_RS_IS_IN_SYNC = 0x22, /* meta socket */ 213 P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */ 214 P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */ 215 /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ 216 /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ 217 P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ 218 P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */ 219 P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */ 220 P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */ 221 P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */ 222 P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */ 223 P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */ 224 225 P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ 226 P_MAX_OPT_CMD = 0x101, 227 228 /* special command ids for handshake */ 229 230 P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */ 231 P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */ 232 233 P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */ 234 }; 235 236 extern const char *cmdname(enum drbd_packet cmd); 237 238 /* for sending/receiving the bitmap, 239 * possibly in some encoding scheme */ 240 struct bm_xfer_ctx { 241 /* "const" 242 * stores total bits and long words 243 * of the bitmap, so we don't need to 244 * call the accessor functions over and again. */ 245 unsigned long bm_bits; 246 unsigned long bm_words; 247 /* during xfer, current position within the bitmap */ 248 unsigned long bit_offset; 249 unsigned long word_offset; 250 251 /* statistics; index: (h->command == P_BITMAP) */ 252 unsigned packets[2]; 253 unsigned bytes[2]; 254 }; 255 256 extern void INFO_bm_xfer_stats(struct drbd_conf *mdev, 257 const char *direction, struct bm_xfer_ctx *c); 258 259 static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c) 260 { 261 /* word_offset counts "native long words" (32 or 64 bit), 262 * aligned at 64 bit. 263 * Encoded packet may end at an unaligned bit offset. 264 * In case a fallback clear text packet is transmitted in 265 * between, we adjust this offset back to the last 64bit 266 * aligned "native long word", which makes coding and decoding 267 * the plain text bitmap much more convenient. */ 268 #if BITS_PER_LONG == 64 269 c->word_offset = c->bit_offset >> 6; 270 #elif BITS_PER_LONG == 32 271 c->word_offset = c->bit_offset >> 5; 272 c->word_offset &= ~(1UL); 273 #else 274 # error "unsupported BITS_PER_LONG" 275 #endif 276 } 277 278 #ifndef __packed 279 #define __packed __attribute__((packed)) 280 #endif 281 282 /* This is the layout for a packet on the wire. 283 * The byteorder is the network byte order. 284 * (except block_id and barrier fields. 285 * these are pointers to local structs 286 * and have no relevance for the partner, 287 * which just echoes them as received.) 288 * 289 * NOTE that the payload starts at a long aligned offset, 290 * regardless of 32 or 64 bit arch! 291 */ 292 struct p_header80 { 293 u32 magic; 294 u16 command; 295 u16 length; /* bytes of data after this header */ 296 } __packed; 297 298 /* Header for big packets, Used for data packets exceeding 64kB */ 299 struct p_header95 { 300 u16 magic; /* use DRBD_MAGIC_BIG here */ 301 u16 command; 302 u32 length; 303 } __packed; 304 305 struct p_header100 { 306 u32 magic; 307 u16 volume; 308 u16 command; 309 u32 length; 310 u32 pad; 311 } __packed; 312 313 extern unsigned int drbd_header_size(struct drbd_tconn *tconn); 314 315 /* these defines must not be changed without changing the protocol version */ 316 #define DP_HARDBARRIER 1 /* depricated */ 317 #define DP_RW_SYNC 2 /* equals REQ_SYNC */ 318 #define DP_MAY_SET_IN_SYNC 4 319 #define DP_UNPLUG 8 /* not used anymore */ 320 #define DP_FUA 16 /* equals REQ_FUA */ 321 #define DP_FLUSH 32 /* equals REQ_FLUSH */ 322 #define DP_DISCARD 64 /* equals REQ_DISCARD */ 323 #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */ 324 #define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */ 325 326 struct p_data { 327 u64 sector; /* 64 bits sector number */ 328 u64 block_id; /* to identify the request in protocol B&C */ 329 u32 seq_num; 330 u32 dp_flags; 331 } __packed; 332 333 /* 334 * commands which share a struct: 335 * p_block_ack: 336 * P_RECV_ACK (proto B), P_WRITE_ACK (proto C), 337 * P_SUPERSEDED (proto C, two-primaries conflict detection) 338 * p_block_req: 339 * P_DATA_REQUEST, P_RS_DATA_REQUEST 340 */ 341 struct p_block_ack { 342 u64 sector; 343 u64 block_id; 344 u32 blksize; 345 u32 seq_num; 346 } __packed; 347 348 struct p_block_req { 349 u64 sector; 350 u64 block_id; 351 u32 blksize; 352 u32 pad; /* to multiple of 8 Byte */ 353 } __packed; 354 355 /* 356 * commands with their own struct for additional fields: 357 * P_CONNECTION_FEATURES 358 * P_BARRIER 359 * P_BARRIER_ACK 360 * P_SYNC_PARAM 361 * ReportParams 362 */ 363 364 struct p_connection_features { 365 u32 protocol_min; 366 u32 feature_flags; 367 u32 protocol_max; 368 369 /* should be more than enough for future enhancements 370 * for now, feature_flags and the reserved array shall be zero. 371 */ 372 373 u32 _pad; 374 u64 reserved[7]; 375 } __packed; 376 377 struct p_barrier { 378 u32 barrier; /* barrier number _handle_ only */ 379 u32 pad; /* to multiple of 8 Byte */ 380 } __packed; 381 382 struct p_barrier_ack { 383 u32 barrier; 384 u32 set_size; 385 } __packed; 386 387 struct p_rs_param { 388 u32 resync_rate; 389 390 /* Since protocol version 88 and higher. */ 391 char verify_alg[0]; 392 } __packed; 393 394 struct p_rs_param_89 { 395 u32 resync_rate; 396 /* protocol version 89: */ 397 char verify_alg[SHARED_SECRET_MAX]; 398 char csums_alg[SHARED_SECRET_MAX]; 399 } __packed; 400 401 struct p_rs_param_95 { 402 u32 resync_rate; 403 char verify_alg[SHARED_SECRET_MAX]; 404 char csums_alg[SHARED_SECRET_MAX]; 405 u32 c_plan_ahead; 406 u32 c_delay_target; 407 u32 c_fill_target; 408 u32 c_max_rate; 409 } __packed; 410 411 enum drbd_conn_flags { 412 CF_DISCARD_MY_DATA = 1, 413 CF_DRY_RUN = 2, 414 }; 415 416 struct p_protocol { 417 u32 protocol; 418 u32 after_sb_0p; 419 u32 after_sb_1p; 420 u32 after_sb_2p; 421 u32 conn_flags; 422 u32 two_primaries; 423 424 /* Since protocol version 87 and higher. */ 425 char integrity_alg[0]; 426 427 } __packed; 428 429 struct p_uuids { 430 u64 uuid[UI_EXTENDED_SIZE]; 431 } __packed; 432 433 struct p_rs_uuid { 434 u64 uuid; 435 } __packed; 436 437 struct p_sizes { 438 u64 d_size; /* size of disk */ 439 u64 u_size; /* user requested size */ 440 u64 c_size; /* current exported size */ 441 u32 max_bio_size; /* Maximal size of a BIO */ 442 u16 queue_order_type; /* not yet implemented in DRBD*/ 443 u16 dds_flags; /* use enum dds_flags here. */ 444 } __packed; 445 446 struct p_state { 447 u32 state; 448 } __packed; 449 450 struct p_req_state { 451 u32 mask; 452 u32 val; 453 } __packed; 454 455 struct p_req_state_reply { 456 u32 retcode; 457 } __packed; 458 459 struct p_drbd06_param { 460 u64 size; 461 u32 state; 462 u32 blksize; 463 u32 protocol; 464 u32 version; 465 u32 gen_cnt[5]; 466 u32 bit_map_gen[5]; 467 } __packed; 468 469 struct p_block_desc { 470 u64 sector; 471 u32 blksize; 472 u32 pad; /* to multiple of 8 Byte */ 473 } __packed; 474 475 /* Valid values for the encoding field. 476 * Bump proto version when changing this. */ 477 enum drbd_bitmap_code { 478 /* RLE_VLI_Bytes = 0, 479 * and other bit variants had been defined during 480 * algorithm evaluation. */ 481 RLE_VLI_Bits = 2, 482 }; 483 484 struct p_compressed_bm { 485 /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code 486 * (encoding & 0x80): polarity (set/unset) of first runlength 487 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits 488 * used to pad up to head.length bytes 489 */ 490 u8 encoding; 491 492 u8 code[0]; 493 } __packed; 494 495 struct p_delay_probe93 { 496 u32 seq_num; /* sequence number to match the two probe packets */ 497 u32 offset; /* usecs the probe got sent after the reference time point */ 498 } __packed; 499 500 /* 501 * Bitmap packets need to fit within a single page on the sender and receiver, 502 * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger). 503 */ 504 #define DRBD_SOCKET_BUFFER_SIZE 4096 505 506 /**********************************************************************/ 507 enum drbd_thread_state { 508 NONE, 509 RUNNING, 510 EXITING, 511 RESTARTING 512 }; 513 514 struct drbd_thread { 515 spinlock_t t_lock; 516 struct task_struct *task; 517 struct completion stop; 518 enum drbd_thread_state t_state; 519 int (*function) (struct drbd_thread *); 520 struct drbd_tconn *tconn; 521 int reset_cpu_mask; 522 char name[9]; 523 }; 524 525 static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi) 526 { 527 /* THINK testing the t_state seems to be uncritical in all cases 528 * (but thread_{start,stop}), so we can read it *without* the lock. 529 * --lge */ 530 531 smp_rmb(); 532 return thi->t_state; 533 } 534 535 struct drbd_work { 536 struct list_head list; 537 int (*cb)(struct drbd_work *, int cancel); 538 union { 539 struct drbd_conf *mdev; 540 struct drbd_tconn *tconn; 541 }; 542 }; 543 544 #include "drbd_interval.h" 545 546 extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *); 547 548 struct drbd_request { 549 struct drbd_work w; 550 551 /* if local IO is not allowed, will be NULL. 552 * if local IO _is_ allowed, holds the locally submitted bio clone, 553 * or, after local IO completion, the ERR_PTR(error). 554 * see drbd_request_endio(). */ 555 struct bio *private_bio; 556 557 struct drbd_interval i; 558 559 /* epoch: used to check on "completion" whether this req was in 560 * the current epoch, and we therefore have to close it, 561 * causing a p_barrier packet to be send, starting a new epoch. 562 * 563 * This corresponds to "barrier" in struct p_barrier[_ack], 564 * and to "barrier_nr" in struct drbd_epoch (and various 565 * comments/function parameters/local variable names). 566 */ 567 unsigned int epoch; 568 569 struct list_head tl_requests; /* ring list in the transfer log */ 570 struct bio *master_bio; /* master bio pointer */ 571 unsigned long start_time; 572 573 /* once it hits 0, we may complete the master_bio */ 574 atomic_t completion_ref; 575 /* once it hits 0, we may destroy this drbd_request object */ 576 struct kref kref; 577 578 unsigned rq_state; /* see comments above _req_mod() */ 579 }; 580 581 struct drbd_epoch { 582 struct drbd_tconn *tconn; 583 struct list_head list; 584 unsigned int barrier_nr; 585 atomic_t epoch_size; /* increased on every request added. */ 586 atomic_t active; /* increased on every req. added, and dec on every finished. */ 587 unsigned long flags; 588 }; 589 590 /* drbd_epoch flag bits */ 591 enum { 592 DE_HAVE_BARRIER_NUMBER, 593 }; 594 595 enum epoch_event { 596 EV_PUT, 597 EV_GOT_BARRIER_NR, 598 EV_BECAME_LAST, 599 EV_CLEANUP = 32, /* used as flag */ 600 }; 601 602 struct drbd_wq_barrier { 603 struct drbd_work w; 604 struct completion done; 605 }; 606 607 struct digest_info { 608 int digest_size; 609 void *digest; 610 }; 611 612 struct drbd_peer_request { 613 struct drbd_work w; 614 struct drbd_epoch *epoch; /* for writes */ 615 struct page *pages; 616 atomic_t pending_bios; 617 struct drbd_interval i; 618 /* see comments on ee flag bits below */ 619 unsigned long flags; 620 union { 621 u64 block_id; 622 struct digest_info *digest; 623 }; 624 }; 625 626 /* ee flag bits. 627 * While corresponding bios are in flight, the only modification will be 628 * set_bit WAS_ERROR, which has to be atomic. 629 * If no bios are in flight yet, or all have been completed, 630 * non-atomic modification to ee->flags is ok. 631 */ 632 enum { 633 __EE_CALL_AL_COMPLETE_IO, 634 __EE_MAY_SET_IN_SYNC, 635 636 /* In case a barrier failed, 637 * we need to resubmit without the barrier flag. */ 638 __EE_RESUBMITTED, 639 640 /* we may have several bios per peer request. 641 * if any of those fail, we set this flag atomically 642 * from the endio callback */ 643 __EE_WAS_ERROR, 644 645 /* This ee has a pointer to a digest instead of a block id */ 646 __EE_HAS_DIGEST, 647 648 /* Conflicting local requests need to be restarted after this request */ 649 __EE_RESTART_REQUESTS, 650 651 /* The peer wants a write ACK for this (wire proto C) */ 652 __EE_SEND_WRITE_ACK, 653 654 /* Is set when net_conf had two_primaries set while creating this peer_req */ 655 __EE_IN_INTERVAL_TREE, 656 }; 657 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) 658 #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) 659 #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) 660 #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) 661 #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) 662 #define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS) 663 #define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK) 664 #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) 665 666 /* flag bits per mdev */ 667 enum { 668 UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ 669 MD_DIRTY, /* current uuids and flags not yet on disk */ 670 USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */ 671 CL_ST_CHG_SUCCESS, 672 CL_ST_CHG_FAIL, 673 CRASHED_PRIMARY, /* This node was a crashed primary. 674 * Gets cleared when the state.conn 675 * goes into C_CONNECTED state. */ 676 CONSIDER_RESYNC, 677 678 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ 679 SUSPEND_IO, /* suspend application io */ 680 BITMAP_IO, /* suspend application io; 681 once no more io in flight, start bitmap io */ 682 BITMAP_IO_QUEUED, /* Started bitmap IO */ 683 GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ 684 WAS_IO_ERROR, /* Local disk failed, returned IO error */ 685 WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */ 686 FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */ 687 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ 688 RESIZE_PENDING, /* Size change detected locally, waiting for the response from 689 * the peer, if it changed there as well. */ 690 NEW_CUR_UUID, /* Create new current UUID when thawing IO */ 691 AL_SUSPENDED, /* Activity logging is currently suspended. */ 692 AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ 693 B_RS_H_DONE, /* Before resync handler done (already executed) */ 694 DISCARD_MY_DATA, /* discard_my_data flag per volume */ 695 READ_BALANCE_RR, 696 }; 697 698 struct drbd_bitmap; /* opaque for drbd_conf */ 699 700 /* definition of bits in bm_flags to be used in drbd_bm_lock 701 * and drbd_bitmap_io and friends. */ 702 enum bm_flag { 703 /* do we need to kfree, or vfree bm_pages? */ 704 BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */ 705 706 /* currently locked for bulk operation */ 707 BM_LOCKED_MASK = 0xf, 708 709 /* in detail, that is: */ 710 BM_DONT_CLEAR = 0x1, 711 BM_DONT_SET = 0x2, 712 BM_DONT_TEST = 0x4, 713 714 /* so we can mark it locked for bulk operation, 715 * and still allow all non-bulk operations */ 716 BM_IS_LOCKED = 0x8, 717 718 /* (test bit, count bit) allowed (common case) */ 719 BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED, 720 721 /* testing bits, as well as setting new bits allowed, but clearing bits 722 * would be unexpected. Used during bitmap receive. Setting new bits 723 * requires sending of "out-of-sync" information, though. */ 724 BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED, 725 726 /* for drbd_bm_write_copy_pages, everything is allowed, 727 * only concurrent bulk operations are locked out. */ 728 BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED, 729 }; 730 731 struct drbd_work_queue { 732 struct list_head q; 733 spinlock_t q_lock; /* to protect the list. */ 734 wait_queue_head_t q_wait; 735 }; 736 737 struct drbd_socket { 738 struct mutex mutex; 739 struct socket *socket; 740 /* this way we get our 741 * send/receive buffers off the stack */ 742 void *sbuf; 743 void *rbuf; 744 }; 745 746 struct drbd_md { 747 u64 md_offset; /* sector offset to 'super' block */ 748 749 u64 la_size_sect; /* last agreed size, unit sectors */ 750 spinlock_t uuid_lock; 751 u64 uuid[UI_SIZE]; 752 u64 device_uuid; 753 u32 flags; 754 u32 md_size_sect; 755 756 s32 al_offset; /* signed relative sector offset to activity log */ 757 s32 bm_offset; /* signed relative sector offset to bitmap */ 758 759 /* cached value of bdev->disk_conf->meta_dev_idx (see below) */ 760 s32 meta_dev_idx; 761 762 /* see al_tr_number_to_on_disk_sector() */ 763 u32 al_stripes; 764 u32 al_stripe_size_4k; 765 u32 al_size_4k; /* cached product of the above */ 766 }; 767 768 struct drbd_backing_dev { 769 struct block_device *backing_bdev; 770 struct block_device *md_bdev; 771 struct drbd_md md; 772 struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */ 773 sector_t known_size; /* last known size of that backing device */ 774 }; 775 776 struct drbd_md_io { 777 unsigned int done; 778 int error; 779 }; 780 781 struct bm_io_work { 782 struct drbd_work w; 783 char *why; 784 enum bm_flag flags; 785 int (*io_fn)(struct drbd_conf *mdev); 786 void (*done)(struct drbd_conf *mdev, int rv); 787 }; 788 789 enum write_ordering_e { 790 WO_none, 791 WO_drain_io, 792 WO_bdev_flush, 793 }; 794 795 struct fifo_buffer { 796 unsigned int head_index; 797 unsigned int size; 798 int total; /* sum of all values */ 799 int values[0]; 800 }; 801 extern struct fifo_buffer *fifo_alloc(int fifo_size); 802 803 /* flag bits per tconn */ 804 enum { 805 NET_CONGESTED, /* The data socket is congested */ 806 RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */ 807 SEND_PING, /* whether asender should send a ping asap */ 808 SIGNAL_ASENDER, /* whether asender wants to be interrupted */ 809 GOT_PING_ACK, /* set when we receive a ping_ack packet, ping_wait gets woken */ 810 CONN_WD_ST_CHG_REQ, /* A cluster wide state change on the connection is active */ 811 CONN_WD_ST_CHG_OKAY, 812 CONN_WD_ST_CHG_FAIL, 813 CONN_DRY_RUN, /* Expect disconnect after resync handshake. */ 814 CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */ 815 STATE_SENT, /* Do not change state/UUIDs while this is set */ 816 CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC) 817 * pending, from drbd worker context. 818 * If set, bdi_write_congested() returns true, 819 * so shrink_page_list() would not recurse into, 820 * and potentially deadlock on, this drbd worker. 821 */ 822 DISCONNECT_SENT, 823 }; 824 825 struct drbd_tconn { /* is a resource from the config file */ 826 char *name; /* Resource name */ 827 struct list_head all_tconn; /* linked on global drbd_tconns */ 828 struct kref kref; 829 struct idr volumes; /* <tconn, vnr> to mdev mapping */ 830 enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */ 831 unsigned susp:1; /* IO suspended by user */ 832 unsigned susp_nod:1; /* IO suspended because no data */ 833 unsigned susp_fen:1; /* IO suspended because fence peer handler runs */ 834 struct mutex cstate_mutex; /* Protects graceful disconnects */ 835 unsigned int connect_cnt; /* Inc each time a connection is established */ 836 837 unsigned long flags; 838 struct net_conf *net_conf; /* content protected by rcu */ 839 struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */ 840 wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */ 841 struct res_opts res_opts; 842 843 struct sockaddr_storage my_addr; 844 int my_addr_len; 845 struct sockaddr_storage peer_addr; 846 int peer_addr_len; 847 848 struct drbd_socket data; /* data/barrier/cstate/parameter packets */ 849 struct drbd_socket meta; /* ping/ack (metadata) packets */ 850 int agreed_pro_version; /* actually used protocol version */ 851 unsigned long last_received; /* in jiffies, either socket */ 852 unsigned int ko_count; 853 854 spinlock_t req_lock; 855 856 struct list_head transfer_log; /* all requests not yet fully processed */ 857 858 struct crypto_hash *cram_hmac_tfm; 859 struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data->mutex */ 860 struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */ 861 struct crypto_hash *csums_tfm; 862 struct crypto_hash *verify_tfm; 863 void *int_dig_in; 864 void *int_dig_vv; 865 866 /* receiver side */ 867 struct drbd_epoch *current_epoch; 868 spinlock_t epoch_lock; 869 unsigned int epochs; 870 enum write_ordering_e write_ordering; 871 atomic_t current_tle_nr; /* transfer log epoch number */ 872 unsigned current_tle_writes; /* writes seen within this tl epoch */ 873 874 unsigned long last_reconnect_jif; 875 struct drbd_thread receiver; 876 struct drbd_thread worker; 877 struct drbd_thread asender; 878 cpumask_var_t cpu_mask; 879 880 /* sender side */ 881 struct drbd_work_queue sender_work; 882 883 struct { 884 /* whether this sender thread 885 * has processed a single write yet. */ 886 bool seen_any_write_yet; 887 888 /* Which barrier number to send with the next P_BARRIER */ 889 int current_epoch_nr; 890 891 /* how many write requests have been sent 892 * with req->epoch == current_epoch_nr. 893 * If none, no P_BARRIER will be sent. */ 894 unsigned current_epoch_writes; 895 } send; 896 }; 897 898 struct submit_worker { 899 struct workqueue_struct *wq; 900 struct work_struct worker; 901 902 spinlock_t lock; 903 struct list_head writes; 904 }; 905 906 struct drbd_conf { 907 struct drbd_tconn *tconn; 908 int vnr; /* volume number within the connection */ 909 struct kref kref; 910 911 /* things that are stored as / read from meta data on disk */ 912 unsigned long flags; 913 914 /* configured by drbdsetup */ 915 struct drbd_backing_dev *ldev __protected_by(local); 916 917 sector_t p_size; /* partner's disk size */ 918 struct request_queue *rq_queue; 919 struct block_device *this_bdev; 920 struct gendisk *vdisk; 921 922 unsigned long last_reattach_jif; 923 struct drbd_work resync_work, 924 unplug_work, 925 go_diskless, 926 md_sync_work, 927 start_resync_work; 928 struct timer_list resync_timer; 929 struct timer_list md_sync_timer; 930 struct timer_list start_resync_timer; 931 struct timer_list request_timer; 932 #ifdef DRBD_DEBUG_MD_SYNC 933 struct { 934 unsigned int line; 935 const char* func; 936 } last_md_mark_dirty; 937 #endif 938 939 /* Used after attach while negotiating new disk state. */ 940 union drbd_state new_state_tmp; 941 942 union drbd_dev_state state; 943 wait_queue_head_t misc_wait; 944 wait_queue_head_t state_wait; /* upon each state change. */ 945 unsigned int send_cnt; 946 unsigned int recv_cnt; 947 unsigned int read_cnt; 948 unsigned int writ_cnt; 949 unsigned int al_writ_cnt; 950 unsigned int bm_writ_cnt; 951 atomic_t ap_bio_cnt; /* Requests we need to complete */ 952 atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */ 953 atomic_t rs_pending_cnt; /* RS request/data packets on the wire */ 954 atomic_t unacked_cnt; /* Need to send replies for */ 955 atomic_t local_cnt; /* Waiting for local completion */ 956 957 /* Interval tree of pending local requests */ 958 struct rb_root read_requests; 959 struct rb_root write_requests; 960 961 /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ 962 unsigned long rs_total; 963 /* number of resync blocks that failed in this run */ 964 unsigned long rs_failed; 965 /* Syncer's start time [unit jiffies] */ 966 unsigned long rs_start; 967 /* cumulated time in PausedSyncX state [unit jiffies] */ 968 unsigned long rs_paused; 969 /* skipped because csum was equal [unit BM_BLOCK_SIZE] */ 970 unsigned long rs_same_csum; 971 #define DRBD_SYNC_MARKS 8 972 #define DRBD_SYNC_MARK_STEP (3*HZ) 973 /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */ 974 unsigned long rs_mark_left[DRBD_SYNC_MARKS]; 975 /* marks's time [unit jiffies] */ 976 unsigned long rs_mark_time[DRBD_SYNC_MARKS]; 977 /* current index into rs_mark_{left,time} */ 978 int rs_last_mark; 979 unsigned long rs_last_bcast; /* [unit jiffies] */ 980 981 /* where does the admin want us to start? (sector) */ 982 sector_t ov_start_sector; 983 sector_t ov_stop_sector; 984 /* where are we now? (sector) */ 985 sector_t ov_position; 986 /* Start sector of out of sync range (to merge printk reporting). */ 987 sector_t ov_last_oos_start; 988 /* size of out-of-sync range in sectors. */ 989 sector_t ov_last_oos_size; 990 unsigned long ov_left; /* in bits */ 991 992 struct drbd_bitmap *bitmap; 993 unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */ 994 995 /* Used to track operations of resync... */ 996 struct lru_cache *resync; 997 /* Number of locked elements in resync LRU */ 998 unsigned int resync_locked; 999 /* resync extent number waiting for application requests */ 1000 unsigned int resync_wenr; 1001 1002 int open_cnt; 1003 u64 *p_uuid; 1004 1005 struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */ 1006 struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */ 1007 struct list_head done_ee; /* need to send P_WRITE_ACK */ 1008 struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */ 1009 struct list_head net_ee; /* zero-copy network send in progress */ 1010 1011 int next_barrier_nr; 1012 struct list_head resync_reads; 1013 atomic_t pp_in_use; /* allocated from page pool */ 1014 atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */ 1015 wait_queue_head_t ee_wait; 1016 struct page *md_io_page; /* one page buffer for md_io */ 1017 struct drbd_md_io md_io; 1018 atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */ 1019 spinlock_t al_lock; 1020 wait_queue_head_t al_wait; 1021 struct lru_cache *act_log; /* activity log */ 1022 unsigned int al_tr_number; 1023 int al_tr_cycle; 1024 wait_queue_head_t seq_wait; 1025 atomic_t packet_seq; 1026 unsigned int peer_seq; 1027 spinlock_t peer_seq_lock; 1028 unsigned int minor; 1029 unsigned long comm_bm_set; /* communicated number of set bits. */ 1030 struct bm_io_work bm_io_work; 1031 u64 ed_uuid; /* UUID of the exposed data */ 1032 struct mutex own_state_mutex; 1033 struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */ 1034 char congestion_reason; /* Why we where congested... */ 1035 atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */ 1036 atomic_t rs_sect_ev; /* for submitted resync data rate, both */ 1037 int rs_last_sect_ev; /* counter to compare with */ 1038 int rs_last_events; /* counter of read or write "events" (unit sectors) 1039 * on the lower level device when we last looked. */ 1040 int c_sync_rate; /* current resync rate after syncer throttle magic */ 1041 struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, tconn->conn_update) */ 1042 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ 1043 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ 1044 unsigned int peer_max_bio_size; 1045 unsigned int local_max_bio_size; 1046 1047 /* any requests that would block in drbd_make_request() 1048 * are deferred to this single-threaded work queue */ 1049 struct submit_worker submit; 1050 }; 1051 1052 static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 1053 { 1054 return (struct drbd_conf *)idr_find(&minors, minor); 1055 } 1056 1057 static inline unsigned int mdev_to_minor(struct drbd_conf *mdev) 1058 { 1059 return mdev->minor; 1060 } 1061 1062 static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr) 1063 { 1064 return (struct drbd_conf *)idr_find(&tconn->volumes, vnr); 1065 } 1066 1067 /* 1068 * function declarations 1069 *************************/ 1070 1071 /* drbd_main.c */ 1072 1073 enum dds_flags { 1074 DDSF_FORCED = 1, 1075 DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */ 1076 }; 1077 1078 extern void drbd_init_set_defaults(struct drbd_conf *mdev); 1079 extern int drbd_thread_start(struct drbd_thread *thi); 1080 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait); 1081 extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task); 1082 #ifdef CONFIG_SMP 1083 extern void drbd_thread_current_set_cpu(struct drbd_thread *thi); 1084 extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn); 1085 #else 1086 #define drbd_thread_current_set_cpu(A) ({}) 1087 #define drbd_calc_cpu_mask(A) ({}) 1088 #endif 1089 extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr, 1090 unsigned int set_size); 1091 extern void tl_clear(struct drbd_tconn *); 1092 extern void drbd_free_sock(struct drbd_tconn *tconn); 1093 extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock, 1094 void *buf, size_t size, unsigned msg_flags); 1095 extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t, 1096 unsigned); 1097 1098 extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd); 1099 extern int drbd_send_protocol(struct drbd_tconn *tconn); 1100 extern int drbd_send_uuids(struct drbd_conf *mdev); 1101 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); 1102 extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev); 1103 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); 1104 extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s); 1105 extern int drbd_send_current_state(struct drbd_conf *mdev); 1106 extern int drbd_send_sync_param(struct drbd_conf *mdev); 1107 extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, 1108 u32 set_size); 1109 extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet, 1110 struct drbd_peer_request *); 1111 extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd, 1112 struct p_block_req *rp); 1113 extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd, 1114 struct p_data *dp, int data_size); 1115 extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd, 1116 sector_t sector, int blksize, u64 block_id); 1117 extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *); 1118 extern int drbd_send_block(struct drbd_conf *, enum drbd_packet, 1119 struct drbd_peer_request *); 1120 extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); 1121 extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, 1122 sector_t sector, int size, u64 block_id); 1123 extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, 1124 int size, void *digest, int digest_size, 1125 enum drbd_packet cmd); 1126 extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size); 1127 1128 extern int drbd_send_bitmap(struct drbd_conf *mdev); 1129 extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); 1130 extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode); 1131 extern void drbd_free_bc(struct drbd_backing_dev *ldev); 1132 extern void drbd_mdev_cleanup(struct drbd_conf *mdev); 1133 void drbd_print_uuids(struct drbd_conf *mdev, const char *text); 1134 1135 extern void conn_md_sync(struct drbd_tconn *tconn); 1136 extern void drbd_md_write(struct drbd_conf *mdev, void *buffer); 1137 extern void drbd_md_sync(struct drbd_conf *mdev); 1138 extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); 1139 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 1140 extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 1141 extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); 1142 extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local); 1143 extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local); 1144 extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 1145 extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local); 1146 extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local); 1147 extern int drbd_md_test_flag(struct drbd_backing_dev *, int); 1148 #ifndef DRBD_DEBUG_MD_SYNC 1149 extern void drbd_md_mark_dirty(struct drbd_conf *mdev); 1150 #else 1151 #define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ ) 1152 extern void drbd_md_mark_dirty_(struct drbd_conf *mdev, 1153 unsigned int line, const char *func); 1154 #endif 1155 extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, 1156 int (*io_fn)(struct drbd_conf *), 1157 void (*done)(struct drbd_conf *, int), 1158 char *why, enum bm_flag flags); 1159 extern int drbd_bitmap_io(struct drbd_conf *mdev, 1160 int (*io_fn)(struct drbd_conf *), 1161 char *why, enum bm_flag flags); 1162 extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, 1163 int (*io_fn)(struct drbd_conf *), 1164 char *why, enum bm_flag flags); 1165 extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); 1166 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); 1167 extern void drbd_ldev_destroy(struct drbd_conf *mdev); 1168 1169 /* Meta data layout 1170 * 1171 * We currently have two possible layouts. 1172 * Offsets in (512 byte) sectors. 1173 * external: 1174 * |----------- md_size_sect ------------------| 1175 * [ 4k superblock ][ activity log ][ Bitmap ] 1176 * | al_offset == 8 | 1177 * | bm_offset = al_offset + X | 1178 * ==> bitmap sectors = md_size_sect - bm_offset 1179 * 1180 * Variants: 1181 * old, indexed fixed size meta data: 1182 * 1183 * internal: 1184 * |----------- md_size_sect ------------------| 1185 * [data.....][ Bitmap ][ activity log ][ 4k superblock ][padding*] 1186 * | al_offset < 0 | 1187 * | bm_offset = al_offset - Y | 1188 * ==> bitmap sectors = Y = al_offset - bm_offset 1189 * 1190 * [padding*] are zero or up to 7 unused 512 Byte sectors to the 1191 * end of the device, so that the [4k superblock] will be 4k aligned. 1192 * 1193 * The activity log consists of 4k transaction blocks, 1194 * which are written in a ring-buffer, or striped ring-buffer like fashion, 1195 * which are writtensize used to be fixed 32kB, 1196 * but is about to become configurable. 1197 */ 1198 1199 /* Our old fixed size meta data layout 1200 * allows up to about 3.8TB, so if you want more, 1201 * you need to use the "flexible" meta data format. */ 1202 #define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */ 1203 #define MD_4kB_SECT 8 1204 #define MD_32kB_SECT 64 1205 1206 /* One activity log extent represents 4M of storage */ 1207 #define AL_EXTENT_SHIFT 22 1208 #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT) 1209 1210 /* We could make these currently hardcoded constants configurable 1211 * variables at create-md time (or even re-configurable at runtime?). 1212 * Which will require some more changes to the DRBD "super block" 1213 * and attach code. 1214 * 1215 * updates per transaction: 1216 * This many changes to the active set can be logged with one transaction. 1217 * This number is arbitrary. 1218 * context per transaction: 1219 * This many context extent numbers are logged with each transaction. 1220 * This number is resulting from the transaction block size (4k), the layout 1221 * of the transaction header, and the number of updates per transaction. 1222 * See drbd_actlog.c:struct al_transaction_on_disk 1223 * */ 1224 #define AL_UPDATES_PER_TRANSACTION 64 // arbitrary 1225 #define AL_CONTEXT_PER_TRANSACTION 919 // (4096 - 36 - 6*64)/4 1226 1227 #if BITS_PER_LONG == 32 1228 #define LN2_BPL 5 1229 #define cpu_to_lel(A) cpu_to_le32(A) 1230 #define lel_to_cpu(A) le32_to_cpu(A) 1231 #elif BITS_PER_LONG == 64 1232 #define LN2_BPL 6 1233 #define cpu_to_lel(A) cpu_to_le64(A) 1234 #define lel_to_cpu(A) le64_to_cpu(A) 1235 #else 1236 #error "LN2 of BITS_PER_LONG unknown!" 1237 #endif 1238 1239 /* resync bitmap */ 1240 /* 16MB sized 'bitmap extent' to track syncer usage */ 1241 struct bm_extent { 1242 int rs_left; /* number of bits set (out of sync) in this extent. */ 1243 int rs_failed; /* number of failed resync requests in this extent. */ 1244 unsigned long flags; 1245 struct lc_element lce; 1246 }; 1247 1248 #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ 1249 #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ 1250 #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ 1251 1252 /* drbd_bitmap.c */ 1253 /* 1254 * We need to store one bit for a block. 1255 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap. 1256 * Bit 0 ==> local node thinks this block is binary identical on both nodes 1257 * Bit 1 ==> local node thinks this block needs to be synced. 1258 */ 1259 1260 #define SLEEP_TIME (HZ/10) 1261 1262 /* We do bitmap IO in units of 4k blocks. 1263 * We also still have a hardcoded 4k per bit relation. */ 1264 #define BM_BLOCK_SHIFT 12 /* 4k per bit */ 1265 #define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT) 1266 /* mostly arbitrarily set the represented size of one bitmap extent, 1267 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap 1268 * at 4k per bit resolution) */ 1269 #define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */ 1270 #define BM_EXT_SIZE (1<<BM_EXT_SHIFT) 1271 1272 #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12) 1273 #error "HAVE YOU FIXED drbdmeta AS WELL??" 1274 #endif 1275 1276 /* thus many _storage_ sectors are described by one bit */ 1277 #define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9)) 1278 #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9)) 1279 #define BM_SECT_PER_BIT BM_BIT_TO_SECT(1) 1280 1281 /* bit to represented kilo byte conversion */ 1282 #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10)) 1283 1284 /* in which _bitmap_ extent (resp. sector) the bit for a certain 1285 * _storage_ sector is located in */ 1286 #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9)) 1287 1288 /* how much _storage_ sectors we have per bitmap sector */ 1289 #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9)) 1290 #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1) 1291 1292 /* in one sector of the bitmap, we have this many activity_log extents. */ 1293 #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT)) 1294 1295 #define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT) 1296 #define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1) 1297 1298 /* the extent in "PER_EXTENT" below is an activity log extent 1299 * we need that many (long words/bytes) to store the bitmap 1300 * of one AL_EXTENT_SIZE chunk of storage. 1301 * we can store the bitmap for that many AL_EXTENTS within 1302 * one sector of the _on_disk_ bitmap: 1303 * bit 0 bit 37 bit 38 bit (512*8)-1 1304 * ...|........|........|.. // ..|........| 1305 * sect. 0 `296 `304 ^(512*8*8)-1 1306 * 1307 #define BM_WORDS_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG ) 1308 #define BM_BYTES_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 ) // 128 1309 #define BM_EXT_PER_SECT ( 512 / BM_BYTES_PER_EXTENT ) // 4 1310 */ 1311 1312 #define DRBD_MAX_SECTORS_32 (0xffffffffLU) 1313 /* we have a certain meta data variant that has a fixed on-disk size of 128 1314 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity 1315 * log, leaving this many sectors for the bitmap. 1316 */ 1317 1318 #define DRBD_MAX_SECTORS_FIXED_BM \ 1319 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9))) 1320 #if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32 1321 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32 1322 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32 1323 #else 1324 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM 1325 /* 16 TB in units of sectors */ 1326 #if BITS_PER_LONG == 32 1327 /* adjust by one page worth of bitmap, 1328 * so we won't wrap around in drbd_bm_find_next_bit. 1329 * you should use 64bit OS for that much storage, anyways. */ 1330 #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) 1331 #else 1332 /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ 1333 #define DRBD_MAX_SECTORS_FLEX (1UL << 51) 1334 /* corresponds to (1UL << 38) bits right now. */ 1335 #endif 1336 #endif 1337 1338 /* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 1339 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 1340 * Since we may live in a mixed-platform cluster, 1341 * we limit us to a platform agnostic constant here for now. 1342 * A followup commit may allow even bigger BIO sizes, 1343 * once we thought that through. */ 1344 #define DRBD_MAX_BIO_SIZE (1U << 20) 1345 #if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE 1346 #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE 1347 #endif 1348 #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ 1349 1350 #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ 1351 #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ 1352 1353 extern int drbd_bm_init(struct drbd_conf *mdev); 1354 extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits); 1355 extern void drbd_bm_cleanup(struct drbd_conf *mdev); 1356 extern void drbd_bm_set_all(struct drbd_conf *mdev); 1357 extern void drbd_bm_clear_all(struct drbd_conf *mdev); 1358 /* set/clear/test only a few bits at a time */ 1359 extern int drbd_bm_set_bits( 1360 struct drbd_conf *mdev, unsigned long s, unsigned long e); 1361 extern int drbd_bm_clear_bits( 1362 struct drbd_conf *mdev, unsigned long s, unsigned long e); 1363 extern int drbd_bm_count_bits( 1364 struct drbd_conf *mdev, const unsigned long s, const unsigned long e); 1365 /* bm_set_bits variant for use while holding drbd_bm_lock, 1366 * may process the whole bitmap in one go */ 1367 extern void _drbd_bm_set_bits(struct drbd_conf *mdev, 1368 const unsigned long s, const unsigned long e); 1369 extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); 1370 extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); 1371 extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); 1372 extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); 1373 extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr); 1374 extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); 1375 extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local); 1376 extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local); 1377 extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); 1378 extern size_t drbd_bm_words(struct drbd_conf *mdev); 1379 extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); 1380 extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); 1381 1382 #define DRBD_END_OF_BITMAP (~(unsigned long)0) 1383 extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); 1384 /* bm_find_next variants for use while you hold drbd_bm_lock() */ 1385 extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); 1386 extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo); 1387 extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev); 1388 extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev); 1389 extern int drbd_bm_rs_done(struct drbd_conf *mdev); 1390 /* for receive_bitmap */ 1391 extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, 1392 size_t number, unsigned long *buffer); 1393 /* for _drbd_send_bitmap */ 1394 extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, 1395 size_t number, unsigned long *buffer); 1396 1397 extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); 1398 extern void drbd_bm_unlock(struct drbd_conf *mdev); 1399 /* drbd_main.c */ 1400 1401 extern struct kmem_cache *drbd_request_cache; 1402 extern struct kmem_cache *drbd_ee_cache; /* peer requests */ 1403 extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */ 1404 extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ 1405 extern mempool_t *drbd_request_mempool; 1406 extern mempool_t *drbd_ee_mempool; 1407 1408 /* drbd's page pool, used to buffer data received from the peer, 1409 * or data requested by the peer. 1410 * 1411 * This does not have an emergency reserve. 1412 * 1413 * When allocating from this pool, it first takes pages from the pool. 1414 * Only if the pool is depleted will try to allocate from the system. 1415 * 1416 * The assumption is that pages taken from this pool will be processed, 1417 * and given back, "quickly", and then can be recycled, so we can avoid 1418 * frequent calls to alloc_page(), and still will be able to make progress even 1419 * under memory pressure. 1420 */ 1421 extern struct page *drbd_pp_pool; 1422 extern spinlock_t drbd_pp_lock; 1423 extern int drbd_pp_vacant; 1424 extern wait_queue_head_t drbd_pp_wait; 1425 1426 /* We also need a standard (emergency-reserve backed) page pool 1427 * for meta data IO (activity log, bitmap). 1428 * We can keep it global, as long as it is used as "N pages at a time". 1429 * 128 should be plenty, currently we probably can get away with as few as 1. 1430 */ 1431 #define DRBD_MIN_POOL_PAGES 128 1432 extern mempool_t *drbd_md_io_page_pool; 1433 1434 /* We also need to make sure we get a bio 1435 * when we need it for housekeeping purposes */ 1436 extern struct bio_set *drbd_md_io_bio_set; 1437 /* to allocate from that set */ 1438 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask); 1439 1440 extern rwlock_t global_state_lock; 1441 1442 extern int conn_lowest_minor(struct drbd_tconn *tconn); 1443 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr); 1444 extern void drbd_minor_destroy(struct kref *kref); 1445 1446 extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts); 1447 extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts); 1448 extern void conn_destroy(struct kref *kref); 1449 struct drbd_tconn *conn_get_by_name(const char *name); 1450 extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len, 1451 void *peer_addr, int peer_addr_len); 1452 extern void conn_free_crypto(struct drbd_tconn *tconn); 1453 1454 extern int proc_details; 1455 1456 /* drbd_req */ 1457 extern void do_submit(struct work_struct *ws); 1458 extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long); 1459 extern void drbd_make_request(struct request_queue *q, struct bio *bio); 1460 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); 1461 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); 1462 extern int is_valid_ar_handle(struct drbd_request *, sector_t); 1463 1464 1465 /* drbd_nl.c */ 1466 extern int drbd_msg_put_info(const char *info); 1467 extern void drbd_suspend_io(struct drbd_conf *mdev); 1468 extern void drbd_resume_io(struct drbd_conf *mdev); 1469 extern char *ppsize(char *buf, unsigned long long size); 1470 extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int); 1471 enum determine_dev_size { 1472 DS_ERROR_SHRINK = -3, 1473 DS_ERROR_SPACE_MD = -2, 1474 DS_ERROR = -1, 1475 DS_UNCHANGED = 0, 1476 DS_SHRUNK = 1, 1477 DS_GREW = 2, 1478 DS_GREW_FROM_ZERO = 3, 1479 }; 1480 extern enum determine_dev_size 1481 drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local); 1482 extern void resync_after_online_grow(struct drbd_conf *); 1483 extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); 1484 extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, 1485 enum drbd_role new_role, 1486 int force); 1487 extern bool conn_try_outdate_peer(struct drbd_tconn *tconn); 1488 extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn); 1489 extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); 1490 1491 /* drbd_worker.c */ 1492 extern int drbd_worker(struct drbd_thread *thi); 1493 enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor); 1494 void drbd_resync_after_changed(struct drbd_conf *mdev); 1495 extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side); 1496 extern void resume_next_sg(struct drbd_conf *mdev); 1497 extern void suspend_other_sg(struct drbd_conf *mdev); 1498 extern int drbd_resync_finished(struct drbd_conf *mdev); 1499 /* maybe rather drbd_main.c ? */ 1500 extern void *drbd_md_get_buffer(struct drbd_conf *mdev); 1501 extern void drbd_md_put_buffer(struct drbd_conf *mdev); 1502 extern int drbd_md_sync_page_io(struct drbd_conf *mdev, 1503 struct drbd_backing_dev *bdev, sector_t sector, int rw); 1504 extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int); 1505 extern void wait_until_done_or_force_detached(struct drbd_conf *mdev, 1506 struct drbd_backing_dev *bdev, unsigned int *done); 1507 extern void drbd_rs_controller_reset(struct drbd_conf *mdev); 1508 1509 static inline void ov_out_of_sync_print(struct drbd_conf *mdev) 1510 { 1511 if (mdev->ov_last_oos_size) { 1512 dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n", 1513 (unsigned long long)mdev->ov_last_oos_start, 1514 (unsigned long)mdev->ov_last_oos_size); 1515 } 1516 mdev->ov_last_oos_size=0; 1517 } 1518 1519 1520 extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *); 1521 extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, 1522 struct drbd_peer_request *, void *); 1523 /* worker callbacks */ 1524 extern int w_e_end_data_req(struct drbd_work *, int); 1525 extern int w_e_end_rsdata_req(struct drbd_work *, int); 1526 extern int w_e_end_csum_rs_req(struct drbd_work *, int); 1527 extern int w_e_end_ov_reply(struct drbd_work *, int); 1528 extern int w_e_end_ov_req(struct drbd_work *, int); 1529 extern int w_ov_finished(struct drbd_work *, int); 1530 extern int w_resync_timer(struct drbd_work *, int); 1531 extern int w_send_write_hint(struct drbd_work *, int); 1532 extern int w_make_resync_request(struct drbd_work *, int); 1533 extern int w_send_dblock(struct drbd_work *, int); 1534 extern int w_send_read_req(struct drbd_work *, int); 1535 extern int w_prev_work_done(struct drbd_work *, int); 1536 extern int w_e_reissue(struct drbd_work *, int); 1537 extern int w_restart_disk_io(struct drbd_work *, int); 1538 extern int w_send_out_of_sync(struct drbd_work *, int); 1539 extern int w_start_resync(struct drbd_work *, int); 1540 1541 extern void resync_timer_fn(unsigned long data); 1542 extern void start_resync_timer_fn(unsigned long data); 1543 1544 /* drbd_receiver.c */ 1545 extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); 1546 extern int drbd_submit_peer_request(struct drbd_conf *, 1547 struct drbd_peer_request *, const unsigned, 1548 const int); 1549 extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *); 1550 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64, 1551 sector_t, unsigned int, 1552 gfp_t) __must_hold(local); 1553 extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *, 1554 int); 1555 #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) 1556 #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) 1557 extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool); 1558 extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled); 1559 extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed); 1560 extern void conn_flush_workqueue(struct drbd_tconn *tconn); 1561 extern int drbd_connected(struct drbd_conf *mdev); 1562 static inline void drbd_flush_workqueue(struct drbd_conf *mdev) 1563 { 1564 conn_flush_workqueue(mdev->tconn); 1565 } 1566 1567 /* Yes, there is kernel_setsockopt, but only since 2.6.18. 1568 * So we have our own copy of it here. */ 1569 static inline int drbd_setsockopt(struct socket *sock, int level, int optname, 1570 char *optval, int optlen) 1571 { 1572 mm_segment_t oldfs = get_fs(); 1573 char __user *uoptval; 1574 int err; 1575 1576 uoptval = (char __user __force *)optval; 1577 1578 set_fs(KERNEL_DS); 1579 if (level == SOL_SOCKET) 1580 err = sock_setsockopt(sock, level, optname, uoptval, optlen); 1581 else 1582 err = sock->ops->setsockopt(sock, level, optname, uoptval, 1583 optlen); 1584 set_fs(oldfs); 1585 return err; 1586 } 1587 1588 static inline void drbd_tcp_cork(struct socket *sock) 1589 { 1590 int val = 1; 1591 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, 1592 (char*)&val, sizeof(val)); 1593 } 1594 1595 static inline void drbd_tcp_uncork(struct socket *sock) 1596 { 1597 int val = 0; 1598 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, 1599 (char*)&val, sizeof(val)); 1600 } 1601 1602 static inline void drbd_tcp_nodelay(struct socket *sock) 1603 { 1604 int val = 1; 1605 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY, 1606 (char*)&val, sizeof(val)); 1607 } 1608 1609 static inline void drbd_tcp_quickack(struct socket *sock) 1610 { 1611 int val = 2; 1612 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, 1613 (char*)&val, sizeof(val)); 1614 } 1615 1616 void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo); 1617 1618 /* drbd_proc.c */ 1619 extern struct proc_dir_entry *drbd_proc; 1620 extern const struct file_operations drbd_proc_fops; 1621 extern const char *drbd_conn_str(enum drbd_conns s); 1622 extern const char *drbd_role_str(enum drbd_role s); 1623 1624 /* drbd_actlog.c */ 1625 extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i); 1626 extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate); 1627 extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i); 1628 extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate); 1629 extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i); 1630 extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector); 1631 extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector); 1632 extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector); 1633 extern void drbd_rs_cancel_all(struct drbd_conf *mdev); 1634 extern int drbd_rs_del_all(struct drbd_conf *mdev); 1635 extern void drbd_rs_failed_io(struct drbd_conf *mdev, 1636 sector_t sector, int size); 1637 extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); 1638 extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, 1639 int size, const char *file, const unsigned int line); 1640 #define drbd_set_in_sync(mdev, sector, size) \ 1641 __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) 1642 extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, 1643 int size, const char *file, const unsigned int line); 1644 #define drbd_set_out_of_sync(mdev, sector, size) \ 1645 __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) 1646 extern void drbd_al_shrink(struct drbd_conf *mdev); 1647 extern int drbd_initialize_al(struct drbd_conf *, void *); 1648 1649 /* drbd_nl.c */ 1650 /* state info broadcast */ 1651 struct sib_info { 1652 enum drbd_state_info_bcast_reason sib_reason; 1653 union { 1654 struct { 1655 char *helper_name; 1656 unsigned helper_exit_code; 1657 }; 1658 struct { 1659 union drbd_state os; 1660 union drbd_state ns; 1661 }; 1662 }; 1663 }; 1664 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib); 1665 1666 /* 1667 * inline helper functions 1668 *************************/ 1669 1670 /* see also page_chain_add and friends in drbd_receiver.c */ 1671 static inline struct page *page_chain_next(struct page *page) 1672 { 1673 return (struct page *)page_private(page); 1674 } 1675 #define page_chain_for_each(page) \ 1676 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \ 1677 page = page_chain_next(page)) 1678 #define page_chain_for_each_safe(page, n) \ 1679 for (; page && ({ n = page_chain_next(page); 1; }); page = n) 1680 1681 1682 static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req) 1683 { 1684 struct page *page = peer_req->pages; 1685 page_chain_for_each(page) { 1686 if (page_count(page) > 1) 1687 return 1; 1688 } 1689 return 0; 1690 } 1691 1692 static inline enum drbd_state_rv 1693 _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, 1694 enum chg_state_flags flags, struct completion *done) 1695 { 1696 enum drbd_state_rv rv; 1697 1698 read_lock(&global_state_lock); 1699 rv = __drbd_set_state(mdev, ns, flags, done); 1700 read_unlock(&global_state_lock); 1701 1702 return rv; 1703 } 1704 1705 static inline union drbd_state drbd_read_state(struct drbd_conf *mdev) 1706 { 1707 union drbd_state rv; 1708 1709 rv.i = mdev->state.i; 1710 rv.susp = mdev->tconn->susp; 1711 rv.susp_nod = mdev->tconn->susp_nod; 1712 rv.susp_fen = mdev->tconn->susp_fen; 1713 1714 return rv; 1715 } 1716 1717 enum drbd_force_detach_flags { 1718 DRBD_READ_ERROR, 1719 DRBD_WRITE_ERROR, 1720 DRBD_META_IO_ERROR, 1721 DRBD_FORCE_DETACH, 1722 }; 1723 1724 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) 1725 static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, 1726 enum drbd_force_detach_flags df, 1727 const char *where) 1728 { 1729 enum drbd_io_error_p ep; 1730 1731 rcu_read_lock(); 1732 ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error; 1733 rcu_read_unlock(); 1734 switch (ep) { 1735 case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */ 1736 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) { 1737 if (__ratelimit(&drbd_ratelimit_state)) 1738 dev_err(DEV, "Local IO failed in %s.\n", where); 1739 if (mdev->state.disk > D_INCONSISTENT) 1740 _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL); 1741 break; 1742 } 1743 /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ 1744 case EP_DETACH: 1745 case EP_CALL_HELPER: 1746 /* Remember whether we saw a READ or WRITE error. 1747 * 1748 * Recovery of the affected area for WRITE failure is covered 1749 * by the activity log. 1750 * READ errors may fall outside that area though. Certain READ 1751 * errors can be "healed" by writing good data to the affected 1752 * blocks, which triggers block re-allocation in lower layers. 1753 * 1754 * If we can not write the bitmap after a READ error, 1755 * we may need to trigger a full sync (see w_go_diskless()). 1756 * 1757 * Force-detach is not really an IO error, but rather a 1758 * desperate measure to try to deal with a completely 1759 * unresponsive lower level IO stack. 1760 * Still it should be treated as a WRITE error. 1761 * 1762 * Meta IO error is always WRITE error: 1763 * we read meta data only once during attach, 1764 * which will fail in case of errors. 1765 */ 1766 set_bit(WAS_IO_ERROR, &mdev->flags); 1767 if (df == DRBD_READ_ERROR) 1768 set_bit(WAS_READ_ERROR, &mdev->flags); 1769 if (df == DRBD_FORCE_DETACH) 1770 set_bit(FORCE_DETACH, &mdev->flags); 1771 if (mdev->state.disk > D_FAILED) { 1772 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); 1773 dev_err(DEV, 1774 "Local IO failed in %s. Detaching...\n", where); 1775 } 1776 break; 1777 } 1778 } 1779 1780 /** 1781 * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers 1782 * @mdev: DRBD device. 1783 * @error: Error code passed to the IO completion callback 1784 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data 1785 * 1786 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED) 1787 */ 1788 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) 1789 static inline void drbd_chk_io_error_(struct drbd_conf *mdev, 1790 int error, enum drbd_force_detach_flags forcedetach, const char *where) 1791 { 1792 if (error) { 1793 unsigned long flags; 1794 spin_lock_irqsave(&mdev->tconn->req_lock, flags); 1795 __drbd_chk_io_error_(mdev, forcedetach, where); 1796 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags); 1797 } 1798 } 1799 1800 1801 /** 1802 * drbd_md_first_sector() - Returns the first sector number of the meta data area 1803 * @bdev: Meta data block device. 1804 * 1805 * BTW, for internal meta data, this happens to be the maximum capacity 1806 * we could agree upon with our peer node. 1807 */ 1808 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev) 1809 { 1810 switch (bdev->md.meta_dev_idx) { 1811 case DRBD_MD_INDEX_INTERNAL: 1812 case DRBD_MD_INDEX_FLEX_INT: 1813 return bdev->md.md_offset + bdev->md.bm_offset; 1814 case DRBD_MD_INDEX_FLEX_EXT: 1815 default: 1816 return bdev->md.md_offset; 1817 } 1818 } 1819 1820 /** 1821 * drbd_md_last_sector() - Return the last sector number of the meta data area 1822 * @bdev: Meta data block device. 1823 */ 1824 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) 1825 { 1826 switch (bdev->md.meta_dev_idx) { 1827 case DRBD_MD_INDEX_INTERNAL: 1828 case DRBD_MD_INDEX_FLEX_INT: 1829 return bdev->md.md_offset + MD_4kB_SECT -1; 1830 case DRBD_MD_INDEX_FLEX_EXT: 1831 default: 1832 return bdev->md.md_offset + bdev->md.md_size_sect -1; 1833 } 1834 } 1835 1836 /* Returns the number of 512 byte sectors of the device */ 1837 static inline sector_t drbd_get_capacity(struct block_device *bdev) 1838 { 1839 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ 1840 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0; 1841 } 1842 1843 /** 1844 * drbd_get_max_capacity() - Returns the capacity we announce to out peer 1845 * @bdev: Meta data block device. 1846 * 1847 * returns the capacity we announce to out peer. we clip ourselves at the 1848 * various MAX_SECTORS, because if we don't, current implementation will 1849 * oops sooner or later 1850 */ 1851 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev) 1852 { 1853 sector_t s; 1854 1855 switch (bdev->md.meta_dev_idx) { 1856 case DRBD_MD_INDEX_INTERNAL: 1857 case DRBD_MD_INDEX_FLEX_INT: 1858 s = drbd_get_capacity(bdev->backing_bdev) 1859 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX, 1860 drbd_md_first_sector(bdev)) 1861 : 0; 1862 break; 1863 case DRBD_MD_INDEX_FLEX_EXT: 1864 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX, 1865 drbd_get_capacity(bdev->backing_bdev)); 1866 /* clip at maximum size the meta device can support */ 1867 s = min_t(sector_t, s, 1868 BM_EXT_TO_SECT(bdev->md.md_size_sect 1869 - bdev->md.bm_offset)); 1870 break; 1871 default: 1872 s = min_t(sector_t, DRBD_MAX_SECTORS, 1873 drbd_get_capacity(bdev->backing_bdev)); 1874 } 1875 return s; 1876 } 1877 1878 /** 1879 * drbd_md_ss() - Return the sector number of our meta data super block 1880 * @bdev: Meta data block device. 1881 */ 1882 static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev) 1883 { 1884 const int meta_dev_idx = bdev->md.meta_dev_idx; 1885 1886 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT) 1887 return 0; 1888 1889 /* Since drbd08, internal meta data is always "flexible". 1890 * position: last 4k aligned block of 4k size */ 1891 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 1892 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT) 1893 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8; 1894 1895 /* external, some index; this is the old fixed size layout */ 1896 return MD_128MB_SECT * bdev->md.meta_dev_idx; 1897 } 1898 1899 static inline void 1900 drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w) 1901 { 1902 unsigned long flags; 1903 spin_lock_irqsave(&q->q_lock, flags); 1904 list_add(&w->list, &q->q); 1905 spin_unlock_irqrestore(&q->q_lock, flags); 1906 wake_up(&q->q_wait); 1907 } 1908 1909 static inline void 1910 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) 1911 { 1912 unsigned long flags; 1913 spin_lock_irqsave(&q->q_lock, flags); 1914 list_add_tail(&w->list, &q->q); 1915 spin_unlock_irqrestore(&q->q_lock, flags); 1916 wake_up(&q->q_wait); 1917 } 1918 1919 static inline void wake_asender(struct drbd_tconn *tconn) 1920 { 1921 if (test_bit(SIGNAL_ASENDER, &tconn->flags)) 1922 force_sig(DRBD_SIG, tconn->asender.task); 1923 } 1924 1925 static inline void request_ping(struct drbd_tconn *tconn) 1926 { 1927 set_bit(SEND_PING, &tconn->flags); 1928 wake_asender(tconn); 1929 } 1930 1931 extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *); 1932 extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *); 1933 extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *, 1934 enum drbd_packet, unsigned int, void *, 1935 unsigned int); 1936 extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *, 1937 enum drbd_packet, unsigned int, void *, 1938 unsigned int); 1939 1940 extern int drbd_send_ping(struct drbd_tconn *tconn); 1941 extern int drbd_send_ping_ack(struct drbd_tconn *tconn); 1942 extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state); 1943 extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state); 1944 1945 static inline void drbd_thread_stop(struct drbd_thread *thi) 1946 { 1947 _drbd_thread_stop(thi, false, true); 1948 } 1949 1950 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) 1951 { 1952 _drbd_thread_stop(thi, false, false); 1953 } 1954 1955 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) 1956 { 1957 _drbd_thread_stop(thi, true, false); 1958 } 1959 1960 /* counts how many answer packets packets we expect from our peer, 1961 * for either explicit application requests, 1962 * or implicit barrier packets as necessary. 1963 * increased: 1964 * w_send_barrier 1965 * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ); 1966 * it is much easier and equally valid to count what we queue for the 1967 * worker, even before it actually was queued or send. 1968 * (drbd_make_request_common; recovery path on read io-error) 1969 * decreased: 1970 * got_BarrierAck (respective tl_clear, tl_clear_barrier) 1971 * _req_mod(req, DATA_RECEIVED) 1972 * [from receive_DataReply] 1973 * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED) 1974 * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)] 1975 * for some reason it is NOT decreased in got_NegAck, 1976 * but in the resulting cleanup code from report_params. 1977 * we should try to remember the reason for that... 1978 * _req_mod(req, SEND_FAILED or SEND_CANCELED) 1979 * _req_mod(req, CONNECTION_LOST_WHILE_PENDING) 1980 * [from tl_clear_barrier] 1981 */ 1982 static inline void inc_ap_pending(struct drbd_conf *mdev) 1983 { 1984 atomic_inc(&mdev->ap_pending_cnt); 1985 } 1986 1987 #define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \ 1988 if (atomic_read(&mdev->which) < 0) \ 1989 dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \ 1990 func, line, \ 1991 atomic_read(&mdev->which)) 1992 1993 #define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__) 1994 static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line) 1995 { 1996 if (atomic_dec_and_test(&mdev->ap_pending_cnt)) 1997 wake_up(&mdev->misc_wait); 1998 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line); 1999 } 2000 2001 /* counts how many resync-related answers we still expect from the peer 2002 * increase decrease 2003 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY) 2004 * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER) 2005 * (or P_NEG_ACK with ID_SYNCER) 2006 */ 2007 static inline void inc_rs_pending(struct drbd_conf *mdev) 2008 { 2009 atomic_inc(&mdev->rs_pending_cnt); 2010 } 2011 2012 #define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__) 2013 static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line) 2014 { 2015 atomic_dec(&mdev->rs_pending_cnt); 2016 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line); 2017 } 2018 2019 /* counts how many answers we still need to send to the peer. 2020 * increased on 2021 * receive_Data unless protocol A; 2022 * we need to send a P_RECV_ACK (proto B) 2023 * or P_WRITE_ACK (proto C) 2024 * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK 2025 * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA 2026 * receive_Barrier_* we need to send a P_BARRIER_ACK 2027 */ 2028 static inline void inc_unacked(struct drbd_conf *mdev) 2029 { 2030 atomic_inc(&mdev->unacked_cnt); 2031 } 2032 2033 #define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__) 2034 static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line) 2035 { 2036 atomic_dec(&mdev->unacked_cnt); 2037 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); 2038 } 2039 2040 #define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__) 2041 static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line) 2042 { 2043 atomic_sub(n, &mdev->unacked_cnt); 2044 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); 2045 } 2046 2047 /** 2048 * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev 2049 * @M: DRBD device. 2050 * 2051 * You have to call put_ldev() when finished working with mdev->ldev. 2052 */ 2053 #define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT)) 2054 #define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS)) 2055 2056 static inline void put_ldev(struct drbd_conf *mdev) 2057 { 2058 int i = atomic_dec_return(&mdev->local_cnt); 2059 2060 /* This may be called from some endio handler, 2061 * so we must not sleep here. */ 2062 2063 __release(local); 2064 D_ASSERT(i >= 0); 2065 if (i == 0) { 2066 if (mdev->state.disk == D_DISKLESS) 2067 /* even internal references gone, safe to destroy */ 2068 drbd_ldev_destroy(mdev); 2069 if (mdev->state.disk == D_FAILED) { 2070 /* all application IO references gone. */ 2071 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) 2072 drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless); 2073 } 2074 wake_up(&mdev->misc_wait); 2075 } 2076 } 2077 2078 #ifndef __CHECKER__ 2079 static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) 2080 { 2081 int io_allowed; 2082 2083 /* never get a reference while D_DISKLESS */ 2084 if (mdev->state.disk == D_DISKLESS) 2085 return 0; 2086 2087 atomic_inc(&mdev->local_cnt); 2088 io_allowed = (mdev->state.disk >= mins); 2089 if (!io_allowed) 2090 put_ldev(mdev); 2091 return io_allowed; 2092 } 2093 #else 2094 extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins); 2095 #endif 2096 2097 /* you must have an "get_ldev" reference */ 2098 static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, 2099 unsigned long *bits_left, unsigned int *per_mil_done) 2100 { 2101 /* this is to break it at compile time when we change that, in case we 2102 * want to support more than (1<<32) bits on a 32bit arch. */ 2103 typecheck(unsigned long, mdev->rs_total); 2104 2105 /* note: both rs_total and rs_left are in bits, i.e. in 2106 * units of BM_BLOCK_SIZE. 2107 * for the percentage, we don't care. */ 2108 2109 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 2110 *bits_left = mdev->ov_left; 2111 else 2112 *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 2113 /* >> 10 to prevent overflow, 2114 * +1 to prevent division by zero */ 2115 if (*bits_left > mdev->rs_total) { 2116 /* doh. maybe a logic bug somewhere. 2117 * may also be just a race condition 2118 * between this and a disconnect during sync. 2119 * for now, just prevent in-kernel buffer overflow. 2120 */ 2121 smp_rmb(); 2122 dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n", 2123 drbd_conn_str(mdev->state.conn), 2124 *bits_left, mdev->rs_total, mdev->rs_failed); 2125 *per_mil_done = 0; 2126 } else { 2127 /* Make sure the division happens in long context. 2128 * We allow up to one petabyte storage right now, 2129 * at a granularity of 4k per bit that is 2**38 bits. 2130 * After shift right and multiplication by 1000, 2131 * this should still fit easily into a 32bit long, 2132 * so we don't need a 64bit division on 32bit arch. 2133 * Note: currently we don't support such large bitmaps on 32bit 2134 * arch anyways, but no harm done to be prepared for it here. 2135 */ 2136 unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10; 2137 unsigned long left = *bits_left >> shift; 2138 unsigned long total = 1UL + (mdev->rs_total >> shift); 2139 unsigned long tmp = 1000UL - left * 1000UL/total; 2140 *per_mil_done = tmp; 2141 } 2142 } 2143 2144 2145 /* this throttles on-the-fly application requests 2146 * according to max_buffers settings; 2147 * maybe re-implement using semaphores? */ 2148 static inline int drbd_get_max_buffers(struct drbd_conf *mdev) 2149 { 2150 struct net_conf *nc; 2151 int mxb; 2152 2153 rcu_read_lock(); 2154 nc = rcu_dereference(mdev->tconn->net_conf); 2155 mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */ 2156 rcu_read_unlock(); 2157 2158 return mxb; 2159 } 2160 2161 static inline int drbd_state_is_stable(struct drbd_conf *mdev) 2162 { 2163 union drbd_dev_state s = mdev->state; 2164 2165 /* DO NOT add a default clause, we want the compiler to warn us 2166 * for any newly introduced state we may have forgotten to add here */ 2167 2168 switch ((enum drbd_conns)s.conn) { 2169 /* new io only accepted when there is no connection, ... */ 2170 case C_STANDALONE: 2171 case C_WF_CONNECTION: 2172 /* ... or there is a well established connection. */ 2173 case C_CONNECTED: 2174 case C_SYNC_SOURCE: 2175 case C_SYNC_TARGET: 2176 case C_VERIFY_S: 2177 case C_VERIFY_T: 2178 case C_PAUSED_SYNC_S: 2179 case C_PAUSED_SYNC_T: 2180 case C_AHEAD: 2181 case C_BEHIND: 2182 /* transitional states, IO allowed */ 2183 case C_DISCONNECTING: 2184 case C_UNCONNECTED: 2185 case C_TIMEOUT: 2186 case C_BROKEN_PIPE: 2187 case C_NETWORK_FAILURE: 2188 case C_PROTOCOL_ERROR: 2189 case C_TEAR_DOWN: 2190 case C_WF_REPORT_PARAMS: 2191 case C_STARTING_SYNC_S: 2192 case C_STARTING_SYNC_T: 2193 break; 2194 2195 /* Allow IO in BM exchange states with new protocols */ 2196 case C_WF_BITMAP_S: 2197 if (mdev->tconn->agreed_pro_version < 96) 2198 return 0; 2199 break; 2200 2201 /* no new io accepted in these states */ 2202 case C_WF_BITMAP_T: 2203 case C_WF_SYNC_UUID: 2204 case C_MASK: 2205 /* not "stable" */ 2206 return 0; 2207 } 2208 2209 switch ((enum drbd_disk_state)s.disk) { 2210 case D_DISKLESS: 2211 case D_INCONSISTENT: 2212 case D_OUTDATED: 2213 case D_CONSISTENT: 2214 case D_UP_TO_DATE: 2215 case D_FAILED: 2216 /* disk state is stable as well. */ 2217 break; 2218 2219 /* no new io accepted during transitional states */ 2220 case D_ATTACHING: 2221 case D_NEGOTIATING: 2222 case D_UNKNOWN: 2223 case D_MASK: 2224 /* not "stable" */ 2225 return 0; 2226 } 2227 2228 return 1; 2229 } 2230 2231 static inline int drbd_suspended(struct drbd_conf *mdev) 2232 { 2233 struct drbd_tconn *tconn = mdev->tconn; 2234 2235 return tconn->susp || tconn->susp_fen || tconn->susp_nod; 2236 } 2237 2238 static inline bool may_inc_ap_bio(struct drbd_conf *mdev) 2239 { 2240 int mxb = drbd_get_max_buffers(mdev); 2241 2242 if (drbd_suspended(mdev)) 2243 return false; 2244 if (test_bit(SUSPEND_IO, &mdev->flags)) 2245 return false; 2246 2247 /* to avoid potential deadlock or bitmap corruption, 2248 * in various places, we only allow new application io 2249 * to start during "stable" states. */ 2250 2251 /* no new io accepted when attaching or detaching the disk */ 2252 if (!drbd_state_is_stable(mdev)) 2253 return false; 2254 2255 /* since some older kernels don't have atomic_add_unless, 2256 * and we are within the spinlock anyways, we have this workaround. */ 2257 if (atomic_read(&mdev->ap_bio_cnt) > mxb) 2258 return false; 2259 if (test_bit(BITMAP_IO, &mdev->flags)) 2260 return false; 2261 return true; 2262 } 2263 2264 static inline bool inc_ap_bio_cond(struct drbd_conf *mdev) 2265 { 2266 bool rv = false; 2267 2268 spin_lock_irq(&mdev->tconn->req_lock); 2269 rv = may_inc_ap_bio(mdev); 2270 if (rv) 2271 atomic_inc(&mdev->ap_bio_cnt); 2272 spin_unlock_irq(&mdev->tconn->req_lock); 2273 2274 return rv; 2275 } 2276 2277 static inline void inc_ap_bio(struct drbd_conf *mdev) 2278 { 2279 /* we wait here 2280 * as long as the device is suspended 2281 * until the bitmap is no longer on the fly during connection 2282 * handshake as long as we would exceed the max_buffer limit. 2283 * 2284 * to avoid races with the reconnect code, 2285 * we need to atomic_inc within the spinlock. */ 2286 2287 wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev)); 2288 } 2289 2290 static inline void dec_ap_bio(struct drbd_conf *mdev) 2291 { 2292 int mxb = drbd_get_max_buffers(mdev); 2293 int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); 2294 2295 D_ASSERT(ap_bio >= 0); 2296 2297 if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { 2298 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) 2299 drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w); 2300 } 2301 2302 /* this currently does wake_up for every dec_ap_bio! 2303 * maybe rather introduce some type of hysteresis? 2304 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */ 2305 if (ap_bio < mxb) 2306 wake_up(&mdev->misc_wait); 2307 } 2308 2309 static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev) 2310 { 2311 return mdev->tconn->agreed_pro_version >= 97 && 2312 mdev->tconn->agreed_pro_version != 100; 2313 } 2314 2315 static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) 2316 { 2317 int changed = mdev->ed_uuid != val; 2318 mdev->ed_uuid = val; 2319 return changed; 2320 } 2321 2322 static inline int drbd_queue_order_type(struct drbd_conf *mdev) 2323 { 2324 /* sorry, we currently have no working implementation 2325 * of distributed TCQ stuff */ 2326 #ifndef QUEUE_ORDERED_NONE 2327 #define QUEUE_ORDERED_NONE 0 2328 #endif 2329 return QUEUE_ORDERED_NONE; 2330 } 2331 2332 static inline void drbd_md_flush(struct drbd_conf *mdev) 2333 { 2334 int r; 2335 2336 if (mdev->ldev == NULL) { 2337 dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n"); 2338 return; 2339 } 2340 2341 if (test_bit(MD_NO_FUA, &mdev->flags)) 2342 return; 2343 2344 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL); 2345 if (r) { 2346 set_bit(MD_NO_FUA, &mdev->flags); 2347 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); 2348 } 2349 } 2350 2351 #endif 2352