/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
int proc_details; /* Detail level in /proc/drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;  /* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
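
/*
 * Editor's sketch (not part of the original driver): the pool above is a
 * LIFO stack of pages chained through page_private(), exactly the way
 * drbd_destroy_mempools() below unwinds it.  Assuming drbd_pp_lock is
 * held, popping one page would look like this; the real allocation
 * paths live elsewhere in the driver.
 */
static inline struct page *drbd_pp_pop_sketch(void)
{
	struct page *page = drbd_pp_pool;

	if (page) {
		/* next pointer is stashed in the page's private member */
		drbd_pp_pool = (struct page *)page_private(page);
		set_page_private(page, 0);
		drbd_pp_vacant--;
	}
	return page;
}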

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&tconn->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &tconn->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&tconn->req_lock);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
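
/*
 * Editor's illustration: suppose the oldest not yet barrier-acked epoch
 * in the transfer log is #7 and contains three network writes.  A
 * BarrierAck carrying barrier_nr == 7 and set_size == 3 marks exactly
 * those requests BARRIER_ACKED; any other combination trips one of the
 * paranoia checks above and forces the connection into C_PROTOCOL_ERROR.
 */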

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req, *r;

	spin_lock_irq(&tconn->req_lock);
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.mdev != mdev)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
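
/*
 * Editor's summary of the drbd_thread state machine implemented above:
 *
 *   NONE     --drbd_thread_start()-->  RUNNING
 *   RUNNING  --_drbd_thread_stop()-->  EXITING (or RESTARTING if restart)
 *   EXITING  --drbd_thread_start()-->  RESTARTING (loop without a new task)
 *   thread function returns        ->  NONE (unless RESTARTING was set)
 */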

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
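
/*
 * Editor's note on the header selection above: protocol version 100 and
 * later always uses p_header100 (32-bit length field plus a volume
 * number); versions 95-99 fall back to p_header95 only when the payload
 * exceeds DRBD_MAX_SIZE_H80_PACKET, since the 16-bit length field of
 * p_header80 would overflow; everything else uses the classic
 * p_header80.
 */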

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in
	 * @size for commands that send data blocks.  For those commands,
	 * omit the MSG_MORE flag: this will increase the likelihood that
	 * data blocks which are page aligned on the sender will end up
	 * page aligned on the receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
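
/*
 * Editor's note on the pairing above: *_prepare_command() takes
 * sock->mutex and returns a pointer into the send buffer just past the
 * packet header; the matching *_send_command() transmits and releases
 * the mutex (it is already released if prepare returned NULL).  A
 * typical caller, with P_SOME_CMD and "field" as placeholders, looks
 * like:
 *
 *	p = drbd_prepare_command(mdev, sock);
 *	if (!p)
 *		return -EIO;
 *	p->field = cpu_to_be32(value);
 *	return drbd_send_command(mdev, sock, P_SOME_CMD, sizeof(*p), NULL, 0);
 */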

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	spin_lock_irq(&mdev->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}
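
/*
 * Editor's summary of the uuid_flags bits assembled in _drbd_send_uuids():
 *   1 - ask the peer to discard its data (net_conf->discard_my_data)
 *   2 - this node was a crashed primary
 *   4 - local disk is D_INCONSISTENT while negotiating
 *   8 - skip the initial sync (drbd_send_uuids_skip_initial_sync())
 */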

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_current_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @mdev:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
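
/*
 * Editor's note: the three helpers above pack p->encoding as
 *   bits 0-3  bitmap encoding code (enum drbd_bitmap_code)
 *   bits 4-6  number of pad bits at the end of the code string
 *   bit  7    set if the first run length describes set bits
 */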

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
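
/*
 * Editor's illustration of the encoder above: for a bitmap prefix of
 * 0 0 0 1 1 1 1 0 ... it emits the VLI-coded run lengths 3, 4, ... and
 * clears the "start" flag, because the first run consists of unset bits.
 * Had the bitmap started with a set bit, dcbp_set_start(p, 1) would be
 * recorded instead and the zero-length leading run would be skipped.
 */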

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (tconn->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in bytes, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return -EIO;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (mdev->tconn->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}
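
/*
 * Editor's note: _drbd_send_ack() expects its sector/blksize/block_id
 * arguments already in big-endian byte order.  drbd_send_ack_dp() and
 * drbd_send_ack_rp() forward fields that arrived from the wire (still
 * big-endian), while drbd_send_ack() and drbd_send_ack_ex() below
 * convert native values with cpu_to_be*() first.
 */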

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
				 digest, digest_size);
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--tconn->ko_count;
	if (!drop_it) {
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
}

static void drbd_update_congested(struct drbd_tconn *tconn)
{
	struct sock *sk = tconn->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
}
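
/*
 * Editor's example: drbd_update_congested() flags congestion once more
 * than 4/5 of the socket send buffer is queued; with sk_sndbuf at, say,
 * 128 KiB (a made-up value), NET_CONGESTED is set as soon as more than
 * ~102 KiB of unsent data has accumulated.
 */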

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = mdev->tconn->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		mdev->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = mdev->tconn->data.socket;
	mm_segment_t oldfs = get_fs();
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev->tconn);
	set_fs(KERNEL_DS);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(mdev->tconn, socket))
					break;
				continue;
			}
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
				 __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->tconn->flags);

	if (len == 0) {
		err = 0;
		mdev->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(mdev, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(mdev, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_conf *mdev,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(mdev, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->tconn->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}

/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	unsigned int dp_flags = 0;
	int dgs;
	int err;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (mdev->tconn->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		if (req->rq_state & RQ_EXP_WRITE_ACK)
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);
	if (dgs)
		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
			err = _drbd_send_bio(mdev, req->master_bio);
		else
			err = _drbd_send_zc_bio(mdev, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (dgs > 0 && dgs <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, dgs)) {
				dev_warn(DEV,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (dgs > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
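
/*
 * Editor's note: the dp_flags set in drbd_send_dblock() encode the
 * replication protocol per request.  DP_SEND_RECEIVE_ACK asks the peer
 * for a receive ack (protocol B), DP_SEND_WRITE_ACK for a write ack
 * (protocol C); with neither flag the write is fire-and-forget
 * (protocol A), which is why the payload is then copied out of the bio
 * via _drbd_send_bio() before the request can complete.
 */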

/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int dgs;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);

	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (dgs)
		drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(mdev, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == tconn->data.socket) {
		rcu_read_lock();
		tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(tconn);
	}
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
		/* THINK
		 * do we need to block DRBD_SIG if sock == &meta.socket ??
		 * otherwise wake_asender() might interrupt some send_*Ack !
		 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(tconn, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == tconn->data.socket)
		clear_bit(NET_CONGESTED, &tconn->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			conn_err(tconn, "%s_sendmsg returned %d\n",
				 sock == tconn->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}

/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(tconn, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}

static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		mdev->open_cnt++;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static void drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mdev->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
}

static void drbd_set_defaults(struct drbd_conf *mdev)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	mdev->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}
1879 * note: only assignments, no allocation in here */ 1880 1881 drbd_set_defaults(mdev); 1882 1883 atomic_set(&mdev->ap_bio_cnt, 0); 1884 atomic_set(&mdev->ap_pending_cnt, 0); 1885 atomic_set(&mdev->rs_pending_cnt, 0); 1886 atomic_set(&mdev->unacked_cnt, 0); 1887 atomic_set(&mdev->local_cnt, 0); 1888 atomic_set(&mdev->pp_in_use_by_net, 0); 1889 atomic_set(&mdev->rs_sect_in, 0); 1890 atomic_set(&mdev->rs_sect_ev, 0); 1891 atomic_set(&mdev->ap_in_flight, 0); 1892 atomic_set(&mdev->md_io_in_use, 0); 1893 1894 mutex_init(&mdev->own_state_mutex); 1895 mdev->state_mutex = &mdev->own_state_mutex; 1896 1897 spin_lock_init(&mdev->al_lock); 1898 spin_lock_init(&mdev->peer_seq_lock); 1899 1900 INIT_LIST_HEAD(&mdev->active_ee); 1901 INIT_LIST_HEAD(&mdev->sync_ee); 1902 INIT_LIST_HEAD(&mdev->done_ee); 1903 INIT_LIST_HEAD(&mdev->read_ee); 1904 INIT_LIST_HEAD(&mdev->net_ee); 1905 INIT_LIST_HEAD(&mdev->resync_reads); 1906 INIT_LIST_HEAD(&mdev->resync_work.list); 1907 INIT_LIST_HEAD(&mdev->unplug_work.list); 1908 INIT_LIST_HEAD(&mdev->go_diskless.list); 1909 INIT_LIST_HEAD(&mdev->md_sync_work.list); 1910 INIT_LIST_HEAD(&mdev->start_resync_work.list); 1911 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); 1912 1913 mdev->resync_work.cb = w_resync_timer; 1914 mdev->unplug_work.cb = w_send_write_hint; 1915 mdev->go_diskless.cb = w_go_diskless; 1916 mdev->md_sync_work.cb = w_md_sync; 1917 mdev->bm_io_work.w.cb = w_bitmap_io; 1918 mdev->start_resync_work.cb = w_start_resync; 1919 1920 mdev->resync_work.mdev = mdev; 1921 mdev->unplug_work.mdev = mdev; 1922 mdev->go_diskless.mdev = mdev; 1923 mdev->md_sync_work.mdev = mdev; 1924 mdev->bm_io_work.w.mdev = mdev; 1925 mdev->start_resync_work.mdev = mdev; 1926 1927 init_timer(&mdev->resync_timer); 1928 init_timer(&mdev->md_sync_timer); 1929 init_timer(&mdev->start_resync_timer); 1930 init_timer(&mdev->request_timer); 1931 mdev->resync_timer.function = resync_timer_fn; 1932 mdev->resync_timer.data = (unsigned long) mdev; 1933 mdev->md_sync_timer.function = md_sync_timer_fn; 1934 mdev->md_sync_timer.data = (unsigned long) mdev; 1935 mdev->start_resync_timer.function = start_resync_timer_fn; 1936 mdev->start_resync_timer.data = (unsigned long) mdev; 1937 mdev->request_timer.function = request_timer_fn; 1938 mdev->request_timer.data = (unsigned long) mdev; 1939 1940 init_waitqueue_head(&mdev->misc_wait); 1941 init_waitqueue_head(&mdev->state_wait); 1942 init_waitqueue_head(&mdev->ee_wait); 1943 init_waitqueue_head(&mdev->al_wait); 1944 init_waitqueue_head(&mdev->seq_wait); 1945 1946 mdev->resync_wenr = LC_FREE; 1947 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; 1948 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; 1949 } 1950 1951 void drbd_mdev_cleanup(struct drbd_conf *mdev) 1952 { 1953 int i; 1954 if (mdev->tconn->receiver.t_state != NONE) 1955 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", 1956 mdev->tconn->receiver.t_state); 1957 1958 mdev->al_writ_cnt = 1959 mdev->bm_writ_cnt = 1960 mdev->read_cnt = 1961 mdev->recv_cnt = 1962 mdev->send_cnt = 1963 mdev->writ_cnt = 1964 mdev->p_size = 1965 mdev->rs_start = 1966 mdev->rs_total = 1967 mdev->rs_failed = 0; 1968 mdev->rs_last_events = 0; 1969 mdev->rs_last_sect_ev = 0; 1970 for (i = 0; i < DRBD_SYNC_MARKS; i++) { 1971 mdev->rs_mark_left[i] = 0; 1972 mdev->rs_mark_time[i] = 0; 1973 } 1974 D_ASSERT(mdev->tconn->net_conf == NULL); 1975 1976 drbd_set_my_capacity(mdev, 0); 1977 if (mdev->bitmap) { 1978 /* maybe never allocated. 
*/ 1979 drbd_bm_resize(mdev, 0, 1); 1980 drbd_bm_cleanup(mdev); 1981 } 1982 1983 drbd_free_bc(mdev->ldev); 1984 mdev->ldev = NULL; 1985 1986 clear_bit(AL_SUSPENDED, &mdev->flags); 1987 1988 D_ASSERT(list_empty(&mdev->active_ee)); 1989 D_ASSERT(list_empty(&mdev->sync_ee)); 1990 D_ASSERT(list_empty(&mdev->done_ee)); 1991 D_ASSERT(list_empty(&mdev->read_ee)); 1992 D_ASSERT(list_empty(&mdev->net_ee)); 1993 D_ASSERT(list_empty(&mdev->resync_reads)); 1994 D_ASSERT(list_empty(&mdev->tconn->sender_work.q)); 1995 D_ASSERT(list_empty(&mdev->resync_work.list)); 1996 D_ASSERT(list_empty(&mdev->unplug_work.list)); 1997 D_ASSERT(list_empty(&mdev->go_diskless.list)); 1998 1999 drbd_set_defaults(mdev); 2000 } 2001 2002 2003 static void drbd_destroy_mempools(void) 2004 { 2005 struct page *page; 2006 2007 while (drbd_pp_pool) { 2008 page = drbd_pp_pool; 2009 drbd_pp_pool = (struct page *)page_private(page); 2010 __free_page(page); 2011 drbd_pp_vacant--; 2012 } 2013 2014 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */ 2015 2016 if (drbd_md_io_bio_set) 2017 bioset_free(drbd_md_io_bio_set); 2018 if (drbd_md_io_page_pool) 2019 mempool_destroy(drbd_md_io_page_pool); 2020 if (drbd_ee_mempool) 2021 mempool_destroy(drbd_ee_mempool); 2022 if (drbd_request_mempool) 2023 mempool_destroy(drbd_request_mempool); 2024 if (drbd_ee_cache) 2025 kmem_cache_destroy(drbd_ee_cache); 2026 if (drbd_request_cache) 2027 kmem_cache_destroy(drbd_request_cache); 2028 if (drbd_bm_ext_cache) 2029 kmem_cache_destroy(drbd_bm_ext_cache); 2030 if (drbd_al_ext_cache) 2031 kmem_cache_destroy(drbd_al_ext_cache); 2032 2033 drbd_md_io_bio_set = NULL; 2034 drbd_md_io_page_pool = NULL; 2035 drbd_ee_mempool = NULL; 2036 drbd_request_mempool = NULL; 2037 drbd_ee_cache = NULL; 2038 drbd_request_cache = NULL; 2039 drbd_bm_ext_cache = NULL; 2040 drbd_al_ext_cache = NULL; 2041 2042 return; 2043 } 2044 2045 static int drbd_create_mempools(void) 2046 { 2047 struct page *page; 2048 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; 2049 int i; 2050 2051 /* prepare our caches and mempools */ 2052 drbd_request_mempool = NULL; 2053 drbd_ee_cache = NULL; 2054 drbd_request_cache = NULL; 2055 drbd_bm_ext_cache = NULL; 2056 drbd_al_ext_cache = NULL; 2057 drbd_pp_pool = NULL; 2058 drbd_md_io_page_pool = NULL; 2059 drbd_md_io_bio_set = NULL; 2060 2061 /* caches */ 2062 drbd_request_cache = kmem_cache_create( 2063 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL); 2064 if (drbd_request_cache == NULL) 2065 goto Enomem; 2066 2067 drbd_ee_cache = kmem_cache_create( 2068 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL); 2069 if (drbd_ee_cache == NULL) 2070 goto Enomem; 2071 2072 drbd_bm_ext_cache = kmem_cache_create( 2073 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL); 2074 if (drbd_bm_ext_cache == NULL) 2075 goto Enomem; 2076 2077 drbd_al_ext_cache = kmem_cache_create( 2078 "drbd_al", sizeof(struct lc_element), 0, 0, NULL); 2079 if (drbd_al_ext_cache == NULL) 2080 goto Enomem; 2081 2082 /* mempools */ 2083 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0); 2084 if (drbd_md_io_bio_set == NULL) 2085 goto Enomem; 2086 2087 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0); 2088 if (drbd_md_io_page_pool == NULL) 2089 goto Enomem; 2090 2091 drbd_request_mempool = mempool_create(number, 2092 mempool_alloc_slab, mempool_free_slab, drbd_request_cache); 2093 if (drbd_request_mempool == NULL) 2094 goto Enomem; 2095 2096 drbd_ee_mempool = mempool_create(number, 2097 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache); 
2098 if (drbd_ee_mempool == NULL) 2099 goto Enomem; 2100 2101 /* drbd's page pool */ 2102 spin_lock_init(&drbd_pp_lock); 2103 2104 for (i = 0; i < number; i++) { 2105 page = alloc_page(GFP_HIGHUSER); 2106 if (!page) 2107 goto Enomem; 2108 set_page_private(page, (unsigned long)drbd_pp_pool); 2109 drbd_pp_pool = page; 2110 } 2111 drbd_pp_vacant = number; 2112 2113 return 0; 2114 2115 Enomem: 2116 drbd_destroy_mempools(); /* in case we allocated some */ 2117 return -ENOMEM; 2118 } 2119 2120 static int drbd_notify_sys(struct notifier_block *this, unsigned long code, 2121 void *unused) 2122 { 2123 /* just so we have it. you never know what interesting things we 2124 * might want to do here some day... 2125 */ 2126 2127 return NOTIFY_DONE; 2128 } 2129 2130 static struct notifier_block drbd_notifier = { 2131 .notifier_call = drbd_notify_sys, 2132 }; 2133 2134 static void drbd_release_all_peer_reqs(struct drbd_conf *mdev) 2135 { 2136 int rr; 2137 2138 rr = drbd_free_peer_reqs(mdev, &mdev->active_ee); 2139 if (rr) 2140 dev_err(DEV, "%d EEs in active list found!\n", rr); 2141 2142 rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee); 2143 if (rr) 2144 dev_err(DEV, "%d EEs in sync list found!\n", rr); 2145 2146 rr = drbd_free_peer_reqs(mdev, &mdev->read_ee); 2147 if (rr) 2148 dev_err(DEV, "%d EEs in read list found!\n", rr); 2149 2150 rr = drbd_free_peer_reqs(mdev, &mdev->done_ee); 2151 if (rr) 2152 dev_err(DEV, "%d EEs in done list found!\n", rr); 2153 2154 rr = drbd_free_peer_reqs(mdev, &mdev->net_ee); 2155 if (rr) 2156 dev_err(DEV, "%d EEs in net list found!\n", rr); 2157 } 2158 2159 /* caution. no locking. */ 2160 void drbd_minor_destroy(struct kref *kref) 2161 { 2162 struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref); 2163 struct drbd_tconn *tconn = mdev->tconn; 2164 2165 del_timer_sync(&mdev->request_timer); 2166 2167 /* paranoia asserts */ 2168 D_ASSERT(mdev->open_cnt == 0); 2169 /* end paranoia asserts */ 2170 2171 /* cleanup stuff that may have been allocated during 2172 * device (re-)configuration or state changes */ 2173 2174 if (mdev->this_bdev) 2175 bdput(mdev->this_bdev); 2176 2177 drbd_free_bc(mdev->ldev); 2178 mdev->ldev = NULL; 2179 2180 drbd_release_all_peer_reqs(mdev); 2181 2182 lc_destroy(mdev->act_log); 2183 lc_destroy(mdev->resync); 2184 2185 kfree(mdev->p_uuid); 2186 /* mdev->p_uuid = NULL; */ 2187 2188 if (mdev->bitmap) /* should no longer be there. */ 2189 drbd_bm_cleanup(mdev); 2190 __free_page(mdev->md_io_page); 2191 put_disk(mdev->vdisk); 2192 blk_cleanup_queue(mdev->rq_queue); 2193 kfree(mdev->rs_plan_s); 2194 kfree(mdev); 2195 2196 kref_put(&tconn->kref, &conn_destroy); 2197 } 2198 2199 /* One global retry thread, if we need to push back some bio and have it 2200 * reinserted through our make request function. 
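 *
 * The hand-off, in brief: drbd_restart_request() below moves the request
 * onto retry.writes and kicks retry.wq; do_retry() then splices that list
 * and resubmits each master bio. Sketch (mirrors the code that follows):
 *
 *	spin_lock_irqsave(&retry.lock, flags);
 *	list_move_tail(&req->tl_requests, &retry.writes);
 *	spin_unlock_irqrestore(&retry.lock, flags);
 *	queue_work(retry.wq, &retry.worker);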
2201 */
2202 static struct retry_worker {
2203 struct workqueue_struct *wq;
2204 struct work_struct worker;
2205
2206 spinlock_t lock;
2207 struct list_head writes;
2208 } retry;
2209
2210 static void do_retry(struct work_struct *ws)
2211 {
2212 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2213 LIST_HEAD(writes);
2214 struct drbd_request *req, *tmp;
2215
2216 spin_lock_irq(&retry->lock);
2217 list_splice_init(&retry->writes, &writes);
2218 spin_unlock_irq(&retry->lock);
2219
2220 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2221 struct drbd_conf *mdev = req->w.mdev;
2222 struct bio *bio = req->master_bio;
2223 unsigned long start_time = req->start_time;
2224 bool expected;
2225
2226 expected =
2227 expect(atomic_read(&req->completion_ref) == 0) &&
2228 expect(req->rq_state & RQ_POSTPONED) &&
2229 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2230 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2231
2232 if (!expected)
2233 dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
2234 req, atomic_read(&req->completion_ref),
2235 req->rq_state);
2236
2237 /* We still need to put one kref associated with the
2238 * "completion_ref" going zero in the code path that queued it
2239 * here. The request object may still be referenced by a
2240 * frozen local req->private_bio, in case we force-detached.
2241 */
2242 kref_put(&req->kref, drbd_req_destroy);
2243
2244 /* A single suspended or otherwise blocking device may stall
2245 * all others as well. Fortunately, this code path is to
2246 * recover from a situation that "should not happen":
2247 * concurrent writes in multi-primary setup.
2248 * In a "normal" lifecycle, this workqueue is supposed to be
2249 * destroyed without ever doing anything.
2250 * If it turns out to be an issue anyway, we can do per
2251 * resource (replication group) or per device (minor) retry
2252 * workqueues instead.
2253 */
2254
2255 /* We are not just doing generic_make_request(),
2256 * as we want to keep the start_time information. */
2257 inc_ap_bio(mdev);
2258 __drbd_make_request(mdev, bio, start_time);
2259 }
2260 }
2261
2262 void drbd_restart_request(struct drbd_request *req)
2263 {
2264 unsigned long flags;
2265 spin_lock_irqsave(&retry.lock, flags);
2266 list_move_tail(&req->tl_requests, &retry.writes);
2267 spin_unlock_irqrestore(&retry.lock, flags);
2268
2269 /* Drop the extra reference that would otherwise
2270 * have been dropped by complete_master_bio.
2271 * do_retry() needs to grab a new one. */
2272 dec_ap_bio(req->w.mdev);
2273
2274 queue_work(retry.wq, &retry.worker);
2275 }
2276
2277
2278 static void drbd_cleanup(void)
2279 {
2280 unsigned int i;
2281 struct drbd_conf *mdev;
2282 struct drbd_tconn *tconn, *tmp;
2283
2284 unregister_reboot_notifier(&drbd_notifier);
2285
2286 /* first remove proc,
2287 * drbdsetup uses its presence to detect
2288 * whether DRBD is loaded.
2289 * If we got stuck in proc removal,
2290 * but had netlink already deregistered,
2291 * some drbdsetup commands might wait forever
2292 * for an answer.
2293 */ 2294 if (drbd_proc) 2295 remove_proc_entry("drbd", NULL); 2296 2297 if (retry.wq) 2298 destroy_workqueue(retry.wq); 2299 2300 drbd_genl_unregister(); 2301 2302 idr_for_each_entry(&minors, mdev, i) { 2303 idr_remove(&minors, mdev_to_minor(mdev)); 2304 idr_remove(&mdev->tconn->volumes, mdev->vnr); 2305 destroy_workqueue(mdev->submit.wq); 2306 del_gendisk(mdev->vdisk); 2307 /* synchronize_rcu(); No other threads running at this point */ 2308 kref_put(&mdev->kref, &drbd_minor_destroy); 2309 } 2310 2311 /* not _rcu since, no other updater anymore. Genl already unregistered */ 2312 list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) { 2313 list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */ 2314 /* synchronize_rcu(); */ 2315 kref_put(&tconn->kref, &conn_destroy); 2316 } 2317 2318 drbd_destroy_mempools(); 2319 unregister_blkdev(DRBD_MAJOR, "drbd"); 2320 2321 idr_destroy(&minors); 2322 2323 printk(KERN_INFO "drbd: module cleanup done.\n"); 2324 } 2325 2326 /** 2327 * drbd_congested() - Callback for the flusher thread 2328 * @congested_data: User data 2329 * @bdi_bits: Bits the BDI flusher thread is currently interested in 2330 * 2331 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. 2332 */ 2333 static int drbd_congested(void *congested_data, int bdi_bits) 2334 { 2335 struct drbd_conf *mdev = congested_data; 2336 struct request_queue *q; 2337 char reason = '-'; 2338 int r = 0; 2339 2340 if (!may_inc_ap_bio(mdev)) { 2341 /* DRBD has frozen IO */ 2342 r = bdi_bits; 2343 reason = 'd'; 2344 goto out; 2345 } 2346 2347 if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) { 2348 r |= (1 << BDI_async_congested); 2349 /* Without good local data, we would need to read from remote, 2350 * and that would need the worker thread as well, which is 2351 * currently blocked waiting for that usermode helper to 2352 * finish. 2353 */ 2354 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) 2355 r |= (1 << BDI_sync_congested); 2356 else 2357 put_ldev(mdev); 2358 r &= bdi_bits; 2359 reason = 'c'; 2360 goto out; 2361 } 2362 2363 if (get_ldev(mdev)) { 2364 q = bdev_get_queue(mdev->ldev->backing_bdev); 2365 r = bdi_congested(&q->backing_dev_info, bdi_bits); 2366 put_ldev(mdev); 2367 if (r) 2368 reason = 'b'; 2369 } 2370 2371 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) { 2372 r |= (1 << BDI_async_congested); 2373 reason = reason == 'b' ? 
'a' : 'n'; 2374 } 2375 2376 out: 2377 mdev->congestion_reason = reason; 2378 return r; 2379 } 2380 2381 static void drbd_init_workqueue(struct drbd_work_queue* wq) 2382 { 2383 spin_lock_init(&wq->q_lock); 2384 INIT_LIST_HEAD(&wq->q); 2385 init_waitqueue_head(&wq->q_wait); 2386 } 2387 2388 struct drbd_tconn *conn_get_by_name(const char *name) 2389 { 2390 struct drbd_tconn *tconn; 2391 2392 if (!name || !name[0]) 2393 return NULL; 2394 2395 rcu_read_lock(); 2396 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) { 2397 if (!strcmp(tconn->name, name)) { 2398 kref_get(&tconn->kref); 2399 goto found; 2400 } 2401 } 2402 tconn = NULL; 2403 found: 2404 rcu_read_unlock(); 2405 return tconn; 2406 } 2407 2408 struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len, 2409 void *peer_addr, int peer_addr_len) 2410 { 2411 struct drbd_tconn *tconn; 2412 2413 rcu_read_lock(); 2414 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) { 2415 if (tconn->my_addr_len == my_addr_len && 2416 tconn->peer_addr_len == peer_addr_len && 2417 !memcmp(&tconn->my_addr, my_addr, my_addr_len) && 2418 !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) { 2419 kref_get(&tconn->kref); 2420 goto found; 2421 } 2422 } 2423 tconn = NULL; 2424 found: 2425 rcu_read_unlock(); 2426 return tconn; 2427 } 2428 2429 static int drbd_alloc_socket(struct drbd_socket *socket) 2430 { 2431 socket->rbuf = (void *) __get_free_page(GFP_KERNEL); 2432 if (!socket->rbuf) 2433 return -ENOMEM; 2434 socket->sbuf = (void *) __get_free_page(GFP_KERNEL); 2435 if (!socket->sbuf) 2436 return -ENOMEM; 2437 return 0; 2438 } 2439 2440 static void drbd_free_socket(struct drbd_socket *socket) 2441 { 2442 free_page((unsigned long) socket->sbuf); 2443 free_page((unsigned long) socket->rbuf); 2444 } 2445 2446 void conn_free_crypto(struct drbd_tconn *tconn) 2447 { 2448 drbd_free_sock(tconn); 2449 2450 crypto_free_hash(tconn->csums_tfm); 2451 crypto_free_hash(tconn->verify_tfm); 2452 crypto_free_hash(tconn->cram_hmac_tfm); 2453 crypto_free_hash(tconn->integrity_tfm); 2454 crypto_free_hash(tconn->peer_integrity_tfm); 2455 kfree(tconn->int_dig_in); 2456 kfree(tconn->int_dig_vv); 2457 2458 tconn->csums_tfm = NULL; 2459 tconn->verify_tfm = NULL; 2460 tconn->cram_hmac_tfm = NULL; 2461 tconn->integrity_tfm = NULL; 2462 tconn->peer_integrity_tfm = NULL; 2463 tconn->int_dig_in = NULL; 2464 tconn->int_dig_vv = NULL; 2465 } 2466 2467 int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts) 2468 { 2469 cpumask_var_t new_cpu_mask; 2470 int err; 2471 2472 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) 2473 return -ENOMEM; 2474 /* 2475 retcode = ERR_NOMEM; 2476 drbd_msg_put_info("unable to allocate cpumask"); 2477 */ 2478 2479 /* silently ignore cpu mask on UP kernel */ 2480 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { 2481 /* FIXME: Get rid of constant 32 here */ 2482 err = bitmap_parse(res_opts->cpu_mask, 32, 2483 cpumask_bits(new_cpu_mask), nr_cpu_ids); 2484 if (err) { 2485 conn_warn(tconn, "bitmap_parse() failed with %d\n", err); 2486 /* retcode = ERR_CPU_MASK_PARSE; */ 2487 goto fail; 2488 } 2489 } 2490 tconn->res_opts = *res_opts; 2491 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) { 2492 cpumask_copy(tconn->cpu_mask, new_cpu_mask); 2493 drbd_calc_cpu_mask(tconn); 2494 tconn->receiver.reset_cpu_mask = 1; 2495 tconn->asender.reset_cpu_mask = 1; 2496 tconn->worker.reset_cpu_mask = 1; 2497 } 2498 err = 0; 2499 2500 fail: 2501 free_cpumask_var(new_cpu_mask); 2502 return err; 2503 2504 } 2505 2506 /* caller must be under 
genl_lock() */ 2507 struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts) 2508 { 2509 struct drbd_tconn *tconn; 2510 2511 tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL); 2512 if (!tconn) 2513 return NULL; 2514 2515 tconn->name = kstrdup(name, GFP_KERNEL); 2516 if (!tconn->name) 2517 goto fail; 2518 2519 if (drbd_alloc_socket(&tconn->data)) 2520 goto fail; 2521 if (drbd_alloc_socket(&tconn->meta)) 2522 goto fail; 2523 2524 if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL)) 2525 goto fail; 2526 2527 if (set_resource_options(tconn, res_opts)) 2528 goto fail; 2529 2530 tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); 2531 if (!tconn->current_epoch) 2532 goto fail; 2533 2534 INIT_LIST_HEAD(&tconn->transfer_log); 2535 2536 INIT_LIST_HEAD(&tconn->current_epoch->list); 2537 tconn->epochs = 1; 2538 spin_lock_init(&tconn->epoch_lock); 2539 tconn->write_ordering = WO_bdev_flush; 2540 2541 tconn->send.seen_any_write_yet = false; 2542 tconn->send.current_epoch_nr = 0; 2543 tconn->send.current_epoch_writes = 0; 2544 2545 tconn->cstate = C_STANDALONE; 2546 mutex_init(&tconn->cstate_mutex); 2547 spin_lock_init(&tconn->req_lock); 2548 mutex_init(&tconn->conf_update); 2549 init_waitqueue_head(&tconn->ping_wait); 2550 idr_init(&tconn->volumes); 2551 2552 drbd_init_workqueue(&tconn->sender_work); 2553 mutex_init(&tconn->data.mutex); 2554 mutex_init(&tconn->meta.mutex); 2555 2556 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver"); 2557 drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker"); 2558 drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender"); 2559 2560 kref_init(&tconn->kref); 2561 list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns); 2562 2563 return tconn; 2564 2565 fail: 2566 kfree(tconn->current_epoch); 2567 free_cpumask_var(tconn->cpu_mask); 2568 drbd_free_socket(&tconn->meta); 2569 drbd_free_socket(&tconn->data); 2570 kfree(tconn->name); 2571 kfree(tconn); 2572 2573 return NULL; 2574 } 2575 2576 void conn_destroy(struct kref *kref) 2577 { 2578 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref); 2579 2580 if (atomic_read(&tconn->current_epoch->epoch_size) != 0) 2581 conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size)); 2582 kfree(tconn->current_epoch); 2583 2584 idr_destroy(&tconn->volumes); 2585 2586 free_cpumask_var(tconn->cpu_mask); 2587 drbd_free_socket(&tconn->meta); 2588 drbd_free_socket(&tconn->data); 2589 kfree(tconn->name); 2590 kfree(tconn->int_dig_in); 2591 kfree(tconn->int_dig_vv); 2592 kfree(tconn); 2593 } 2594 2595 int init_submitter(struct drbd_conf *mdev) 2596 { 2597 /* opencoded create_singlethread_workqueue(), 2598 * to be able to say "drbd%d", ..., minor */ 2599 mdev->submit.wq = alloc_workqueue("drbd%u_submit", 2600 WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor); 2601 if (!mdev->submit.wq) 2602 return -ENOMEM; 2603 2604 INIT_WORK(&mdev->submit.worker, do_submit); 2605 spin_lock_init(&mdev->submit.lock); 2606 INIT_LIST_HEAD(&mdev->submit.writes); 2607 return 0; 2608 } 2609 2610 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr) 2611 { 2612 struct drbd_conf *mdev; 2613 struct gendisk *disk; 2614 struct request_queue *q; 2615 int vnr_got = vnr; 2616 int minor_got = minor; 2617 enum drbd_ret_code err = ERR_NOMEM; 2618 2619 mdev = minor_to_mdev(minor); 2620 if (mdev) 2621 return ERR_MINOR_EXISTS; 2622 2623 /* GFP_KERNEL, we are outside of all write-out paths */ 2624 mdev = kzalloc(sizeof(struct drbd_conf), 
GFP_KERNEL);
2625 if (!mdev)
2626 return ERR_NOMEM;
2627
2628 kref_get(&tconn->kref);
2629 mdev->tconn = tconn;
2630
2631 mdev->minor = minor;
2632 mdev->vnr = vnr;
2633
2634 drbd_init_set_defaults(mdev);
2635
2636 q = blk_alloc_queue(GFP_KERNEL);
2637 if (!q)
2638 goto out_no_q;
2639 mdev->rq_queue = q;
2640 q->queuedata = mdev;
2641
2642 disk = alloc_disk(1);
2643 if (!disk)
2644 goto out_no_disk;
2645 mdev->vdisk = disk;
2646
2647 set_disk_ro(disk, true);
2648
2649 disk->queue = q;
2650 disk->major = DRBD_MAJOR;
2651 disk->first_minor = minor;
2652 disk->fops = &drbd_ops;
2653 sprintf(disk->disk_name, "drbd%d", minor);
2654 disk->private_data = mdev;
2655
2656 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2657 /* we have no partitions. we contain only ourselves. */
2658 mdev->this_bdev->bd_contains = mdev->this_bdev;
2659
2660 q->backing_dev_info.congested_fn = drbd_congested;
2661 q->backing_dev_info.congested_data = mdev;
2662
2663 blk_queue_make_request(q, drbd_make_request);
2664 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
2665 /* Setting the max_hw_sectors to an odd value of 8kibyte here.
2666 This triggers a max_bio_size message upon first attach or connect */
2667 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2668 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2669 blk_queue_merge_bvec(q, drbd_merge_bvec);
2670 q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
2671
2672 mdev->md_io_page = alloc_page(GFP_KERNEL);
2673 if (!mdev->md_io_page)
2674 goto out_no_io_page;
2675
2676 if (drbd_bm_init(mdev))
2677 goto out_no_bitmap;
2678 mdev->read_requests = RB_ROOT;
2679 mdev->write_requests = RB_ROOT;
2680
2681 minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
2682 if (minor_got < 0) {
2683 if (minor_got == -ENOSPC) {
2684 err = ERR_MINOR_EXISTS;
2685 drbd_msg_put_info("requested minor exists already");
2686 }
2687 goto out_no_minor_idr;
2688 }
2689
2690 vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
2691 if (vnr_got < 0) {
2692 if (vnr_got == -ENOSPC) {
2693 err = ERR_INVALID_REQUEST;
2694 drbd_msg_put_info("requested volume exists already");
2695 }
2696 goto out_idr_remove_minor;
2697 }
2698
2699 if (init_submitter(mdev)) {
2700 err = ERR_NOMEM;
2701 drbd_msg_put_info("unable to create submit workqueue");
2702 goto out_idr_remove_vol;
2703 }
2704
2705 add_disk(disk);
2706 kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
2707
2708 /* inherit the connection state */
2709 mdev->state.conn = tconn->cstate;
2710 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2711 drbd_connected(mdev);
2712
2713 return NO_ERROR;
2714
2715 out_idr_remove_vol:
2716 idr_remove(&tconn->volumes, vnr_got);
2717 out_idr_remove_minor:
2718 idr_remove(&minors, minor_got);
2719 synchronize_rcu();
2720 out_no_minor_idr:
2721 drbd_bm_cleanup(mdev);
2722 out_no_bitmap:
2723 __free_page(mdev->md_io_page);
2724 out_no_io_page:
2725 put_disk(disk);
2726 out_no_disk:
2727 blk_cleanup_queue(q);
2728 out_no_q:
2729 kfree(mdev);
2730 kref_put(&tconn->kref, &conn_destroy);
2731 return err;
2732 }
2733
2734 int __init drbd_init(void)
2735 {
2736 int err;
2737
2738 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
2739 printk(KERN_ERR
2740 "drbd: invalid minor_count (%d)\n", minor_count);
2741 #ifdef MODULE
2742 return -EINVAL;
2743 #else
2744 minor_count = DRBD_MINOR_COUNT_DEF;
2745 #endif
2746 }
2747
2748 err = register_blkdev(DRBD_MAJOR, "drbd");
2749 if (err) {
2750 printk(KERN_ERR
2751 "drbd: unable to
register block device major %d\n", 2752 DRBD_MAJOR); 2753 return err; 2754 } 2755 2756 register_reboot_notifier(&drbd_notifier); 2757 2758 /* 2759 * allocate all necessary structs 2760 */ 2761 init_waitqueue_head(&drbd_pp_wait); 2762 2763 drbd_proc = NULL; /* play safe for drbd_cleanup */ 2764 idr_init(&minors); 2765 2766 rwlock_init(&global_state_lock); 2767 INIT_LIST_HEAD(&drbd_tconns); 2768 2769 err = drbd_genl_register(); 2770 if (err) { 2771 printk(KERN_ERR "drbd: unable to register generic netlink family\n"); 2772 goto fail; 2773 } 2774 2775 err = drbd_create_mempools(); 2776 if (err) 2777 goto fail; 2778 2779 err = -ENOMEM; 2780 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); 2781 if (!drbd_proc) { 2782 printk(KERN_ERR "drbd: unable to register proc file\n"); 2783 goto fail; 2784 } 2785 2786 retry.wq = create_singlethread_workqueue("drbd-reissue"); 2787 if (!retry.wq) { 2788 printk(KERN_ERR "drbd: unable to create retry workqueue\n"); 2789 goto fail; 2790 } 2791 INIT_WORK(&retry.worker, do_retry); 2792 spin_lock_init(&retry.lock); 2793 INIT_LIST_HEAD(&retry.writes); 2794 2795 printk(KERN_INFO "drbd: initialized. " 2796 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", 2797 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX); 2798 printk(KERN_INFO "drbd: %s\n", drbd_buildtag()); 2799 printk(KERN_INFO "drbd: registered as block device major %d\n", 2800 DRBD_MAJOR); 2801 2802 return 0; /* Success! */ 2803 2804 fail: 2805 drbd_cleanup(); 2806 if (err == -ENOMEM) 2807 printk(KERN_ERR "drbd: ran out of memory\n"); 2808 else 2809 printk(KERN_ERR "drbd: initialization failure\n"); 2810 return err; 2811 } 2812 2813 void drbd_free_bc(struct drbd_backing_dev *ldev) 2814 { 2815 if (ldev == NULL) 2816 return; 2817 2818 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2819 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2820 2821 kfree(ldev->disk_conf); 2822 kfree(ldev); 2823 } 2824 2825 void drbd_free_sock(struct drbd_tconn *tconn) 2826 { 2827 if (tconn->data.socket) { 2828 mutex_lock(&tconn->data.mutex); 2829 kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR); 2830 sock_release(tconn->data.socket); 2831 tconn->data.socket = NULL; 2832 mutex_unlock(&tconn->data.mutex); 2833 } 2834 if (tconn->meta.socket) { 2835 mutex_lock(&tconn->meta.mutex); 2836 kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR); 2837 sock_release(tconn->meta.socket); 2838 tconn->meta.socket = NULL; 2839 mutex_unlock(&tconn->meta.mutex); 2840 } 2841 } 2842 2843 /* meta data management */ 2844 2845 void conn_md_sync(struct drbd_tconn *tconn) 2846 { 2847 struct drbd_conf *mdev; 2848 int vnr; 2849 2850 rcu_read_lock(); 2851 idr_for_each_entry(&tconn->volumes, mdev, vnr) { 2852 kref_get(&mdev->kref); 2853 rcu_read_unlock(); 2854 drbd_md_sync(mdev); 2855 kref_put(&mdev->kref, &drbd_minor_destroy); 2856 rcu_read_lock(); 2857 } 2858 rcu_read_unlock(); 2859 } 2860 2861 /* aligned 4kByte */ 2862 struct meta_data_on_disk { 2863 u64 la_size_sect; /* last agreed size. */ 2864 u64 uuid[UI_SIZE]; /* UUIDs. 
*/ 2865 u64 device_uuid; 2866 u64 reserved_u64_1; 2867 u32 flags; /* MDF */ 2868 u32 magic; 2869 u32 md_size_sect; 2870 u32 al_offset; /* offset to this block */ 2871 u32 al_nr_extents; /* important for restoring the AL (userspace) */ 2872 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */ 2873 u32 bm_offset; /* offset to the bitmap, from here */ 2874 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ 2875 u32 la_peer_max_bio_size; /* last peer max_bio_size */ 2876 2877 /* see al_tr_number_to_on_disk_sector() */ 2878 u32 al_stripes; 2879 u32 al_stripe_size_4k; 2880 2881 u8 reserved_u8[4096 - (7*8 + 10*4)]; 2882 } __packed; 2883 2884 2885 2886 void drbd_md_write(struct drbd_conf *mdev, void *b) 2887 { 2888 struct meta_data_on_disk *buffer = b; 2889 sector_t sector; 2890 int i; 2891 2892 memset(buffer, 0, sizeof(*buffer)); 2893 2894 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); 2895 for (i = UI_CURRENT; i < UI_SIZE; i++) 2896 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]); 2897 buffer->flags = cpu_to_be32(mdev->ldev->md.flags); 2898 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN); 2899 2900 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect); 2901 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset); 2902 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements); 2903 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE); 2904 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); 2905 2906 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); 2907 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size); 2908 2909 buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes); 2910 buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k); 2911 2912 D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset); 2913 sector = mdev->ldev->md.md_offset; 2914 2915 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { 2916 /* this was a try anyways ... */ 2917 dev_err(DEV, "meta data update failed!\n"); 2918 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); 2919 } 2920 } 2921 2922 /** 2923 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set 2924 * @mdev: DRBD device. 2925 */ 2926 void drbd_md_sync(struct drbd_conf *mdev) 2927 { 2928 struct meta_data_on_disk *buffer; 2929 2930 /* Don't accidentally change the DRBD meta data layout. */ 2931 BUILD_BUG_ON(UI_SIZE != 4); 2932 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096); 2933 2934 del_timer(&mdev->md_sync_timer); 2935 /* timer may be rearmed by drbd_md_mark_dirty() now. */ 2936 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) 2937 return; 2938 2939 /* We use here D_FAILED and not D_ATTACHING because we try to write 2940 * metadata even if we detach due to a disk failure! */ 2941 if (!get_ldev_if_state(mdev, D_FAILED)) 2942 return; 2943 2944 buffer = drbd_md_get_buffer(mdev); 2945 if (!buffer) 2946 goto out; 2947 2948 drbd_md_write(mdev, buffer); 2949 2950 /* Update mdev->ldev->md.la_size_sect, 2951 * since we updated it on metadata. 
*/
2952 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
2953
2954 drbd_md_put_buffer(mdev);
2955 out:
2956 put_ldev(mdev);
2957 }
2958
2959 static int check_activity_log_stripe_size(struct drbd_conf *mdev,
2960 struct meta_data_on_disk *on_disk,
2961 struct drbd_md *in_core)
2962 {
2963 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
2964 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
2965 u64 al_size_4k;
2966
2967 /* both not set: default to old fixed size activity log */
2968 if (al_stripes == 0 && al_stripe_size_4k == 0) {
2969 al_stripes = 1;
2970 al_stripe_size_4k = MD_32kB_SECT/8;
2971 }
2972
2973 /* some paranoia plausibility checks */
2974
2975 /* we need both values to be set */
2976 if (al_stripes == 0 || al_stripe_size_4k == 0)
2977 goto err;
2978
2979 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
2980
2981 /* Upper limit of activity log area, to avoid potential overflow
2982 * problems in al_tr_number_to_on_disk_sector(). Right now, more
2983 * than 72 * 4k blocks total would only increase the amount of history,
2984 * so limiting this arbitrarily to 16 GB is not a real limitation ;-) */
2985 if (al_size_4k > (16 * 1024 * 1024/4))
2986 goto err;
2987
2988 /* Lower limit: we need at least 8 transaction slots (32kB)
2989 * to not break existing setups */
2990 if (al_size_4k < MD_32kB_SECT/8)
2991 goto err;
2992
2993 in_core->al_stripe_size_4k = al_stripe_size_4k;
2994 in_core->al_stripes = al_stripes;
2995 in_core->al_size_4k = al_size_4k;
2996
2997 return 0;
2998 err:
2999 dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3000 al_stripes, al_stripe_size_4k);
3001 return -EINVAL;
3002 }
3003
3004 static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3005 {
3006 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3007 struct drbd_md *in_core = &bdev->md;
3008 s32 on_disk_al_sect;
3009 s32 on_disk_bm_sect;
3010
3011 /* The on-disk size of the activity log, calculated from offsets, and
3012 * the size of the activity log calculated from the stripe settings,
3013 * should match.
3014 * Though we could relax this a bit: it is ok if the striped activity log
3015 * fits into the available on-disk activity log size.
3016 * Right now, that would break how resize is implemented.
3017 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3018 * of possible unused padding space in the on disk layout. */
3019 if (in_core->al_offset < 0) {
3020 if (in_core->bm_offset > in_core->al_offset)
3021 goto err;
3022 on_disk_al_sect = -in_core->al_offset;
3023 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3024 } else {
3025 if (in_core->al_offset != MD_4kB_SECT)
3026 goto err;
3027 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3028 goto err;
3029
3030 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3031 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3032 }
3033
3034 /* old fixed size meta data is exactly that: fixed.
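 *
 * Spelled out for fixed-size (indexed) meta data, i.e. meta_dev_idx >= 0,
 * the layout enforced below is (values in 512 byte sectors; summary only):
 *
 *	md_size_sect = MD_128MB_SECT  (one fixed 128 MB slot)
 *	al_offset    = MD_4kB_SECT    (activity log right after the superblock)
 *	bm_offset    = MD_4kB_SECT + MD_32kB_SECT  (bitmap after the 32 kB AL)
 *	al_stripes   = 1, al_stripe_size_4k = MD_32kB_SECT/8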
*/ 3035 if (in_core->meta_dev_idx >= 0) { 3036 if (in_core->md_size_sect != MD_128MB_SECT 3037 || in_core->al_offset != MD_4kB_SECT 3038 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT 3039 || in_core->al_stripes != 1 3040 || in_core->al_stripe_size_4k != MD_32kB_SECT/8) 3041 goto err; 3042 } 3043 3044 if (capacity < in_core->md_size_sect) 3045 goto err; 3046 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev)) 3047 goto err; 3048 3049 /* should be aligned, and at least 32k */ 3050 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT)) 3051 goto err; 3052 3053 /* should fit (for now: exactly) into the available on-disk space; 3054 * overflow prevention is in check_activity_log_stripe_size() above. */ 3055 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT) 3056 goto err; 3057 3058 /* again, should be aligned */ 3059 if (in_core->bm_offset & 7) 3060 goto err; 3061 3062 /* FIXME check for device grow with flex external meta data? */ 3063 3064 /* can the available bitmap space cover the last agreed device size? */ 3065 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512) 3066 goto err; 3067 3068 return 0; 3069 3070 err: 3071 dev_err(DEV, "meta data offsets don't make sense: idx=%d " 3072 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, " 3073 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n", 3074 in_core->meta_dev_idx, 3075 in_core->al_stripes, in_core->al_stripe_size_4k, 3076 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect, 3077 (unsigned long long)in_core->la_size_sect, 3078 (unsigned long long)capacity); 3079 3080 return -EINVAL; 3081 } 3082 3083 3084 /** 3085 * drbd_md_read() - Reads in the meta data super block 3086 * @mdev: DRBD device. 3087 * @bdev: Device from which the meta data should be read in. 3088 * 3089 * Return NO_ERROR on success, and an enum drbd_ret_code in case 3090 * something goes wrong. 3091 * 3092 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS, 3093 * even before @bdev is assigned to @mdev->ldev. 3094 */ 3095 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) 3096 { 3097 struct meta_data_on_disk *buffer; 3098 u32 magic, flags; 3099 int i, rv = NO_ERROR; 3100 3101 if (mdev->state.disk != D_DISKLESS) 3102 return ERR_DISK_CONFIGURED; 3103 3104 buffer = drbd_md_get_buffer(mdev); 3105 if (!buffer) 3106 return ERR_NOMEM; 3107 3108 /* First, figure out where our meta data superblock is located, 3109 * and read it. */ 3110 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx; 3111 bdev->md.md_offset = drbd_md_ss(bdev); 3112 3113 if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) { 3114 /* NOTE: can't do normal error processing here as this is 3115 called BEFORE disk is attached */ 3116 dev_err(DEV, "Error while reading metadata.\n"); 3117 rv = ERR_IO_MD_DISK; 3118 goto err; 3119 } 3120 3121 magic = be32_to_cpu(buffer->magic); 3122 flags = be32_to_cpu(buffer->flags); 3123 if (magic == DRBD_MD_MAGIC_84_UNCLEAN || 3124 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) { 3125 /* btw: that's Activity Log clean, not "all" clean. */ 3126 dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n"); 3127 rv = ERR_MD_UNCLEAN; 3128 goto err; 3129 } 3130 3131 rv = ERR_MD_INVALID; 3132 if (magic != DRBD_MD_MAGIC_08) { 3133 if (magic == DRBD_MD_MAGIC_07) 3134 dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); 3135 else 3136 dev_err(DEV, "Meta data magic not found. 
Did you \"drbdadm create-md\"?\n");
3137 goto err;
3138 }
3139
3140 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3141 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3142 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3143 goto err;
3144 }
3145
3146
3147 /* convert to in_core endian */
3148 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3149 for (i = UI_CURRENT; i < UI_SIZE; i++)
3150 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3151 bdev->md.flags = be32_to_cpu(buffer->flags);
3152 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3153
3154 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3155 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3156 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3157
3158 if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
3159 goto err;
3160 if (check_offsets_and_sizes(mdev, bdev))
3161 goto err;
3162
3163 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3164 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3165 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3166 goto err;
3167 }
3168 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3169 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3170 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3171 goto err;
3172 }
3173
3174 rv = NO_ERROR;
3175
3176 spin_lock_irq(&mdev->tconn->req_lock);
3177 if (mdev->state.conn < C_CONNECTED) {
3178 unsigned int peer;
3179 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3180 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3181 mdev->peer_max_bio_size = peer;
3182 }
3183 spin_unlock_irq(&mdev->tconn->req_lock);
3184
3185 err:
3186 drbd_md_put_buffer(mdev);
3187
3188 return rv;
3189 }
3190
3191 /**
3192 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3193 * @mdev: DRBD device.
3194 *
3195 * Call this function if you change anything that should be written to
3196 * the meta-data super block. This function sets MD_DIRTY, and arms a
3197 * timer that ensures drbd_md_sync() gets called within five seconds
 * (within one second in DEBUG builds).
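 *
 * Typical call pattern (compare __drbd_uuid_set() below; the actual
 * write-out happens later, from drbd_md_sync()):
 *
 *	mdev->ldev->md.uuid[idx] = val;
 *	drbd_md_mark_dirty(mdev);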
3198 */ 3199 #ifdef DEBUG 3200 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func) 3201 { 3202 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) { 3203 mod_timer(&mdev->md_sync_timer, jiffies + HZ); 3204 mdev->last_md_mark_dirty.line = line; 3205 mdev->last_md_mark_dirty.func = func; 3206 } 3207 } 3208 #else 3209 void drbd_md_mark_dirty(struct drbd_conf *mdev) 3210 { 3211 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) 3212 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); 3213 } 3214 #endif 3215 3216 void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) 3217 { 3218 int i; 3219 3220 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) 3221 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; 3222 } 3223 3224 void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3225 { 3226 if (idx == UI_CURRENT) { 3227 if (mdev->state.role == R_PRIMARY) 3228 val |= 1; 3229 else 3230 val &= ~((u64)1); 3231 3232 drbd_set_ed_uuid(mdev, val); 3233 } 3234 3235 mdev->ldev->md.uuid[idx] = val; 3236 drbd_md_mark_dirty(mdev); 3237 } 3238 3239 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3240 { 3241 unsigned long flags; 3242 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); 3243 __drbd_uuid_set(mdev, idx, val); 3244 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); 3245 } 3246 3247 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3248 { 3249 unsigned long flags; 3250 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); 3251 if (mdev->ldev->md.uuid[idx]) { 3252 drbd_uuid_move_history(mdev); 3253 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; 3254 } 3255 __drbd_uuid_set(mdev, idx, val); 3256 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); 3257 } 3258 3259 /** 3260 * drbd_uuid_new_current() - Creates a new current UUID 3261 * @mdev: DRBD device. 3262 * 3263 * Creates a new current UUID, and rotates the old current UUID into 3264 * the bitmap slot. Causes an incremental resync upon next connect. 
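 *
 * In effect (a sketch of the rotation performed below; the lowest bit of
 * the new value encodes the current role):
 *
 *	md.uuid[UI_BITMAP]  = md.uuid[UI_CURRENT];
 *	md.uuid[UI_CURRENT] = <64 bit random value>;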
3265 */ 3266 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) 3267 { 3268 u64 val; 3269 unsigned long long bm_uuid; 3270 3271 get_random_bytes(&val, sizeof(u64)); 3272 3273 spin_lock_irq(&mdev->ldev->md.uuid_lock); 3274 bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; 3275 3276 if (bm_uuid) 3277 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); 3278 3279 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; 3280 __drbd_uuid_set(mdev, UI_CURRENT, val); 3281 spin_unlock_irq(&mdev->ldev->md.uuid_lock); 3282 3283 drbd_print_uuids(mdev, "new current UUID"); 3284 /* get it to stable storage _now_ */ 3285 drbd_md_sync(mdev); 3286 } 3287 3288 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) 3289 { 3290 unsigned long flags; 3291 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) 3292 return; 3293 3294 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); 3295 if (val == 0) { 3296 drbd_uuid_move_history(mdev); 3297 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; 3298 mdev->ldev->md.uuid[UI_BITMAP] = 0; 3299 } else { 3300 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; 3301 if (bm_uuid) 3302 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); 3303 3304 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); 3305 } 3306 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); 3307 3308 drbd_md_mark_dirty(mdev); 3309 } 3310 3311 /** 3312 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() 3313 * @mdev: DRBD device. 3314 * 3315 * Sets all bits in the bitmap and writes the whole bitmap to stable storage. 3316 */ 3317 int drbd_bmio_set_n_write(struct drbd_conf *mdev) 3318 { 3319 int rv = -EIO; 3320 3321 if (get_ldev_if_state(mdev, D_ATTACHING)) { 3322 drbd_md_set_flag(mdev, MDF_FULL_SYNC); 3323 drbd_md_sync(mdev); 3324 drbd_bm_set_all(mdev); 3325 3326 rv = drbd_bm_write(mdev); 3327 3328 if (!rv) { 3329 drbd_md_clear_flag(mdev, MDF_FULL_SYNC); 3330 drbd_md_sync(mdev); 3331 } 3332 3333 put_ldev(mdev); 3334 } 3335 3336 return rv; 3337 } 3338 3339 /** 3340 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() 3341 * @mdev: DRBD device. 3342 * 3343 * Clears all bits in the bitmap and writes the whole bitmap to stable storage. 
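 *
 * Like drbd_bmio_set_n_write() above, this is meant to be passed as io_fn
 * to drbd_bitmap_io() or drbd_queue_bitmap_io(), e.g. (illustrative only,
 * with a made-up "why" string; BM_LOCKED_MASK is from enum bm_flag):
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
 *			"clear_n_write", BM_LOCKED_MASK);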
3344 */ 3345 int drbd_bmio_clear_n_write(struct drbd_conf *mdev) 3346 { 3347 int rv = -EIO; 3348 3349 drbd_resume_al(mdev); 3350 if (get_ldev_if_state(mdev, D_ATTACHING)) { 3351 drbd_bm_clear_all(mdev); 3352 rv = drbd_bm_write(mdev); 3353 put_ldev(mdev); 3354 } 3355 3356 return rv; 3357 } 3358 3359 static int w_bitmap_io(struct drbd_work *w, int unused) 3360 { 3361 struct bm_io_work *work = container_of(w, struct bm_io_work, w); 3362 struct drbd_conf *mdev = w->mdev; 3363 int rv = -EIO; 3364 3365 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); 3366 3367 if (get_ldev(mdev)) { 3368 drbd_bm_lock(mdev, work->why, work->flags); 3369 rv = work->io_fn(mdev); 3370 drbd_bm_unlock(mdev); 3371 put_ldev(mdev); 3372 } 3373 3374 clear_bit_unlock(BITMAP_IO, &mdev->flags); 3375 wake_up(&mdev->misc_wait); 3376 3377 if (work->done) 3378 work->done(mdev, rv); 3379 3380 clear_bit(BITMAP_IO_QUEUED, &mdev->flags); 3381 work->why = NULL; 3382 work->flags = 0; 3383 3384 return 0; 3385 } 3386 3387 void drbd_ldev_destroy(struct drbd_conf *mdev) 3388 { 3389 lc_destroy(mdev->resync); 3390 mdev->resync = NULL; 3391 lc_destroy(mdev->act_log); 3392 mdev->act_log = NULL; 3393 __no_warn(local, 3394 drbd_free_bc(mdev->ldev); 3395 mdev->ldev = NULL;); 3396 3397 clear_bit(GO_DISKLESS, &mdev->flags); 3398 } 3399 3400 static int w_go_diskless(struct drbd_work *w, int unused) 3401 { 3402 struct drbd_conf *mdev = w->mdev; 3403 3404 D_ASSERT(mdev->state.disk == D_FAILED); 3405 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will 3406 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch 3407 * the protected members anymore, though, so once put_ldev reaches zero 3408 * again, it will be safe to free them. */ 3409 3410 /* Try to write changed bitmap pages, read errors may have just 3411 * set some bits outside the area covered by the activity log. 3412 * 3413 * If we have an IO error during the bitmap writeout, 3414 * we will want a full sync next time, just in case. 3415 * (Do we want a specific meta data flag for this?) 3416 * 3417 * If that does not make it to stable storage either, 3418 * we cannot do anything about that anymore. 3419 * 3420 * We still need to check if both bitmap and ldev are present, we may 3421 * end up here after a failed attach, before ldev was even assigned. 3422 */ 3423 if (mdev->bitmap && mdev->ldev) { 3424 /* An interrupted resync or similar is allowed to recounts bits 3425 * while we detach. 3426 * Any modifications would not be expected anymore, though. 3427 */ 3428 if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write, 3429 "detach", BM_LOCKED_TEST_ALLOWED)) { 3430 if (test_bit(WAS_READ_ERROR, &mdev->flags)) { 3431 drbd_md_set_flag(mdev, MDF_FULL_SYNC); 3432 drbd_md_sync(mdev); 3433 } 3434 } 3435 } 3436 3437 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3438 return 0; 3439 } 3440 3441 /** 3442 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap 3443 * @mdev: DRBD device. 3444 * @io_fn: IO callback to be called when bitmap IO is possible 3445 * @done: callback to be called after the bitmap IO was performed 3446 * @why: Descriptive text of the reason for doing the IO 3447 * 3448 * While IO on the bitmap happens we freeze application IO thus we ensure 3449 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be 3450 * called from worker context. It MUST NOT be used while a previous such 3451 * work is still pending! 
3452 */ 3453 void drbd_queue_bitmap_io(struct drbd_conf *mdev, 3454 int (*io_fn)(struct drbd_conf *), 3455 void (*done)(struct drbd_conf *, int), 3456 char *why, enum bm_flag flags) 3457 { 3458 D_ASSERT(current == mdev->tconn->worker.task); 3459 3460 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags)); 3461 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags)); 3462 D_ASSERT(list_empty(&mdev->bm_io_work.w.list)); 3463 if (mdev->bm_io_work.why) 3464 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n", 3465 why, mdev->bm_io_work.why); 3466 3467 mdev->bm_io_work.io_fn = io_fn; 3468 mdev->bm_io_work.done = done; 3469 mdev->bm_io_work.why = why; 3470 mdev->bm_io_work.flags = flags; 3471 3472 spin_lock_irq(&mdev->tconn->req_lock); 3473 set_bit(BITMAP_IO, &mdev->flags); 3474 if (atomic_read(&mdev->ap_bio_cnt) == 0) { 3475 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) 3476 drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w); 3477 } 3478 spin_unlock_irq(&mdev->tconn->req_lock); 3479 } 3480 3481 /** 3482 * drbd_bitmap_io() - Does an IO operation on the whole bitmap 3483 * @mdev: DRBD device. 3484 * @io_fn: IO callback to be called when bitmap IO is possible 3485 * @why: Descriptive text of the reason for doing the IO 3486 * 3487 * freezes application IO while that the actual IO operations runs. This 3488 * functions MAY NOT be called from worker context. 3489 */ 3490 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), 3491 char *why, enum bm_flag flags) 3492 { 3493 int rv; 3494 3495 D_ASSERT(current != mdev->tconn->worker.task); 3496 3497 if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3498 drbd_suspend_io(mdev); 3499 3500 drbd_bm_lock(mdev, why, flags); 3501 rv = io_fn(mdev); 3502 drbd_bm_unlock(mdev); 3503 3504 if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3505 drbd_resume_io(mdev); 3506 3507 return rv; 3508 } 3509 3510 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local) 3511 { 3512 if ((mdev->ldev->md.flags & flag) != flag) { 3513 drbd_md_mark_dirty(mdev); 3514 mdev->ldev->md.flags |= flag; 3515 } 3516 } 3517 3518 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local) 3519 { 3520 if ((mdev->ldev->md.flags & flag) != 0) { 3521 drbd_md_mark_dirty(mdev); 3522 mdev->ldev->md.flags &= ~flag; 3523 } 3524 } 3525 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) 3526 { 3527 return (bdev->md.flags & flag) != 0; 3528 } 3529 3530 static void md_sync_timer_fn(unsigned long data) 3531 { 3532 struct drbd_conf *mdev = (struct drbd_conf *) data; 3533 3534 /* must not double-queue! */ 3535 if (list_empty(&mdev->md_sync_work.list)) 3536 drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work); 3537 } 3538 3539 static int w_md_sync(struct drbd_work *w, int unused) 3540 { 3541 struct drbd_conf *mdev = w->mdev; 3542 3543 dev_warn(DEV, "md_sync_timer expired! 
Worker calls drbd_md_sync().\n");
3544 #ifdef DEBUG
3545 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3546 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3547 #endif
3548 drbd_md_sync(mdev);
3549 return 0;
3550 }
3551
3552 const char *cmdname(enum drbd_packet cmd)
3553 {
3554 /* THINK may need to become several global tables
3555 * when we want to support more than
3556 * one PRO_VERSION */
3557 static const char *cmdnames[] = {
3558 [P_DATA] = "Data",
3559 [P_DATA_REPLY] = "DataReply",
3560 [P_RS_DATA_REPLY] = "RSDataReply",
3561 [P_BARRIER] = "Barrier",
3562 [P_BITMAP] = "ReportBitMap",
3563 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3564 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3565 [P_UNPLUG_REMOTE] = "UnplugRemote",
3566 [P_DATA_REQUEST] = "DataRequest",
3567 [P_RS_DATA_REQUEST] = "RSDataRequest",
3568 [P_SYNC_PARAM] = "SyncParam",
3569 [P_SYNC_PARAM89] = "SyncParam89",
3570 [P_PROTOCOL] = "ReportProtocol",
3571 [P_UUIDS] = "ReportUUIDs",
3572 [P_SIZES] = "ReportSizes",
3573 [P_STATE] = "ReportState",
3574 [P_SYNC_UUID] = "ReportSyncUUID",
3575 [P_AUTH_CHALLENGE] = "AuthChallenge",
3576 [P_AUTH_RESPONSE] = "AuthResponse",
3577 [P_PING] = "Ping",
3578 [P_PING_ACK] = "PingAck",
3579 [P_RECV_ACK] = "RecvAck",
3580 [P_WRITE_ACK] = "WriteAck",
3581 [P_RS_WRITE_ACK] = "RSWriteAck",
3582 [P_SUPERSEDED] = "Superseded",
3583 [P_NEG_ACK] = "NegAck",
3584 [P_NEG_DREPLY] = "NegDReply",
3585 [P_NEG_RS_DREPLY] = "NegRSDReply",
3586 [P_BARRIER_ACK] = "BarrierAck",
3587 [P_STATE_CHG_REQ] = "StateChgRequest",
3588 [P_STATE_CHG_REPLY] = "StateChgReply",
3589 [P_OV_REQUEST] = "OVRequest",
3590 [P_OV_REPLY] = "OVReply",
3591 [P_OV_RESULT] = "OVResult",
3592 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3593 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3594 [P_COMPRESSED_BITMAP] = "CBitmap",
3595 [P_DELAY_PROBE] = "DelayProbe",
3596 [P_OUT_OF_SYNC] = "OutOfSync",
3598 [P_RS_CANCEL] = "RSCancel",
3599 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3600 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
3601 [P_RETRY_WRITE] = "retry_write",
3602 [P_PROTOCOL_UPDATE] = "protocol_update",
3603
3604 /* enum drbd_packet, but not commands - obsoleted flags:
3605 * P_MAY_IGNORE
3606 * P_MAX_OPT_CMD
3607 */
3608 };
3609
3610 /* too big for the array: 0xfffX */
3611 if (cmd == P_INITIAL_META)
3612 return "InitialMeta";
3613 if (cmd == P_INITIAL_DATA)
3614 return "InitialData";
3615 if (cmd == P_CONNECTION_FEATURES)
3616 return "ConnectionFeatures";
3617 if (cmd >= ARRAY_SIZE(cmdnames))
3618 return "Unknown";
3619 return cmdnames[cmd];
3620 }
3621
3622 /**
3623 * drbd_wait_misc - wait for a request to make progress
3624 * @mdev: device associated with the request
3625 * @i: the struct drbd_interval embedded in struct drbd_request or
3626 * struct drbd_peer_request
3627 */
3628 int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
3629 {
3630 struct net_conf *nc;
3631 DEFINE_WAIT(wait);
3632 long timeout;
3633
3634 rcu_read_lock();
3635 nc = rcu_dereference(mdev->tconn->net_conf);
3636 if (!nc) {
3637 rcu_read_unlock();
3638 return -ETIMEDOUT;
3639 }
3640 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3641 rcu_read_unlock();
3642
3643 /* Indicate that we want to be woken up via mdev->misc_wait on progress.
*/ 3644 i->waiting = true; 3645 prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE); 3646 spin_unlock_irq(&mdev->tconn->req_lock); 3647 timeout = schedule_timeout(timeout); 3648 finish_wait(&mdev->misc_wait, &wait); 3649 spin_lock_irq(&mdev->tconn->req_lock); 3650 if (!timeout || mdev->state.conn < C_CONNECTED) 3651 return -ETIMEDOUT; 3652 if (signal_pending(current)) 3653 return -ERESTARTSYS; 3654 return 0; 3655 } 3656 3657 #ifdef CONFIG_DRBD_FAULT_INJECTION 3658 /* Fault insertion support including random number generator shamelessly 3659 * stolen from kernel/rcutorture.c */ 3660 struct fault_random_state { 3661 unsigned long state; 3662 unsigned long count; 3663 }; 3664 3665 #define FAULT_RANDOM_MULT 39916801 /* prime */ 3666 #define FAULT_RANDOM_ADD 479001701 /* prime */ 3667 #define FAULT_RANDOM_REFRESH 10000 3668 3669 /* 3670 * Crude but fast random-number generator. Uses a linear congruential 3671 * generator, with occasional help from get_random_bytes(). 3672 */ 3673 static unsigned long 3674 _drbd_fault_random(struct fault_random_state *rsp) 3675 { 3676 long refresh; 3677 3678 if (!rsp->count--) { 3679 get_random_bytes(&refresh, sizeof(refresh)); 3680 rsp->state += refresh; 3681 rsp->count = FAULT_RANDOM_REFRESH; 3682 } 3683 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD; 3684 return swahw32(rsp->state); 3685 } 3686 3687 static char * 3688 _drbd_fault_str(unsigned int type) { 3689 static char *_faults[] = { 3690 [DRBD_FAULT_MD_WR] = "Meta-data write", 3691 [DRBD_FAULT_MD_RD] = "Meta-data read", 3692 [DRBD_FAULT_RS_WR] = "Resync write", 3693 [DRBD_FAULT_RS_RD] = "Resync read", 3694 [DRBD_FAULT_DT_WR] = "Data write", 3695 [DRBD_FAULT_DT_RD] = "Data read", 3696 [DRBD_FAULT_DT_RA] = "Data read ahead", 3697 [DRBD_FAULT_BM_ALLOC] = "BM allocation", 3698 [DRBD_FAULT_AL_EE] = "EE allocation", 3699 [DRBD_FAULT_RECEIVE] = "receive data corruption", 3700 }; 3701 3702 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**"; 3703 } 3704 3705 unsigned int 3706 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) 3707 { 3708 static struct fault_random_state rrs = {0, 0}; 3709 3710 unsigned int ret = ( 3711 (fault_devs == 0 || 3712 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) && 3713 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate)); 3714 3715 if (ret) { 3716 fault_count++; 3717 3718 if (__ratelimit(&drbd_ratelimit_state)) 3719 dev_warn(DEV, "***Simulating %s failure\n", 3720 _drbd_fault_str(type)); 3721 } 3722 3723 return ret; 3724 } 3725 #endif 3726 3727 const char *drbd_buildtag(void) 3728 { 3729 /* DRBD built from external sources has here a reference to the 3730 git hash of the source code. */ 3731 3732 static char buildtag[38] = "\0uilt-in"; 3733 3734 if (buildtag[0] == 0) { 3735 #ifdef MODULE 3736 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion); 3737 #else 3738 buildtag[0] = 'b'; 3739 #endif 3740 } 3741 3742 return buildtag; 3743 } 3744 3745 module_init(drbd_init) 3746 module_exit(drbd_cleanup) 3747 3748 EXPORT_SYMBOL(drbd_conn_str); 3749 EXPORT_SYMBOL(drbd_role_str); 3750 EXPORT_SYMBOL(drbd_disk_str); 3751 EXPORT_SYMBOL(drbd_set_st_err_str); 3752
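
/*
 * Illustrative only: a sketch of how the fault injection support above is
 * typically consumed at a call site. drbd_insert_fault() is the wrapper
 * around _drbd_insert_fault() (declared in drbd_int.h, where it also
 * checks the enable_faults bitmap); the function below is a sketch and is
 * therefore not built.
 */
#if 0
static void example_submit_with_fault(struct drbd_conf *mdev, struct bio *bio)
{
	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
		bio_endio(bio, -EIO); /* pretend the meta data write failed */
	else
		generic_make_request(bio);
}
#endif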