/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
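/* Usage sketch (not part of the original source): because the fault injection
 * parameters above are registered with non-zero permissions, they can be tuned
 * at runtime through /sys/module/drbd/parameters/<name>, e.g. writing "2" to
 * fault_rate selects a 2% fault rate, while enable_faults takes a bitmask of
 * fault types (see the DRBD_FAULT_* definitions, presumably in drbd_int.h). */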
/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner   = THIS_MODULE,
	.open    = drbd_open,
	.release = drbd_release,
};

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif
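/* Transfer log overview (summary added here, not an original comment): every
 * write request is appended to connection->transfer_log, and requests are
 * grouped into epochs that are separated on the wire by barrier packets
 * (P_BARRIER).  When the peer acknowledges such a barrier (P_BARRIER_ACK),
 * tl_release() below marks all requests of that epoch BARRIER_ACKED;
 * tl_clear() and _tl_restart() walk the same list when the connection is
 * lost or frozen. */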
/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @device:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests in the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}
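/* Summary of the drbd_thread state machine implemented by the functions
 * above and below (all transitions happen under thi->t_lock):
 *   NONE       -> RUNNING     drbd_thread_start()
 *   RUNNING    -> EXITING     _drbd_thread_stop() (or RESTARTING, when a
 *                             restart was requested)
 *   EXITING    -> RESTARTING  drbd_thread_start() racing with a stopping thread
 *   RESTARTING -> RUNNING     drbd_thread_setup() looping back into thi->function
 *   otherwise  -> NONE        drbd_thread_setup() on final termination */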
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @device:	DRBD device.
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
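/* Resulting on-the-wire header layouts (a summary derived from the helpers
 * above; the authoritative definitions are the struct p_header80/95/100
 * declarations, presumably in drbd_protocol.h):
 *   p_header80 : be32 magic, be16 command, be16 length
 *   p_header95 : be16 magic, be16 command, be32 length
 *   p_header100: be32 magic, be16 volume, be16 command, be32 length, pad
 * All of them are a multiple of 8 bytes long, as asserted in drbd_header_size(). */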
static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		drbd_tcp_nodelay(sock->socket);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
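/* drbd_send_sync_param() below picks the packet layout by the agreed
 * protocol version (a summary of the size/cmd selection in the function):
 *   apv <= 87: struct p_rs_param
 *   apv == 88: struct p_rs_param plus the verify_alg string
 *   apv 89-94: struct p_rs_param_89
 *   apv >= 95: struct p_rs_param_95
 * and sends it as P_SYNC_PARAM89 for apv >= 89, P_SYNC_PARAM otherwise. */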
int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}
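/* uuid_flags bits as assembled in _drbd_send_uuids() below and by its
 * callers (summary derived from this file):
 *   1 - ask the peer to discard its data (net_conf->discard_my_data)
 *   2 - we are (or were) a crashed primary (CRASHED_PRIMARY set)
 *   4 - our upcoming disk state is D_INCONSISTENT
 *   8 - skip the initial sync (drbd_send_uuids_skip_initial_sync()) */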
static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}
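/* drbd_send_sizes() below reports three sizes to the peer: d_size (usable
 * capacity of our backing device), u_size (the size requested by the user in
 * disk_conf), and c_size (the capacity we currently expose, or 0 when we only
 * want to provoke a size report from the peer via trigger_reply).
 * max_bio_size is additionally clamped for peers speaking older protocol
 * versions. */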
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;

	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		D_ASSERT(device, device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ?
		P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
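/* Layout of the p_compressed_bm "encoding" byte as assembled by the three
 * helpers above:
 *   bit  7    - value of the first run (dcbp_set_start)
 *   bits 6..4 - number of pad bits at the end of the bit stream (dcbp_set_pad_bits)
 *   bits 3..0 - bitmap encoding code, e.g. RLE_VLI_Bits (dcbp_set_code) */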
static int fill_bitmap_rle_bits(struct drbd_device *device,
				struct p_compressed_bm *p,
				unsigned int size,
				struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
				 "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(device, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_device *device)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @device:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @device:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}

int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - device->last_received); */

	drop_it =   connection->meta.socket == sock
		|| !connection->asender.task
		|| get_t_state(&connection->asender) != RUNNING
		|| connection->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--connection->ko_count;
	if (!drop_it) {
		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, connection->ko_count);
		request_ping(connection);
	}

	return drop_it; /* && (device->state == R_PRIMARY) */;
}

static void drbd_update_congested(struct drbd_connection *connection)
{
	struct sock *sk = connection->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &connection->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = peer_device->connection->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		peer_device->device->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = peer_device->connection->data.socket;
	mm_segment_t oldfs = get_fs();
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	set_fs(KERNEL_DS);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
					break;
				continue;
			}
			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
				  __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &peer_device->connection->flags);

	if (len == 0) {
		err = 0;
		peer_device->device->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(peer_device, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}
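/* _drbd_send_bio() above copies each bio_vec through the socket with
 * kernel_sendmsg() (via _drbd_no_send_page()), while _drbd_send_zc_bio()
 * below hands the pages to ->sendpage() for zero-copy transmission; see the
 * comment above _drbd_no_send_page() for when the copying variant must be
 * used instead. */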
static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(peer_device, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(peer_device, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
{
	if (connection->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}

/* Used to send write or TRIM aka REQ_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	unsigned int dp_flags = 0;
	int digest_size;
	int err;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
	if (device->state.conn >= C_SYNC_SOURCE &&
	    device->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (peer_device->connection->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		/* During resync, request an explicit write ack,
		 * even in protocol != C */
		if (req->rq_state & RQ_EXP_WRITE_ACK
		    || (dp_flags & DP_MAY_SET_IN_SYNC))
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);

	if (dp_flags & DP_DISCARD) {
		struct p_trim *t = (struct p_trim*)p;
		t->size = cpu_to_be32(req->i.size);
		err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
		goto out;
	}

	/* our digest is still only over the payload.
	 * TRIM does not carry any payload. */
	if (digest_size)
		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
			err = _drbd_send_bio(peer_device, req->master_bio);
		else
			err = _drbd_send_zc_bio(peer_device, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (digest_size > 0 && digest_size <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, digest_size)) {
				drbd_warn(device,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (digest_size > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
out:
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

/* answer packet, used to send data back for read requests:
 *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int digest_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);

	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (digest_size)
		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(peer_device, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == connection->data.socket) {
		rcu_read_lock();
		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(connection);
	}
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
		/* THINK
		 * do we need to block DRBD_SIG if sock == &meta.socket ??
		 * otherwise wake_asender() might interrupt some send_*Ack !
		 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(connection, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == connection->data.socket)
		clear_bit(NET_CONGESTED, &connection->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			drbd_err(connection, "%s_sendmsg returned %d\n",
				 sock == connection->meta.socket ? "msock" : "sock",
				 rv);
"msock" : "sock", 1830 rv); 1831 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD); 1832 } else 1833 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD); 1834 } 1835 1836 return sent; 1837 } 1838 1839 /** 1840 * drbd_send_all - Send an entire buffer 1841 * 1842 * Returns 0 upon success and a negative error value otherwise. 1843 */ 1844 int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer, 1845 size_t size, unsigned msg_flags) 1846 { 1847 int err; 1848 1849 err = drbd_send(connection, sock, buffer, size, msg_flags); 1850 if (err < 0) 1851 return err; 1852 if (err != size) 1853 return -EIO; 1854 return 0; 1855 } 1856 1857 static int drbd_open(struct block_device *bdev, fmode_t mode) 1858 { 1859 struct drbd_device *device = bdev->bd_disk->private_data; 1860 unsigned long flags; 1861 int rv = 0; 1862 1863 mutex_lock(&drbd_main_mutex); 1864 spin_lock_irqsave(&device->resource->req_lock, flags); 1865 /* to have a stable device->state.role 1866 * and no race with updating open_cnt */ 1867 1868 if (device->state.role != R_PRIMARY) { 1869 if (mode & FMODE_WRITE) 1870 rv = -EROFS; 1871 else if (!allow_oos) 1872 rv = -EMEDIUMTYPE; 1873 } 1874 1875 if (!rv) 1876 device->open_cnt++; 1877 spin_unlock_irqrestore(&device->resource->req_lock, flags); 1878 mutex_unlock(&drbd_main_mutex); 1879 1880 return rv; 1881 } 1882 1883 static void drbd_release(struct gendisk *gd, fmode_t mode) 1884 { 1885 struct drbd_device *device = gd->private_data; 1886 mutex_lock(&drbd_main_mutex); 1887 device->open_cnt--; 1888 mutex_unlock(&drbd_main_mutex); 1889 } 1890 1891 static void drbd_set_defaults(struct drbd_device *device) 1892 { 1893 /* Beware! The actual layout differs 1894 * between big endian and little endian */ 1895 device->state = (union drbd_dev_state) { 1896 { .role = R_SECONDARY, 1897 .peer = R_UNKNOWN, 1898 .conn = C_STANDALONE, 1899 .disk = D_DISKLESS, 1900 .pdsk = D_UNKNOWN, 1901 } }; 1902 } 1903 1904 void drbd_init_set_defaults(struct drbd_device *device) 1905 { 1906 /* the memset(,0,) did most of this. 
1907 * note: only assignments, no allocation in here */ 1908 1909 drbd_set_defaults(device); 1910 1911 atomic_set(&device->ap_bio_cnt, 0); 1912 atomic_set(&device->ap_actlog_cnt, 0); 1913 atomic_set(&device->ap_pending_cnt, 0); 1914 atomic_set(&device->rs_pending_cnt, 0); 1915 atomic_set(&device->unacked_cnt, 0); 1916 atomic_set(&device->local_cnt, 0); 1917 atomic_set(&device->pp_in_use_by_net, 0); 1918 atomic_set(&device->rs_sect_in, 0); 1919 atomic_set(&device->rs_sect_ev, 0); 1920 atomic_set(&device->ap_in_flight, 0); 1921 atomic_set(&device->md_io.in_use, 0); 1922 1923 mutex_init(&device->own_state_mutex); 1924 device->state_mutex = &device->own_state_mutex; 1925 1926 spin_lock_init(&device->al_lock); 1927 spin_lock_init(&device->peer_seq_lock); 1928 1929 INIT_LIST_HEAD(&device->active_ee); 1930 INIT_LIST_HEAD(&device->sync_ee); 1931 INIT_LIST_HEAD(&device->done_ee); 1932 INIT_LIST_HEAD(&device->read_ee); 1933 INIT_LIST_HEAD(&device->net_ee); 1934 INIT_LIST_HEAD(&device->resync_reads); 1935 INIT_LIST_HEAD(&device->resync_work.list); 1936 INIT_LIST_HEAD(&device->unplug_work.list); 1937 INIT_LIST_HEAD(&device->bm_io_work.w.list); 1938 INIT_LIST_HEAD(&device->pending_master_completion[0]); 1939 INIT_LIST_HEAD(&device->pending_master_completion[1]); 1940 INIT_LIST_HEAD(&device->pending_completion[0]); 1941 INIT_LIST_HEAD(&device->pending_completion[1]); 1942 1943 device->resync_work.cb = w_resync_timer; 1944 device->unplug_work.cb = w_send_write_hint; 1945 device->bm_io_work.w.cb = w_bitmap_io; 1946 1947 init_timer(&device->resync_timer); 1948 init_timer(&device->md_sync_timer); 1949 init_timer(&device->start_resync_timer); 1950 init_timer(&device->request_timer); 1951 device->resync_timer.function = resync_timer_fn; 1952 device->resync_timer.data = (unsigned long) device; 1953 device->md_sync_timer.function = md_sync_timer_fn; 1954 device->md_sync_timer.data = (unsigned long) device; 1955 device->start_resync_timer.function = start_resync_timer_fn; 1956 device->start_resync_timer.data = (unsigned long) device; 1957 device->request_timer.function = request_timer_fn; 1958 device->request_timer.data = (unsigned long) device; 1959 1960 init_waitqueue_head(&device->misc_wait); 1961 init_waitqueue_head(&device->state_wait); 1962 init_waitqueue_head(&device->ee_wait); 1963 init_waitqueue_head(&device->al_wait); 1964 init_waitqueue_head(&device->seq_wait); 1965 1966 device->resync_wenr = LC_FREE; 1967 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; 1968 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; 1969 } 1970 1971 void drbd_device_cleanup(struct drbd_device *device) 1972 { 1973 int i; 1974 if (first_peer_device(device)->connection->receiver.t_state != NONE) 1975 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n", 1976 first_peer_device(device)->connection->receiver.t_state); 1977 1978 device->al_writ_cnt = 1979 device->bm_writ_cnt = 1980 device->read_cnt = 1981 device->recv_cnt = 1982 device->send_cnt = 1983 device->writ_cnt = 1984 device->p_size = 1985 device->rs_start = 1986 device->rs_total = 1987 device->rs_failed = 0; 1988 device->rs_last_events = 0; 1989 device->rs_last_sect_ev = 0; 1990 for (i = 0; i < DRBD_SYNC_MARKS; i++) { 1991 device->rs_mark_left[i] = 0; 1992 device->rs_mark_time[i] = 0; 1993 } 1994 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL); 1995 1996 drbd_set_my_capacity(device, 0); 1997 if (device->bitmap) { 1998 /* maybe never allocated. 
*/ 1999 drbd_bm_resize(device, 0, 1); 2000 drbd_bm_cleanup(device); 2001 } 2002 2003 drbd_free_ldev(device->ldev); 2004 device->ldev = NULL; 2005 2006 clear_bit(AL_SUSPENDED, &device->flags); 2007 2008 D_ASSERT(device, list_empty(&device->active_ee)); 2009 D_ASSERT(device, list_empty(&device->sync_ee)); 2010 D_ASSERT(device, list_empty(&device->done_ee)); 2011 D_ASSERT(device, list_empty(&device->read_ee)); 2012 D_ASSERT(device, list_empty(&device->net_ee)); 2013 D_ASSERT(device, list_empty(&device->resync_reads)); 2014 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); 2015 D_ASSERT(device, list_empty(&device->resync_work.list)); 2016 D_ASSERT(device, list_empty(&device->unplug_work.list)); 2017 2018 drbd_set_defaults(device); 2019 } 2020 2021 2022 static void drbd_destroy_mempools(void) 2023 { 2024 struct page *page; 2025 2026 while (drbd_pp_pool) { 2027 page = drbd_pp_pool; 2028 drbd_pp_pool = (struct page *)page_private(page); 2029 __free_page(page); 2030 drbd_pp_vacant--; 2031 } 2032 2033 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */ 2034 2035 if (drbd_md_io_bio_set) 2036 bioset_free(drbd_md_io_bio_set); 2037 if (drbd_md_io_page_pool) 2038 mempool_destroy(drbd_md_io_page_pool); 2039 if (drbd_ee_mempool) 2040 mempool_destroy(drbd_ee_mempool); 2041 if (drbd_request_mempool) 2042 mempool_destroy(drbd_request_mempool); 2043 if (drbd_ee_cache) 2044 kmem_cache_destroy(drbd_ee_cache); 2045 if (drbd_request_cache) 2046 kmem_cache_destroy(drbd_request_cache); 2047 if (drbd_bm_ext_cache) 2048 kmem_cache_destroy(drbd_bm_ext_cache); 2049 if (drbd_al_ext_cache) 2050 kmem_cache_destroy(drbd_al_ext_cache); 2051 2052 drbd_md_io_bio_set = NULL; 2053 drbd_md_io_page_pool = NULL; 2054 drbd_ee_mempool = NULL; 2055 drbd_request_mempool = NULL; 2056 drbd_ee_cache = NULL; 2057 drbd_request_cache = NULL; 2058 drbd_bm_ext_cache = NULL; 2059 drbd_al_ext_cache = NULL; 2060 2061 return; 2062 } 2063 2064 static int drbd_create_mempools(void) 2065 { 2066 struct page *page; 2067 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; 2068 int i; 2069 2070 /* prepare our caches and mempools */ 2071 drbd_request_mempool = NULL; 2072 drbd_ee_cache = NULL; 2073 drbd_request_cache = NULL; 2074 drbd_bm_ext_cache = NULL; 2075 drbd_al_ext_cache = NULL; 2076 drbd_pp_pool = NULL; 2077 drbd_md_io_page_pool = NULL; 2078 drbd_md_io_bio_set = NULL; 2079 2080 /* caches */ 2081 drbd_request_cache = kmem_cache_create( 2082 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL); 2083 if (drbd_request_cache == NULL) 2084 goto Enomem; 2085 2086 drbd_ee_cache = kmem_cache_create( 2087 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL); 2088 if (drbd_ee_cache == NULL) 2089 goto Enomem; 2090 2091 drbd_bm_ext_cache = kmem_cache_create( 2092 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL); 2093 if (drbd_bm_ext_cache == NULL) 2094 goto Enomem; 2095 2096 drbd_al_ext_cache = kmem_cache_create( 2097 "drbd_al", sizeof(struct lc_element), 0, 0, NULL); 2098 if (drbd_al_ext_cache == NULL) 2099 goto Enomem; 2100 2101 /* mempools */ 2102 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0); 2103 if (drbd_md_io_bio_set == NULL) 2104 goto Enomem; 2105 2106 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0); 2107 if (drbd_md_io_page_pool == NULL) 2108 goto Enomem; 2109 2110 drbd_request_mempool = mempool_create_slab_pool(number, 2111 drbd_request_cache); 2112 if (drbd_request_mempool == NULL) 2113 goto Enomem; 2114 2115 drbd_ee_mempool = 
mempool_create_slab_pool(number, drbd_ee_cache); 2116 if (drbd_ee_mempool == NULL) 2117 goto Enomem; 2118 2119 /* drbd's page pool */ 2120 spin_lock_init(&drbd_pp_lock); 2121 2122 for (i = 0; i < number; i++) { 2123 page = alloc_page(GFP_HIGHUSER); 2124 if (!page) 2125 goto Enomem; 2126 set_page_private(page, (unsigned long)drbd_pp_pool); 2127 drbd_pp_pool = page; 2128 } 2129 drbd_pp_vacant = number; 2130 2131 return 0; 2132 2133 Enomem: 2134 drbd_destroy_mempools(); /* in case we allocated some */ 2135 return -ENOMEM; 2136 } 2137 2138 static void drbd_release_all_peer_reqs(struct drbd_device *device) 2139 { 2140 int rr; 2141 2142 rr = drbd_free_peer_reqs(device, &device->active_ee); 2143 if (rr) 2144 drbd_err(device, "%d EEs in active list found!\n", rr); 2145 2146 rr = drbd_free_peer_reqs(device, &device->sync_ee); 2147 if (rr) 2148 drbd_err(device, "%d EEs in sync list found!\n", rr); 2149 2150 rr = drbd_free_peer_reqs(device, &device->read_ee); 2151 if (rr) 2152 drbd_err(device, "%d EEs in read list found!\n", rr); 2153 2154 rr = drbd_free_peer_reqs(device, &device->done_ee); 2155 if (rr) 2156 drbd_err(device, "%d EEs in done list found!\n", rr); 2157 2158 rr = drbd_free_peer_reqs(device, &device->net_ee); 2159 if (rr) 2160 drbd_err(device, "%d EEs in net list found!\n", rr); 2161 } 2162 2163 /* caution. no locking. */ 2164 void drbd_destroy_device(struct kref *kref) 2165 { 2166 struct drbd_device *device = container_of(kref, struct drbd_device, kref); 2167 struct drbd_resource *resource = device->resource; 2168 struct drbd_peer_device *peer_device, *tmp_peer_device; 2169 2170 del_timer_sync(&device->request_timer); 2171 2172 /* paranoia asserts */ 2173 D_ASSERT(device, device->open_cnt == 0); 2174 /* end paranoia asserts */ 2175 2176 /* cleanup stuff that may have been allocated during 2177 * device (re-)configuration or state changes */ 2178 2179 if (device->this_bdev) 2180 bdput(device->this_bdev); 2181 2182 drbd_free_ldev(device->ldev); 2183 device->ldev = NULL; 2184 2185 drbd_release_all_peer_reqs(device); 2186 2187 lc_destroy(device->act_log); 2188 lc_destroy(device->resync); 2189 2190 kfree(device->p_uuid); 2191 /* device->p_uuid = NULL; */ 2192 2193 if (device->bitmap) /* should no longer be there. */ 2194 drbd_bm_cleanup(device); 2195 __free_page(device->md_io.page); 2196 put_disk(device->vdisk); 2197 blk_cleanup_queue(device->rq_queue); 2198 kfree(device->rs_plan_s); 2199 2200 /* not for_each_connection(connection, resource): 2201 * those may have been cleaned up and disassociated already. 2202 */ 2203 for_each_peer_device_safe(peer_device, tmp_peer_device, device) { 2204 kref_put(&peer_device->connection->kref, drbd_destroy_connection); 2205 kfree(peer_device); 2206 } 2207 memset(device, 0xfd, sizeof(*device)); 2208 kfree(device); 2209 kref_put(&resource->kref, drbd_destroy_resource); 2210 } 2211 2212 /* One global retry thread, if we need to push back some bio and have it 2213 * reinserted through our make request function. 
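 * Such bios belong to requests that were marked RQ_POSTPONED, e.g. after a
 * write conflict with a peer in a multi-primary setup; see the comments in
 * do_retry() below.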
2214 */ 2215 static struct retry_worker { 2216 struct workqueue_struct *wq; 2217 struct work_struct worker; 2218 2219 spinlock_t lock; 2220 struct list_head writes; 2221 } retry; 2222 2223 static void do_retry(struct work_struct *ws) 2224 { 2225 struct retry_worker *retry = container_of(ws, struct retry_worker, worker); 2226 LIST_HEAD(writes); 2227 struct drbd_request *req, *tmp; 2228 2229 spin_lock_irq(&retry->lock); 2230 list_splice_init(&retry->writes, &writes); 2231 spin_unlock_irq(&retry->lock); 2232 2233 list_for_each_entry_safe(req, tmp, &writes, tl_requests) { 2234 struct drbd_device *device = req->device; 2235 struct bio *bio = req->master_bio; 2236 unsigned long start_jif = req->start_jif; 2237 bool expected; 2238 2239 expected = 2240 expect(atomic_read(&req->completion_ref) == 0) && 2241 expect(req->rq_state & RQ_POSTPONED) && 2242 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 || 2243 (req->rq_state & RQ_LOCAL_ABORTED) != 0); 2244 2245 if (!expected) 2246 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n", 2247 req, atomic_read(&req->completion_ref), 2248 req->rq_state); 2249 2250 /* We still need to put one kref associated with the 2251 * "completion_ref" going zero in the code path that queued it 2252 * here. The request object may still be referenced by a 2253 * frozen local req->private_bio, in case we force-detached. 2254 */ 2255 kref_put(&req->kref, drbd_req_destroy); 2256 2257 /* A single suspended or otherwise blocking device may stall 2258 * all others as well. Fortunately, this code path is to 2259 * recover from a situation that "should not happen": 2260 * concurrent writes in multi-primary setup. 2261 * In a "normal" lifecycle, this workqueue is supposed to be 2262 * destroyed without ever doing anything. 2263 * If it turns out to be an issue anyways, we can do per 2264 * resource (replication group) or per device (minor) retry 2265 * workqueues instead. 2266 */ 2267 2268 /* We are not just doing generic_make_request(), 2269 * as we want to keep the start_time information. */ 2270 inc_ap_bio(device); 2271 __drbd_make_request(device, bio, start_jif); 2272 } 2273 } 2274 2275 /* called via drbd_req_put_completion_ref(), 2276 * holds resource->req_lock */ 2277 void drbd_restart_request(struct drbd_request *req) 2278 { 2279 unsigned long flags; 2280 spin_lock_irqsave(&retry.lock, flags); 2281 list_move_tail(&req->tl_requests, &retry.writes); 2282 spin_unlock_irqrestore(&retry.lock, flags); 2283 2284 /* Drop the extra reference that would otherwise 2285 * have been dropped by complete_master_bio. 2286 * do_retry() needs to grab a new one. 
*/
2287 dec_ap_bio(req->device);
2288
2289 queue_work(retry.wq, &retry.worker);
2290 }
2291
2292 void drbd_destroy_resource(struct kref *kref)
2293 {
2294 struct drbd_resource *resource =
2295 container_of(kref, struct drbd_resource, kref);
2296
2297 idr_destroy(&resource->devices);
2298 free_cpumask_var(resource->cpu_mask);
2299 kfree(resource->name);
2300 memset(resource, 0xf2, sizeof(*resource));
2301 kfree(resource);
2302 }
2303
2304 void drbd_free_resource(struct drbd_resource *resource)
2305 {
2306 struct drbd_connection *connection, *tmp;
2307
2308 for_each_connection_safe(connection, tmp, resource) {
2309 list_del(&connection->connections);
2310 drbd_debugfs_connection_cleanup(connection);
2311 kref_put(&connection->kref, drbd_destroy_connection);
2312 }
2313 drbd_debugfs_resource_cleanup(resource);
2314 kref_put(&resource->kref, drbd_destroy_resource);
2315 }
2316
2317 static void drbd_cleanup(void)
2318 {
2319 unsigned int i;
2320 struct drbd_device *device;
2321 struct drbd_resource *resource, *tmp;
2322
2323 /* first remove proc,
2324 * drbdsetup uses its presence to detect
2325 * whether DRBD is loaded.
2326 * If we got stuck in proc removal,
2327 * but had netlink already deregistered,
2328 * some drbdsetup commands could wait forever
2329 * for an answer.
2330 */
2331 if (drbd_proc)
2332 remove_proc_entry("drbd", NULL);
2333
2334 if (retry.wq)
2335 destroy_workqueue(retry.wq);
2336
2337 drbd_genl_unregister();
2338 drbd_debugfs_cleanup();
2339
2340 idr_for_each_entry(&drbd_devices, device, i)
2341 drbd_delete_device(device);
2342
2343 /* not _rcu since there is no other updater anymore; genl is already unregistered */
2344 for_each_resource_safe(resource, tmp, &drbd_resources) {
2345 list_del(&resource->resources);
2346 drbd_free_resource(resource);
2347 }
2348
2349 drbd_destroy_mempools();
2350 unregister_blkdev(DRBD_MAJOR, "drbd");
2351
2352 idr_destroy(&drbd_devices);
2353
2354 pr_info("module cleanup done.\n");
2355 }
2356
2357 /**
2358 * drbd_congested() - Callback for the flusher thread
2359 * @congested_data: User data
2360 * @bdi_bits: Bits the BDI flusher thread is currently interested in
2361 *
2362 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2363 */
2364 static int drbd_congested(void *congested_data, int bdi_bits)
2365 {
2366 struct drbd_device *device = congested_data;
2367 struct request_queue *q;
2368 char reason = '-';
2369 int r = 0;
2370
2371 if (!may_inc_ap_bio(device)) {
2372 /* DRBD has frozen IO */
2373 r = bdi_bits;
2374 reason = 'd';
2375 goto out;
2376 }
2377
2378 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2379 r |= (1 << WB_async_congested);
2380 /* Without good local data, we would need to read from remote,
2381 * and that would need the worker thread as well, which is
2382 * currently blocked waiting for that usermode helper to
2383 * finish.
2384 */
2385 if (!get_ldev_if_state(device, D_UP_TO_DATE))
2386 r |= (1 << WB_sync_congested);
2387 else
2388 put_ldev(device);
2389 r &= bdi_bits;
2390 reason = 'c';
2391 goto out;
2392 }
2393
2394 if (get_ldev(device)) {
2395 q = bdev_get_queue(device->ldev->backing_bdev);
2396 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2397 put_ldev(device);
2398 if (r)
2399 reason = 'b';
2400 }
2401
2402 if (bdi_bits & (1 << WB_async_congested) &&
2403 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2404 r |= (1 << WB_async_congested);
2405 reason = reason == 'b' ?
'a' : 'n'; 2406 } 2407 2408 out: 2409 device->congestion_reason = reason; 2410 return r; 2411 } 2412 2413 static void drbd_init_workqueue(struct drbd_work_queue* wq) 2414 { 2415 spin_lock_init(&wq->q_lock); 2416 INIT_LIST_HEAD(&wq->q); 2417 init_waitqueue_head(&wq->q_wait); 2418 } 2419 2420 struct completion_work { 2421 struct drbd_work w; 2422 struct completion done; 2423 }; 2424 2425 static int w_complete(struct drbd_work *w, int cancel) 2426 { 2427 struct completion_work *completion_work = 2428 container_of(w, struct completion_work, w); 2429 2430 complete(&completion_work->done); 2431 return 0; 2432 } 2433 2434 void drbd_flush_workqueue(struct drbd_work_queue *work_queue) 2435 { 2436 struct completion_work completion_work; 2437 2438 completion_work.w.cb = w_complete; 2439 init_completion(&completion_work.done); 2440 drbd_queue_work(work_queue, &completion_work.w); 2441 wait_for_completion(&completion_work.done); 2442 } 2443 2444 struct drbd_resource *drbd_find_resource(const char *name) 2445 { 2446 struct drbd_resource *resource; 2447 2448 if (!name || !name[0]) 2449 return NULL; 2450 2451 rcu_read_lock(); 2452 for_each_resource_rcu(resource, &drbd_resources) { 2453 if (!strcmp(resource->name, name)) { 2454 kref_get(&resource->kref); 2455 goto found; 2456 } 2457 } 2458 resource = NULL; 2459 found: 2460 rcu_read_unlock(); 2461 return resource; 2462 } 2463 2464 struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len, 2465 void *peer_addr, int peer_addr_len) 2466 { 2467 struct drbd_resource *resource; 2468 struct drbd_connection *connection; 2469 2470 rcu_read_lock(); 2471 for_each_resource_rcu(resource, &drbd_resources) { 2472 for_each_connection_rcu(connection, resource) { 2473 if (connection->my_addr_len == my_addr_len && 2474 connection->peer_addr_len == peer_addr_len && 2475 !memcmp(&connection->my_addr, my_addr, my_addr_len) && 2476 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) { 2477 kref_get(&connection->kref); 2478 goto found; 2479 } 2480 } 2481 } 2482 connection = NULL; 2483 found: 2484 rcu_read_unlock(); 2485 return connection; 2486 } 2487 2488 static int drbd_alloc_socket(struct drbd_socket *socket) 2489 { 2490 socket->rbuf = (void *) __get_free_page(GFP_KERNEL); 2491 if (!socket->rbuf) 2492 return -ENOMEM; 2493 socket->sbuf = (void *) __get_free_page(GFP_KERNEL); 2494 if (!socket->sbuf) 2495 return -ENOMEM; 2496 return 0; 2497 } 2498 2499 static void drbd_free_socket(struct drbd_socket *socket) 2500 { 2501 free_page((unsigned long) socket->sbuf); 2502 free_page((unsigned long) socket->rbuf); 2503 } 2504 2505 void conn_free_crypto(struct drbd_connection *connection) 2506 { 2507 drbd_free_sock(connection); 2508 2509 crypto_free_hash(connection->csums_tfm); 2510 crypto_free_hash(connection->verify_tfm); 2511 crypto_free_hash(connection->cram_hmac_tfm); 2512 crypto_free_hash(connection->integrity_tfm); 2513 crypto_free_hash(connection->peer_integrity_tfm); 2514 kfree(connection->int_dig_in); 2515 kfree(connection->int_dig_vv); 2516 2517 connection->csums_tfm = NULL; 2518 connection->verify_tfm = NULL; 2519 connection->cram_hmac_tfm = NULL; 2520 connection->integrity_tfm = NULL; 2521 connection->peer_integrity_tfm = NULL; 2522 connection->int_dig_in = NULL; 2523 connection->int_dig_vv = NULL; 2524 } 2525 2526 int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts) 2527 { 2528 struct drbd_connection *connection; 2529 cpumask_var_t new_cpu_mask; 2530 int err; 2531 2532 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) 2533 
return -ENOMEM; 2534 2535 /* silently ignore cpu mask on UP kernel */ 2536 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { 2537 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE, 2538 cpumask_bits(new_cpu_mask), nr_cpu_ids); 2539 if (err == -EOVERFLOW) { 2540 /* So what. mask it out. */ 2541 cpumask_var_t tmp_cpu_mask; 2542 if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) { 2543 cpumask_setall(tmp_cpu_mask); 2544 cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask); 2545 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n", 2546 res_opts->cpu_mask, 2547 strlen(res_opts->cpu_mask) > 12 ? "..." : "", 2548 nr_cpu_ids); 2549 free_cpumask_var(tmp_cpu_mask); 2550 err = 0; 2551 } 2552 } 2553 if (err) { 2554 drbd_warn(resource, "bitmap_parse() failed with %d\n", err); 2555 /* retcode = ERR_CPU_MASK_PARSE; */ 2556 goto fail; 2557 } 2558 } 2559 resource->res_opts = *res_opts; 2560 if (cpumask_empty(new_cpu_mask)) 2561 drbd_calc_cpu_mask(&new_cpu_mask); 2562 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) { 2563 cpumask_copy(resource->cpu_mask, new_cpu_mask); 2564 for_each_connection_rcu(connection, resource) { 2565 connection->receiver.reset_cpu_mask = 1; 2566 connection->asender.reset_cpu_mask = 1; 2567 connection->worker.reset_cpu_mask = 1; 2568 } 2569 } 2570 err = 0; 2571 2572 fail: 2573 free_cpumask_var(new_cpu_mask); 2574 return err; 2575 2576 } 2577 2578 struct drbd_resource *drbd_create_resource(const char *name) 2579 { 2580 struct drbd_resource *resource; 2581 2582 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL); 2583 if (!resource) 2584 goto fail; 2585 resource->name = kstrdup(name, GFP_KERNEL); 2586 if (!resource->name) 2587 goto fail_free_resource; 2588 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL)) 2589 goto fail_free_name; 2590 kref_init(&resource->kref); 2591 idr_init(&resource->devices); 2592 INIT_LIST_HEAD(&resource->connections); 2593 resource->write_ordering = WO_bdev_flush; 2594 list_add_tail_rcu(&resource->resources, &drbd_resources); 2595 mutex_init(&resource->conf_update); 2596 mutex_init(&resource->adm_mutex); 2597 spin_lock_init(&resource->req_lock); 2598 drbd_debugfs_resource_add(resource); 2599 return resource; 2600 2601 fail_free_name: 2602 kfree(resource->name); 2603 fail_free_resource: 2604 kfree(resource); 2605 fail: 2606 return NULL; 2607 } 2608 2609 /* caller must be under adm_mutex */ 2610 struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts) 2611 { 2612 struct drbd_resource *resource; 2613 struct drbd_connection *connection; 2614 2615 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL); 2616 if (!connection) 2617 return NULL; 2618 2619 if (drbd_alloc_socket(&connection->data)) 2620 goto fail; 2621 if (drbd_alloc_socket(&connection->meta)) 2622 goto fail; 2623 2624 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); 2625 if (!connection->current_epoch) 2626 goto fail; 2627 2628 INIT_LIST_HEAD(&connection->transfer_log); 2629 2630 INIT_LIST_HEAD(&connection->current_epoch->list); 2631 connection->epochs = 1; 2632 spin_lock_init(&connection->epoch_lock); 2633 2634 connection->send.seen_any_write_yet = false; 2635 connection->send.current_epoch_nr = 0; 2636 connection->send.current_epoch_writes = 0; 2637 2638 resource = drbd_create_resource(name); 2639 if (!resource) 2640 goto fail; 2641 2642 connection->cstate = C_STANDALONE; 2643 mutex_init(&connection->cstate_mutex); 2644 init_waitqueue_head(&connection->ping_wait); 2645 
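
	/* Per-connection infrastructure set up below: the peer_devices idr,
	 * the sender work queue, the mutexes serializing access to the data
	 * and meta sockets, and the three threads of a connection (receiver,
	 * worker, asender), each linked back to this connection. */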
idr_init(&connection->peer_devices); 2646 2647 drbd_init_workqueue(&connection->sender_work); 2648 mutex_init(&connection->data.mutex); 2649 mutex_init(&connection->meta.mutex); 2650 2651 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver"); 2652 connection->receiver.connection = connection; 2653 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker"); 2654 connection->worker.connection = connection; 2655 drbd_thread_init(resource, &connection->asender, drbd_asender, "asender"); 2656 connection->asender.connection = connection; 2657 2658 kref_init(&connection->kref); 2659 2660 connection->resource = resource; 2661 2662 if (set_resource_options(resource, res_opts)) 2663 goto fail_resource; 2664 2665 kref_get(&resource->kref); 2666 list_add_tail_rcu(&connection->connections, &resource->connections); 2667 drbd_debugfs_connection_add(connection); 2668 return connection; 2669 2670 fail_resource: 2671 list_del(&resource->resources); 2672 drbd_free_resource(resource); 2673 fail: 2674 kfree(connection->current_epoch); 2675 drbd_free_socket(&connection->meta); 2676 drbd_free_socket(&connection->data); 2677 kfree(connection); 2678 return NULL; 2679 } 2680 2681 void drbd_destroy_connection(struct kref *kref) 2682 { 2683 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref); 2684 struct drbd_resource *resource = connection->resource; 2685 2686 if (atomic_read(&connection->current_epoch->epoch_size) != 0) 2687 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size)); 2688 kfree(connection->current_epoch); 2689 2690 idr_destroy(&connection->peer_devices); 2691 2692 drbd_free_socket(&connection->meta); 2693 drbd_free_socket(&connection->data); 2694 kfree(connection->int_dig_in); 2695 kfree(connection->int_dig_vv); 2696 memset(connection, 0xfc, sizeof(*connection)); 2697 kfree(connection); 2698 kref_put(&resource->kref, drbd_destroy_resource); 2699 } 2700 2701 static int init_submitter(struct drbd_device *device) 2702 { 2703 /* opencoded create_singlethread_workqueue(), 2704 * to be able to say "drbd%d", ..., minor */ 2705 device->submit.wq = alloc_workqueue("drbd%u_submit", 2706 WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor); 2707 if (!device->submit.wq) 2708 return -ENOMEM; 2709 2710 INIT_WORK(&device->submit.worker, do_submit); 2711 INIT_LIST_HEAD(&device->submit.writes); 2712 return 0; 2713 } 2714 2715 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) 2716 { 2717 struct drbd_resource *resource = adm_ctx->resource; 2718 struct drbd_connection *connection; 2719 struct drbd_device *device; 2720 struct drbd_peer_device *peer_device, *tmp_peer_device; 2721 struct gendisk *disk; 2722 struct request_queue *q; 2723 int id; 2724 int vnr = adm_ctx->volume; 2725 enum drbd_ret_code err = ERR_NOMEM; 2726 2727 device = minor_to_device(minor); 2728 if (device) 2729 return ERR_MINOR_OR_VOLUME_EXISTS; 2730 2731 /* GFP_KERNEL, we are outside of all write-out paths */ 2732 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL); 2733 if (!device) 2734 return ERR_NOMEM; 2735 kref_init(&device->kref); 2736 2737 kref_get(&resource->kref); 2738 device->resource = resource; 2739 device->minor = minor; 2740 device->vnr = vnr; 2741 2742 drbd_init_set_defaults(device); 2743 2744 q = blk_alloc_queue(GFP_KERNEL); 2745 if (!q) 2746 goto out_no_q; 2747 device->rq_queue = q; 2748 q->queuedata = device; 2749 2750 disk = alloc_disk(1); 2751 if (!disk) 2752 goto out_no_disk; 2753 
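
	/* Wire up gendisk and request queue below: the disk starts out
	 * read-only (writes are only allowed in the Primary role, see
	 * drbd_open()), uses the DRBD major, and funnels all bios through
	 * drbd_make_request().  max_hw_sectors stays at a conservative
	 * DRBD_MAX_BIO_SIZE_SAFE until the peer's limits are known. */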
device->vdisk = disk; 2754 2755 set_disk_ro(disk, true); 2756 2757 disk->queue = q; 2758 disk->major = DRBD_MAJOR; 2759 disk->first_minor = minor; 2760 disk->fops = &drbd_ops; 2761 sprintf(disk->disk_name, "drbd%d", minor); 2762 disk->private_data = device; 2763 2764 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor)); 2765 /* we have no partitions. we contain only ourselves. */ 2766 device->this_bdev->bd_contains = device->this_bdev; 2767 2768 q->backing_dev_info.congested_fn = drbd_congested; 2769 q->backing_dev_info.congested_data = device; 2770 2771 blk_queue_make_request(q, drbd_make_request); 2772 blk_queue_flush(q, REQ_FLUSH | REQ_FUA); 2773 /* Setting the max_hw_sectors to an odd value of 8kibyte here 2774 This triggers a max_bio_size message upon first attach or connect */ 2775 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); 2776 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 2777 q->queue_lock = &resource->req_lock; 2778 2779 device->md_io.page = alloc_page(GFP_KERNEL); 2780 if (!device->md_io.page) 2781 goto out_no_io_page; 2782 2783 if (drbd_bm_init(device)) 2784 goto out_no_bitmap; 2785 device->read_requests = RB_ROOT; 2786 device->write_requests = RB_ROOT; 2787 2788 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL); 2789 if (id < 0) { 2790 if (id == -ENOSPC) 2791 err = ERR_MINOR_OR_VOLUME_EXISTS; 2792 goto out_no_minor_idr; 2793 } 2794 kref_get(&device->kref); 2795 2796 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL); 2797 if (id < 0) { 2798 if (id == -ENOSPC) 2799 err = ERR_MINOR_OR_VOLUME_EXISTS; 2800 goto out_idr_remove_minor; 2801 } 2802 kref_get(&device->kref); 2803 2804 INIT_LIST_HEAD(&device->peer_devices); 2805 INIT_LIST_HEAD(&device->pending_bitmap_io); 2806 for_each_connection(connection, resource) { 2807 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL); 2808 if (!peer_device) 2809 goto out_idr_remove_from_resource; 2810 peer_device->connection = connection; 2811 peer_device->device = device; 2812 2813 list_add(&peer_device->peer_devices, &device->peer_devices); 2814 kref_get(&device->kref); 2815 2816 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL); 2817 if (id < 0) { 2818 if (id == -ENOSPC) 2819 err = ERR_INVALID_REQUEST; 2820 goto out_idr_remove_from_resource; 2821 } 2822 kref_get(&connection->kref); 2823 } 2824 2825 if (init_submitter(device)) { 2826 err = ERR_NOMEM; 2827 goto out_idr_remove_vol; 2828 } 2829 2830 add_disk(disk); 2831 2832 /* inherit the connection state */ 2833 device->state.conn = first_connection(resource)->cstate; 2834 if (device->state.conn == C_WF_REPORT_PARAMS) { 2835 for_each_peer_device(peer_device, device) 2836 drbd_connected(peer_device); 2837 } 2838 /* move to create_peer_device() */ 2839 for_each_peer_device(peer_device, device) 2840 drbd_debugfs_peer_device_add(peer_device); 2841 drbd_debugfs_device_add(device); 2842 return NO_ERROR; 2843 2844 out_idr_remove_vol: 2845 idr_remove(&connection->peer_devices, vnr); 2846 out_idr_remove_from_resource: 2847 for_each_connection(connection, resource) { 2848 peer_device = idr_find(&connection->peer_devices, vnr); 2849 if (peer_device) { 2850 idr_remove(&connection->peer_devices, vnr); 2851 kref_put(&connection->kref, drbd_destroy_connection); 2852 } 2853 } 2854 for_each_peer_device_safe(peer_device, tmp_peer_device, device) { 2855 list_del(&peer_device->peer_devices); 2856 kfree(peer_device); 2857 } 2858 idr_remove(&resource->devices, vnr); 2859 out_idr_remove_minor: 2860 idr_remove(&drbd_devices, 
minor); 2861 synchronize_rcu(); 2862 out_no_minor_idr: 2863 drbd_bm_cleanup(device); 2864 out_no_bitmap: 2865 __free_page(device->md_io.page); 2866 out_no_io_page: 2867 put_disk(disk); 2868 out_no_disk: 2869 blk_cleanup_queue(q); 2870 out_no_q: 2871 kref_put(&resource->kref, drbd_destroy_resource); 2872 kfree(device); 2873 return err; 2874 } 2875 2876 void drbd_delete_device(struct drbd_device *device) 2877 { 2878 struct drbd_resource *resource = device->resource; 2879 struct drbd_connection *connection; 2880 struct drbd_peer_device *peer_device; 2881 int refs = 3; 2882 2883 /* move to free_peer_device() */ 2884 for_each_peer_device(peer_device, device) 2885 drbd_debugfs_peer_device_cleanup(peer_device); 2886 drbd_debugfs_device_cleanup(device); 2887 for_each_connection(connection, resource) { 2888 idr_remove(&connection->peer_devices, device->vnr); 2889 refs++; 2890 } 2891 idr_remove(&resource->devices, device->vnr); 2892 idr_remove(&drbd_devices, device_to_minor(device)); 2893 del_gendisk(device->vdisk); 2894 synchronize_rcu(); 2895 kref_sub(&device->kref, refs, drbd_destroy_device); 2896 } 2897 2898 static int __init drbd_init(void) 2899 { 2900 int err; 2901 2902 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { 2903 pr_err("invalid minor_count (%d)\n", minor_count); 2904 #ifdef MODULE 2905 return -EINVAL; 2906 #else 2907 minor_count = DRBD_MINOR_COUNT_DEF; 2908 #endif 2909 } 2910 2911 err = register_blkdev(DRBD_MAJOR, "drbd"); 2912 if (err) { 2913 pr_err("unable to register block device major %d\n", 2914 DRBD_MAJOR); 2915 return err; 2916 } 2917 2918 /* 2919 * allocate all necessary structs 2920 */ 2921 init_waitqueue_head(&drbd_pp_wait); 2922 2923 drbd_proc = NULL; /* play safe for drbd_cleanup */ 2924 idr_init(&drbd_devices); 2925 2926 rwlock_init(&global_state_lock); 2927 INIT_LIST_HEAD(&drbd_resources); 2928 2929 err = drbd_genl_register(); 2930 if (err) { 2931 pr_err("unable to register generic netlink family\n"); 2932 goto fail; 2933 } 2934 2935 err = drbd_create_mempools(); 2936 if (err) 2937 goto fail; 2938 2939 err = -ENOMEM; 2940 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); 2941 if (!drbd_proc) { 2942 pr_err("unable to register proc file\n"); 2943 goto fail; 2944 } 2945 2946 retry.wq = create_singlethread_workqueue("drbd-reissue"); 2947 if (!retry.wq) { 2948 pr_err("unable to create retry workqueue\n"); 2949 goto fail; 2950 } 2951 INIT_WORK(&retry.worker, do_retry); 2952 spin_lock_init(&retry.lock); 2953 INIT_LIST_HEAD(&retry.writes); 2954 2955 if (drbd_debugfs_init()) 2956 pr_notice("failed to initialize debugfs -- will not be available\n"); 2957 2958 pr_info("initialized. " 2959 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", 2960 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX); 2961 pr_info("%s\n", drbd_buildtag()); 2962 pr_info("registered as block device major %d\n", DRBD_MAJOR); 2963 return 0; /* Success! 
*/ 2964 2965 fail: 2966 drbd_cleanup(); 2967 if (err == -ENOMEM) 2968 pr_err("ran out of memory\n"); 2969 else 2970 pr_err("initialization failure\n"); 2971 return err; 2972 } 2973 2974 void drbd_free_ldev(struct drbd_backing_dev *ldev) 2975 { 2976 if (ldev == NULL) 2977 return; 2978 2979 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2980 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2981 2982 kfree(ldev->disk_conf); 2983 kfree(ldev); 2984 } 2985 2986 static void drbd_free_one_sock(struct drbd_socket *ds) 2987 { 2988 struct socket *s; 2989 mutex_lock(&ds->mutex); 2990 s = ds->socket; 2991 ds->socket = NULL; 2992 mutex_unlock(&ds->mutex); 2993 if (s) { 2994 /* so debugfs does not need to mutex_lock() */ 2995 synchronize_rcu(); 2996 kernel_sock_shutdown(s, SHUT_RDWR); 2997 sock_release(s); 2998 } 2999 } 3000 3001 void drbd_free_sock(struct drbd_connection *connection) 3002 { 3003 if (connection->data.socket) 3004 drbd_free_one_sock(&connection->data); 3005 if (connection->meta.socket) 3006 drbd_free_one_sock(&connection->meta); 3007 } 3008 3009 /* meta data management */ 3010 3011 void conn_md_sync(struct drbd_connection *connection) 3012 { 3013 struct drbd_peer_device *peer_device; 3014 int vnr; 3015 3016 rcu_read_lock(); 3017 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 3018 struct drbd_device *device = peer_device->device; 3019 3020 kref_get(&device->kref); 3021 rcu_read_unlock(); 3022 drbd_md_sync(device); 3023 kref_put(&device->kref, drbd_destroy_device); 3024 rcu_read_lock(); 3025 } 3026 rcu_read_unlock(); 3027 } 3028 3029 /* aligned 4kByte */ 3030 struct meta_data_on_disk { 3031 u64 la_size_sect; /* last agreed size. */ 3032 u64 uuid[UI_SIZE]; /* UUIDs. */ 3033 u64 device_uuid; 3034 u64 reserved_u64_1; 3035 u32 flags; /* MDF */ 3036 u32 magic; 3037 u32 md_size_sect; 3038 u32 al_offset; /* offset to this block */ 3039 u32 al_nr_extents; /* important for restoring the AL (userspace) */ 3040 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */ 3041 u32 bm_offset; /* offset to the bitmap, from here */ 3042 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ 3043 u32 la_peer_max_bio_size; /* last peer max_bio_size */ 3044 3045 /* see al_tr_number_to_on_disk_sector() */ 3046 u32 al_stripes; 3047 u32 al_stripe_size_4k; 3048 3049 u8 reserved_u8[4096 - (7*8 + 10*4)]; 3050 } __packed; 3051 3052 3053 3054 void drbd_md_write(struct drbd_device *device, void *b) 3055 { 3056 struct meta_data_on_disk *buffer = b; 3057 sector_t sector; 3058 int i; 3059 3060 memset(buffer, 0, sizeof(*buffer)); 3061 3062 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev)); 3063 for (i = UI_CURRENT; i < UI_SIZE; i++) 3064 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); 3065 buffer->flags = cpu_to_be32(device->ldev->md.flags); 3066 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN); 3067 3068 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect); 3069 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset); 3070 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements); 3071 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE); 3072 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid); 3073 3074 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset); 3075 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size); 3076 3077 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes); 3078 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k); 3079 3080 
D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); 3081 sector = device->ldev->md.md_offset; 3082 3083 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { 3084 /* this was a try anyways ... */ 3085 drbd_err(device, "meta data update failed!\n"); 3086 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); 3087 } 3088 } 3089 3090 /** 3091 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set 3092 * @device: DRBD device. 3093 */ 3094 void drbd_md_sync(struct drbd_device *device) 3095 { 3096 struct meta_data_on_disk *buffer; 3097 3098 /* Don't accidentally change the DRBD meta data layout. */ 3099 BUILD_BUG_ON(UI_SIZE != 4); 3100 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096); 3101 3102 del_timer(&device->md_sync_timer); 3103 /* timer may be rearmed by drbd_md_mark_dirty() now. */ 3104 if (!test_and_clear_bit(MD_DIRTY, &device->flags)) 3105 return; 3106 3107 /* We use here D_FAILED and not D_ATTACHING because we try to write 3108 * metadata even if we detach due to a disk failure! */ 3109 if (!get_ldev_if_state(device, D_FAILED)) 3110 return; 3111 3112 buffer = drbd_md_get_buffer(device, __func__); 3113 if (!buffer) 3114 goto out; 3115 3116 drbd_md_write(device, buffer); 3117 3118 /* Update device->ldev->md.la_size_sect, 3119 * since we updated it on metadata. */ 3120 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev); 3121 3122 drbd_md_put_buffer(device); 3123 out: 3124 put_ldev(device); 3125 } 3126 3127 static int check_activity_log_stripe_size(struct drbd_device *device, 3128 struct meta_data_on_disk *on_disk, 3129 struct drbd_md *in_core) 3130 { 3131 u32 al_stripes = be32_to_cpu(on_disk->al_stripes); 3132 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k); 3133 u64 al_size_4k; 3134 3135 /* both not set: default to old fixed size activity log */ 3136 if (al_stripes == 0 && al_stripe_size_4k == 0) { 3137 al_stripes = 1; 3138 al_stripe_size_4k = MD_32kB_SECT/8; 3139 } 3140 3141 /* some paranoia plausibility checks */ 3142 3143 /* we need both values to be set */ 3144 if (al_stripes == 0 || al_stripe_size_4k == 0) 3145 goto err; 3146 3147 al_size_4k = (u64)al_stripes * al_stripe_size_4k; 3148 3149 /* Upper limit of activity log area, to avoid potential overflow 3150 * problems in al_tr_number_to_on_disk_sector(). As right now, more 3151 * than 72 * 4k blocks total only increases the amount of history, 3152 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */ 3153 if (al_size_4k > (16 * 1024 * 1024/4)) 3154 goto err; 3155 3156 /* Lower limit: we need at least 8 transaction slots (32kB) 3157 * to not break existing setups */ 3158 if (al_size_4k < MD_32kB_SECT/8) 3159 goto err; 3160 3161 in_core->al_stripe_size_4k = al_stripe_size_4k; 3162 in_core->al_stripes = al_stripes; 3163 in_core->al_size_4k = al_size_4k; 3164 3165 return 0; 3166 err: 3167 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n", 3168 al_stripes, al_stripe_size_4k); 3169 return -EINVAL; 3170 } 3171 3172 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev) 3173 { 3174 sector_t capacity = drbd_get_capacity(bdev->md_bdev); 3175 struct drbd_md *in_core = &bdev->md; 3176 s32 on_disk_al_sect; 3177 s32 on_disk_bm_sect; 3178 3179 /* The on-disk size of the activity log, calculated from offsets, and 3180 * the size of the activity log calculated from the stripe settings, 3181 * should match. 
3182 * Though we could relax this a bit: it is ok, if the striped activity log 3183 * fits in the available on-disk activity log size. 3184 * Right now, that would break how resize is implemented. 3185 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware 3186 * of possible unused padding space in the on disk layout. */ 3187 if (in_core->al_offset < 0) { 3188 if (in_core->bm_offset > in_core->al_offset) 3189 goto err; 3190 on_disk_al_sect = -in_core->al_offset; 3191 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset; 3192 } else { 3193 if (in_core->al_offset != MD_4kB_SECT) 3194 goto err; 3195 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT) 3196 goto err; 3197 3198 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT; 3199 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset; 3200 } 3201 3202 /* old fixed size meta data is exactly that: fixed. */ 3203 if (in_core->meta_dev_idx >= 0) { 3204 if (in_core->md_size_sect != MD_128MB_SECT 3205 || in_core->al_offset != MD_4kB_SECT 3206 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT 3207 || in_core->al_stripes != 1 3208 || in_core->al_stripe_size_4k != MD_32kB_SECT/8) 3209 goto err; 3210 } 3211 3212 if (capacity < in_core->md_size_sect) 3213 goto err; 3214 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev)) 3215 goto err; 3216 3217 /* should be aligned, and at least 32k */ 3218 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT)) 3219 goto err; 3220 3221 /* should fit (for now: exactly) into the available on-disk space; 3222 * overflow prevention is in check_activity_log_stripe_size() above. */ 3223 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT) 3224 goto err; 3225 3226 /* again, should be aligned */ 3227 if (in_core->bm_offset & 7) 3228 goto err; 3229 3230 /* FIXME check for device grow with flex external meta data? */ 3231 3232 /* can the available bitmap space cover the last agreed device size? */ 3233 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512) 3234 goto err; 3235 3236 return 0; 3237 3238 err: 3239 drbd_err(device, "meta data offsets don't make sense: idx=%d " 3240 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, " 3241 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n", 3242 in_core->meta_dev_idx, 3243 in_core->al_stripes, in_core->al_stripe_size_4k, 3244 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect, 3245 (unsigned long long)in_core->la_size_sect, 3246 (unsigned long long)capacity); 3247 3248 return -EINVAL; 3249 } 3250 3251 3252 /** 3253 * drbd_md_read() - Reads in the meta data super block 3254 * @device: DRBD device. 3255 * @bdev: Device from which the meta data should be read in. 3256 * 3257 * Return NO_ERROR on success, and an enum drbd_ret_code in case 3258 * something goes wrong. 3259 * 3260 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS, 3261 * even before @bdev is assigned to @device->ldev. 3262 */ 3263 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) 3264 { 3265 struct meta_data_on_disk *buffer; 3266 u32 magic, flags; 3267 int i, rv = NO_ERROR; 3268 3269 if (device->state.disk != D_DISKLESS) 3270 return ERR_DISK_CONFIGURED; 3271 3272 buffer = drbd_md_get_buffer(device, __func__); 3273 if (!buffer) 3274 return ERR_NOMEM; 3275 3276 /* First, figure out where our meta data superblock is located, 3277 * and read it. 
*/ 3278 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx; 3279 bdev->md.md_offset = drbd_md_ss(bdev); 3280 3281 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { 3282 /* NOTE: can't do normal error processing here as this is 3283 called BEFORE disk is attached */ 3284 drbd_err(device, "Error while reading metadata.\n"); 3285 rv = ERR_IO_MD_DISK; 3286 goto err; 3287 } 3288 3289 magic = be32_to_cpu(buffer->magic); 3290 flags = be32_to_cpu(buffer->flags); 3291 if (magic == DRBD_MD_MAGIC_84_UNCLEAN || 3292 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) { 3293 /* btw: that's Activity Log clean, not "all" clean. */ 3294 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n"); 3295 rv = ERR_MD_UNCLEAN; 3296 goto err; 3297 } 3298 3299 rv = ERR_MD_INVALID; 3300 if (magic != DRBD_MD_MAGIC_08) { 3301 if (magic == DRBD_MD_MAGIC_07) 3302 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); 3303 else 3304 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n"); 3305 goto err; 3306 } 3307 3308 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) { 3309 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n", 3310 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE); 3311 goto err; 3312 } 3313 3314 3315 /* convert to in_core endian */ 3316 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect); 3317 for (i = UI_CURRENT; i < UI_SIZE; i++) 3318 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); 3319 bdev->md.flags = be32_to_cpu(buffer->flags); 3320 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); 3321 3322 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect); 3323 bdev->md.al_offset = be32_to_cpu(buffer->al_offset); 3324 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset); 3325 3326 if (check_activity_log_stripe_size(device, buffer, &bdev->md)) 3327 goto err; 3328 if (check_offsets_and_sizes(device, bdev)) 3329 goto err; 3330 3331 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { 3332 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n", 3333 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); 3334 goto err; 3335 } 3336 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { 3337 drbd_err(device, "unexpected md_size: %u (expected %u)\n", 3338 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); 3339 goto err; 3340 } 3341 3342 rv = NO_ERROR; 3343 3344 spin_lock_irq(&device->resource->req_lock); 3345 if (device->state.conn < C_CONNECTED) { 3346 unsigned int peer; 3347 peer = be32_to_cpu(buffer->la_peer_max_bio_size); 3348 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE); 3349 device->peer_max_bio_size = peer; 3350 } 3351 spin_unlock_irq(&device->resource->req_lock); 3352 3353 err: 3354 drbd_md_put_buffer(device); 3355 3356 return rv; 3357 } 3358 3359 /** 3360 * drbd_md_mark_dirty() - Mark meta data super block as dirty 3361 * @device: DRBD device. 3362 * 3363 * Call this function if you change anything that should be written to 3364 * the meta-data super block. This function sets MD_DIRTY, and starts a 3365 * timer that ensures that within five seconds you have to call drbd_md_sync(). 
3366 */ 3367 #ifdef DEBUG 3368 void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func) 3369 { 3370 if (!test_and_set_bit(MD_DIRTY, &device->flags)) { 3371 mod_timer(&device->md_sync_timer, jiffies + HZ); 3372 device->last_md_mark_dirty.line = line; 3373 device->last_md_mark_dirty.func = func; 3374 } 3375 } 3376 #else 3377 void drbd_md_mark_dirty(struct drbd_device *device) 3378 { 3379 if (!test_and_set_bit(MD_DIRTY, &device->flags)) 3380 mod_timer(&device->md_sync_timer, jiffies + 5*HZ); 3381 } 3382 #endif 3383 3384 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local) 3385 { 3386 int i; 3387 3388 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) 3389 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i]; 3390 } 3391 3392 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local) 3393 { 3394 if (idx == UI_CURRENT) { 3395 if (device->state.role == R_PRIMARY) 3396 val |= 1; 3397 else 3398 val &= ~((u64)1); 3399 3400 drbd_set_ed_uuid(device, val); 3401 } 3402 3403 device->ldev->md.uuid[idx] = val; 3404 drbd_md_mark_dirty(device); 3405 } 3406 3407 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local) 3408 { 3409 unsigned long flags; 3410 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); 3411 __drbd_uuid_set(device, idx, val); 3412 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); 3413 } 3414 3415 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local) 3416 { 3417 unsigned long flags; 3418 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); 3419 if (device->ldev->md.uuid[idx]) { 3420 drbd_uuid_move_history(device); 3421 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx]; 3422 } 3423 __drbd_uuid_set(device, idx, val); 3424 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); 3425 } 3426 3427 /** 3428 * drbd_uuid_new_current() - Creates a new current UUID 3429 * @device: DRBD device. 3430 * 3431 * Creates a new current UUID, and rotates the old current UUID into 3432 * the bitmap slot. Causes an incremental resync upon next connect. 
3433 */ 3434 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local) 3435 { 3436 u64 val; 3437 unsigned long long bm_uuid; 3438 3439 get_random_bytes(&val, sizeof(u64)); 3440 3441 spin_lock_irq(&device->ldev->md.uuid_lock); 3442 bm_uuid = device->ldev->md.uuid[UI_BITMAP]; 3443 3444 if (bm_uuid) 3445 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid); 3446 3447 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT]; 3448 __drbd_uuid_set(device, UI_CURRENT, val); 3449 spin_unlock_irq(&device->ldev->md.uuid_lock); 3450 3451 drbd_print_uuids(device, "new current UUID"); 3452 /* get it to stable storage _now_ */ 3453 drbd_md_sync(device); 3454 } 3455 3456 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) 3457 { 3458 unsigned long flags; 3459 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) 3460 return; 3461 3462 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); 3463 if (val == 0) { 3464 drbd_uuid_move_history(device); 3465 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; 3466 device->ldev->md.uuid[UI_BITMAP] = 0; 3467 } else { 3468 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP]; 3469 if (bm_uuid) 3470 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid); 3471 3472 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); 3473 } 3474 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); 3475 3476 drbd_md_mark_dirty(device); 3477 } 3478 3479 /** 3480 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() 3481 * @device: DRBD device. 3482 * 3483 * Sets all bits in the bitmap and writes the whole bitmap to stable storage. 3484 */ 3485 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local) 3486 { 3487 int rv = -EIO; 3488 3489 drbd_md_set_flag(device, MDF_FULL_SYNC); 3490 drbd_md_sync(device); 3491 drbd_bm_set_all(device); 3492 3493 rv = drbd_bm_write(device); 3494 3495 if (!rv) { 3496 drbd_md_clear_flag(device, MDF_FULL_SYNC); 3497 drbd_md_sync(device); 3498 } 3499 3500 return rv; 3501 } 3502 3503 /** 3504 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() 3505 * @device: DRBD device. 3506 * 3507 * Clears all bits in the bitmap and writes the whole bitmap to stable storage. 3508 */ 3509 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local) 3510 { 3511 drbd_resume_al(device); 3512 drbd_bm_clear_all(device); 3513 return drbd_bm_write(device); 3514 } 3515 3516 static int w_bitmap_io(struct drbd_work *w, int unused) 3517 { 3518 struct drbd_device *device = 3519 container_of(w, struct drbd_device, bm_io_work.w); 3520 struct bm_io_work *work = &device->bm_io_work; 3521 int rv = -EIO; 3522 3523 D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0); 3524 3525 if (get_ldev(device)) { 3526 drbd_bm_lock(device, work->why, work->flags); 3527 rv = work->io_fn(device); 3528 drbd_bm_unlock(device); 3529 put_ldev(device); 3530 } 3531 3532 clear_bit_unlock(BITMAP_IO, &device->flags); 3533 wake_up(&device->misc_wait); 3534 3535 if (work->done) 3536 work->done(device, rv); 3537 3538 clear_bit(BITMAP_IO_QUEUED, &device->flags); 3539 work->why = NULL; 3540 work->flags = 0; 3541 3542 return 0; 3543 } 3544 3545 /** 3546 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap 3547 * @device: DRBD device. 
* @io_fn: IO callback to be called when bitmap IO is possible
3549 * @done: callback to be called after the bitmap IO was performed
3550 * @why: Descriptive text of the reason for doing the IO
3551 *
3552 * While IO on the bitmap happens we freeze application IO, thus ensuring
3553 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3554 * called from worker context. It MUST NOT be used while a previous such
3555 * work is still pending!
3556 *
3557 * Its worker function encloses the call of io_fn() by get_ldev() and
3558 * put_ldev().
3559 */
3560 void drbd_queue_bitmap_io(struct drbd_device *device,
3561 int (*io_fn)(struct drbd_device *),
3562 void (*done)(struct drbd_device *, int),
3563 char *why, enum bm_flag flags)
3564 {
3565 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3566
3567 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3568 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3569 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3570 if (device->bm_io_work.why)
3571 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3572 why, device->bm_io_work.why);
3573
3574 device->bm_io_work.io_fn = io_fn;
3575 device->bm_io_work.done = done;
3576 device->bm_io_work.why = why;
3577 device->bm_io_work.flags = flags;
3578
3579 spin_lock_irq(&device->resource->req_lock);
3580 set_bit(BITMAP_IO, &device->flags);
3581 if (atomic_read(&device->ap_bio_cnt) == 0) {
3582 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3583 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3584 &device->bm_io_work.w);
3585 }
3586 spin_unlock_irq(&device->resource->req_lock);
3587 }
3588
3589 /**
3590 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3591 * @device: DRBD device.
3592 * @io_fn: IO callback to be called when bitmap IO is possible
3593 * @why: Descriptive text of the reason for doing the IO
3594 *
3595 * Freezes application IO while the actual IO operation runs. This
3596 * function MAY NOT be called from worker context.
3597 */ 3598 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *), 3599 char *why, enum bm_flag flags) 3600 { 3601 int rv; 3602 3603 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); 3604 3605 if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3606 drbd_suspend_io(device); 3607 3608 drbd_bm_lock(device, why, flags); 3609 rv = io_fn(device); 3610 drbd_bm_unlock(device); 3611 3612 if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3613 drbd_resume_io(device); 3614 3615 return rv; 3616 } 3617 3618 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local) 3619 { 3620 if ((device->ldev->md.flags & flag) != flag) { 3621 drbd_md_mark_dirty(device); 3622 device->ldev->md.flags |= flag; 3623 } 3624 } 3625 3626 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local) 3627 { 3628 if ((device->ldev->md.flags & flag) != 0) { 3629 drbd_md_mark_dirty(device); 3630 device->ldev->md.flags &= ~flag; 3631 } 3632 } 3633 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) 3634 { 3635 return (bdev->md.flags & flag) != 0; 3636 } 3637 3638 static void md_sync_timer_fn(unsigned long data) 3639 { 3640 struct drbd_device *device = (struct drbd_device *) data; 3641 drbd_device_post_work(device, MD_SYNC); 3642 } 3643 3644 const char *cmdname(enum drbd_packet cmd) 3645 { 3646 /* THINK may need to become several global tables 3647 * when we want to support more than 3648 * one PRO_VERSION */ 3649 static const char *cmdnames[] = { 3650 [P_DATA] = "Data", 3651 [P_DATA_REPLY] = "DataReply", 3652 [P_RS_DATA_REPLY] = "RSDataReply", 3653 [P_BARRIER] = "Barrier", 3654 [P_BITMAP] = "ReportBitMap", 3655 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget", 3656 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource", 3657 [P_UNPLUG_REMOTE] = "UnplugRemote", 3658 [P_DATA_REQUEST] = "DataRequest", 3659 [P_RS_DATA_REQUEST] = "RSDataRequest", 3660 [P_SYNC_PARAM] = "SyncParam", 3661 [P_SYNC_PARAM89] = "SyncParam89", 3662 [P_PROTOCOL] = "ReportProtocol", 3663 [P_UUIDS] = "ReportUUIDs", 3664 [P_SIZES] = "ReportSizes", 3665 [P_STATE] = "ReportState", 3666 [P_SYNC_UUID] = "ReportSyncUUID", 3667 [P_AUTH_CHALLENGE] = "AuthChallenge", 3668 [P_AUTH_RESPONSE] = "AuthResponse", 3669 [P_PING] = "Ping", 3670 [P_PING_ACK] = "PingAck", 3671 [P_RECV_ACK] = "RecvAck", 3672 [P_WRITE_ACK] = "WriteAck", 3673 [P_RS_WRITE_ACK] = "RSWriteAck", 3674 [P_SUPERSEDED] = "Superseded", 3675 [P_NEG_ACK] = "NegAck", 3676 [P_NEG_DREPLY] = "NegDReply", 3677 [P_NEG_RS_DREPLY] = "NegRSDReply", 3678 [P_BARRIER_ACK] = "BarrierAck", 3679 [P_STATE_CHG_REQ] = "StateChgRequest", 3680 [P_STATE_CHG_REPLY] = "StateChgReply", 3681 [P_OV_REQUEST] = "OVRequest", 3682 [P_OV_REPLY] = "OVReply", 3683 [P_OV_RESULT] = "OVResult", 3684 [P_CSUM_RS_REQUEST] = "CsumRSRequest", 3685 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", 3686 [P_COMPRESSED_BITMAP] = "CBitmap", 3687 [P_DELAY_PROBE] = "DelayProbe", 3688 [P_OUT_OF_SYNC] = "OutOfSync", 3689 [P_RETRY_WRITE] = "RetryWrite", 3690 [P_RS_CANCEL] = "RSCancel", 3691 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req", 3692 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply", 3693 [P_RETRY_WRITE] = "retry_write", 3694 [P_PROTOCOL_UPDATE] = "protocol_update", 3695 3696 /* enum drbd_packet, but not commands - obsoleted flags: 3697 * P_MAY_IGNORE 3698 * P_MAX_OPT_CMD 3699 */ 3700 }; 3701 3702 /* too big for the array: 0xfffX */ 3703 if (cmd == P_INITIAL_META) 3704 return "InitialMeta"; 3705 if (cmd == P_INITIAL_DATA) 3706 return "InitialData"; 3707 if (cmd == P_CONNECTION_FEATURES) 3708 return 
"ConnectionFeatures"; 3709 if (cmd >= ARRAY_SIZE(cmdnames)) 3710 return "Unknown"; 3711 return cmdnames[cmd]; 3712 } 3713 3714 /** 3715 * drbd_wait_misc - wait for a request to make progress 3716 * @device: device associated with the request 3717 * @i: the struct drbd_interval embedded in struct drbd_request or 3718 * struct drbd_peer_request 3719 */ 3720 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i) 3721 { 3722 struct net_conf *nc; 3723 DEFINE_WAIT(wait); 3724 long timeout; 3725 3726 rcu_read_lock(); 3727 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); 3728 if (!nc) { 3729 rcu_read_unlock(); 3730 return -ETIMEDOUT; 3731 } 3732 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT; 3733 rcu_read_unlock(); 3734 3735 /* Indicate to wake up device->misc_wait on progress. */ 3736 i->waiting = true; 3737 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE); 3738 spin_unlock_irq(&device->resource->req_lock); 3739 timeout = schedule_timeout(timeout); 3740 finish_wait(&device->misc_wait, &wait); 3741 spin_lock_irq(&device->resource->req_lock); 3742 if (!timeout || device->state.conn < C_CONNECTED) 3743 return -ETIMEDOUT; 3744 if (signal_pending(current)) 3745 return -ERESTARTSYS; 3746 return 0; 3747 } 3748 3749 #ifdef CONFIG_DRBD_FAULT_INJECTION 3750 /* Fault insertion support including random number generator shamelessly 3751 * stolen from kernel/rcutorture.c */ 3752 struct fault_random_state { 3753 unsigned long state; 3754 unsigned long count; 3755 }; 3756 3757 #define FAULT_RANDOM_MULT 39916801 /* prime */ 3758 #define FAULT_RANDOM_ADD 479001701 /* prime */ 3759 #define FAULT_RANDOM_REFRESH 10000 3760 3761 /* 3762 * Crude but fast random-number generator. Uses a linear congruential 3763 * generator, with occasional help from get_random_bytes(). 3764 */ 3765 static unsigned long 3766 _drbd_fault_random(struct fault_random_state *rsp) 3767 { 3768 long refresh; 3769 3770 if (!rsp->count--) { 3771 get_random_bytes(&refresh, sizeof(refresh)); 3772 rsp->state += refresh; 3773 rsp->count = FAULT_RANDOM_REFRESH; 3774 } 3775 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD; 3776 return swahw32(rsp->state); 3777 } 3778 3779 static char * 3780 _drbd_fault_str(unsigned int type) { 3781 static char *_faults[] = { 3782 [DRBD_FAULT_MD_WR] = "Meta-data write", 3783 [DRBD_FAULT_MD_RD] = "Meta-data read", 3784 [DRBD_FAULT_RS_WR] = "Resync write", 3785 [DRBD_FAULT_RS_RD] = "Resync read", 3786 [DRBD_FAULT_DT_WR] = "Data write", 3787 [DRBD_FAULT_DT_RD] = "Data read", 3788 [DRBD_FAULT_DT_RA] = "Data read ahead", 3789 [DRBD_FAULT_BM_ALLOC] = "BM allocation", 3790 [DRBD_FAULT_AL_EE] = "EE allocation", 3791 [DRBD_FAULT_RECEIVE] = "receive data corruption", 3792 }; 3793 3794 return (type < DRBD_FAULT_MAX) ? 
unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << device_to_minor(device)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has a reference to the
	   git hash of the source code here. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);